Called when a dangerous action is about to be done, to make sure it's okay. `prompt` is printed; the user's response is returned.
def confirm(self, prompt, default):
    """Called when a dangerous action is about to be done, to make sure
    it's okay. `prompt` is printed; the user's response is returned."""
    while True:
        try:
            self.write_confirm(prompt, default)
            reply = self.readline('').strip().lower()
        except EOFError:
            return default
        if reply in ('y', 'yes'):
            return True
        elif reply in ('n', 'no'):
            return False
        else:
            self.msg("Please answer y or n.")
r'\s+'
def t_whitespace(self, s):
    r'\s+'
    self.add_token('SPACE', s)
    self.pos += len(s)
r"'[^'].+'"
def t_single_quote_file(self, s):
    r"'[^'].+'"
    # Pick out the text inside of a single-quoted string
    base = s[1:-1]
    self.add_token('FILENAME', base)
    self.pos += len(s)
r'"[^"]+"'
def t_double_quote_file(self, s):
    r'"[^"]+"'
    # Pick out the text inside of a double-quoted string
    base = s[1:-1]
    self.add_token('FILENAME', base)
    self.pos += len(s)
r':'
def t_colon(self, s):
    r':'
    # Used to separate a filename from a line number
    self.add_token('COLON', s)
    self.pos += len(s)
r','
def t_comma(self, s):
    r','
    # Used in "list" to separate first from last
    self.add_token('COMMA', s)
    self.pos += len(s)
r'^[+-]$'
def t_direction(self, s):
    r'^[+-]$'
    # Used in the "list" command
    self.add_token('DIRECTION', s)
    self.pos += len(s)
r'\d+'
def t_number(self, s):
    r'\d+'
    pos = self.pos
    self.add_token('NUMBER', int(s))
    self.pos = pos + len(s)
r'[+]\d+'
def t_offset(self, s):
    r'[+]\d+'
    pos = self.pos
    self.add_token('OFFSET', s)
    self.pos = pos + len(s)
r'[*]\d+'
def t_address(self, s):
    r'[*]\d+'
    pos = self.pos
    self.add_token('ADDRESS', s)
    self.pos = pos + len(s)
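The t_* rules above follow a scanner convention in which each handler's docstring doubles as the regular expression for that token; the base class that collects and dispatches these rules is not shown here. The following is a minimal, self-contained sketch of how such rules could be driven, assuming only the add_token/pos protocol used above (MiniScanner and tokenize are illustrative names, not the original scanner's API):

import re

class MiniScanner:
    """Hypothetical driver: dispatches to t_* methods keyed by their docstring regex."""

    def __init__(self):
        self.tokens = []
        self.pos = 0
        # Collect (compiled regex, handler) pairs from every t_* method.
        self.rules = [
            (re.compile(getattr(self, name).__doc__), getattr(self, name))
            for name in dir(self) if name.startswith('t_')
        ]

    def add_token(self, kind, value):
        self.tokens.append((kind, value))

    def tokenize(self, text):
        while self.pos < len(text):
            for pattern, handler in self.rules:
                match = pattern.match(text, self.pos)
                if match:
                    handler(match.group(0))
                    break
            else:
                raise ValueError('no rule matches at position %d' % self.pos)
        return self.tokens

    def t_number(self, s):
        r'\d+'
        self.add_token('NUMBER', int(s))
        self.pos += len(s)

    def t_whitespace(self, s):
        r'\s+'
        self.add_token('SPACE', s)
        self.pos += len(s)

# MiniScanner().tokenize('12 34') -> [('NUMBER', 12), ('SPACE', ' '), ('NUMBER', 34)]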
extract_concepts takes a list of sentences and ids (optional), then returns a list of Concept objects extracted via MetaMap.
def extract_concepts(self, sentences=None, ids=None, composite_phrase=4, filename=None, file_format='sldi', allow_acronym_variants=False, word_sense_disambiguation=False, allow_large_n=False, strict_model=False, relaxed_model=False, allow_overmatches=False, allow_concept_gaps=False, term_processing=False, no_derivational_variants=False, derivational_variants=False, ignore_word_order=False, unique_acronym_variants=False, prefer_multiple_concepts=False, ignore_stop_phrases=False, compute_all_mappings=False, mm_data_version=False, exclude_sources=[], restrict_to_sources=[], restrict_to_sts=[], exclude_sts=[]): """ extract_concepts takes a list of sentences and ids(optional) then returns a list of Concept objects extracted via MetaMap. Supported Options: Composite Phrase -Q Word Sense Disambiguation -y use strict model -A use relaxed model -C allow large N -l allow overmatches -o allow concept gaps -g term processing -z No Derivational Variants -d All Derivational Variants -D Ignore Word Order -i Allow Acronym Variants -a Unique Acronym Variants -u Prefer Multiple Concepts -Y Ignore Stop Phrases -K Compute All Mappings -b MM Data Version -V Exclude Sources -e Restrict to Sources -R Restrict to Semantic Types -J Exclude Semantic Types -k For information about the available options visit http://metamap.nlm.nih.gov/. Note: If an error is encountered the process will be closed and whatever was processed, if anything, will be returned along with the error found. """ if allow_acronym_variants and unique_acronym_variants: raise ValueError("You can't use both allow_acronym_variants and " "unique_acronym_variants.") if (sentences is not None and filename is not None) or \ (sentences is None and filename is None): raise ValueError("You must either pass a list of sentences " "OR a filename.") if file_format not in ['sldi','sldiID']: raise ValueError("file_format must be either sldi or sldiID") input_file = None if sentences is not None: input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False) else: input_file = open(filename, 'r') output_file = tempfile.NamedTemporaryFile(mode="r", delete=False) error = None try: if sentences is not None: if ids is not None: for identifier, sentence in zip(ids, sentences): input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8')) else: for sentence in sentences: input_file.write('{0!r}\n'.format(sentence).encode('utf8')) input_file.flush() command = [self.metamap_filename, '-N'] command.append('-Q') command.append(str(composite_phrase)) if mm_data_version is not False: if mm_data_version not in ['Base', 'USAbase', 'NLM']: raise ValueError("mm_data_version must be Base, USAbase, or NLM.") command.append('-V') command.append(str(mm_data_version)) if word_sense_disambiguation: command.append('-y') if strict_model: command.append('-A') if relaxed_model: command.append('-C') if allow_large_n: command.append('-l') if allow_overmatches: command.append('-o') if allow_concept_gaps: command.append('-g') if term_processing: command.append('-z') if no_derivational_variants: command.append('-d') if derivational_variants: command.append('-D') if ignore_word_order: command.append('-i') if allow_acronym_variants: command.append('-a') if unique_acronym_variants: command.append('-u') if prefer_multiple_concepts: command.append('-Y') if ignore_stop_phrases: command.append('-K') if compute_all_mappings: command.append('-b') if len(exclude_sources) > 0: command.append('-e') command.append(str(','.join(exclude_sources))) if len(restrict_to_sources) > 0: 
command.append('-R') command.append(str(','.join(restrict_to_sources))) if len(restrict_to_sts) > 0: command.append('-J') command.append(str(','.join(restrict_to_sts))) if len(exclude_sts) > 0: command.append('-k') command.append(str(','.join(exclude_sts))) if ids is not None or (file_format == 'sldiID' and sentences is None): command.append('--sldiID') else: command.append('--sldi') command.append(input_file.name) command.append(output_file.name) metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE) while metamap_process.poll() is None: stdout = str(metamap_process.stdout.readline()) if 'ERROR' in stdout: metamap_process.terminate() error = stdout.rstrip() output = str(output_file.read()) finally: if sentences is not None: os.remove(input_file.name) else: input_file.close() os.remove(output_file.name) concepts = Corpus.load(output.splitlines()) return (concepts, error)
extract_concepts takes a list of sentences and ids (optional), then returns a list of Concept objects extracted via MetaMapLite.
def extract_concepts(self, sentences=None, ids=None, filename=None, restrict_to_sts=None, restrict_to_sources=None): """ extract_concepts takes a list of sentences and ids(optional) then returns a list of Concept objects extracted via MetaMapLite. Supported Options: Restrict to Semantic Types --restrict_to_sts Restrict to Sources --restrict_to_sources For information about the available options visit http://metamap.nlm.nih.gov/. Note: If an error is encountered the process will be closed and whatever was processed, if anything, will be returned along with the error found. """ if (sentences is not None and filename is not None) or \ (sentences is None and filename is None): raise ValueError("You must either pass a list of sentences " "OR a filename.") input_file = None if sentences is not None: input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False) else: input_file = open(filename, 'r') # Unlike MetaMap, MetaMapLite does not take an output filename as a parameter. # It creates a new output file at same location as "input_file" with the default file extension ".mmi". # output_file = tempfile.NamedTemporaryFile(mode="r", delete=False) output_file_name = None error = None try: if sentences is not None: if ids is not None: for identifier, sentence in zip(ids, sentences): input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8')) else: for sentence in sentences: input_file.write('{0!r}\n'.format(sentence).encode('utf8')) input_file.flush() command = ["bash", os.path.join(self.metamap_filename, "metamaplite.sh")] if restrict_to_sts: if isinstance(restrict_to_sts, str): restrict_to_sts = [restrict_to_sts] if len(restrict_to_sts) > 0: command.append('--restrict_to_sts') command.append(str(','.join(restrict_to_sts))) if restrict_to_sources: if isinstance(restrict_to_sources, str): restrict_to_sources = [restrict_to_sources] if len(restrict_to_sources) > 0: command.append('--restrict_to_sources') command.append(str(','.join(restrict_to_sources))) if ids is not None: command.append('--inputformat=sldiwi') command.append(input_file.name) # command.append(output_file.name) metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE) while metamap_process.poll() is None: stdout = str(metamap_process.stdout.readline()) if 'ERROR' in stdout: metamap_process.terminate() error = stdout.rstrip() # print("input file name: {0}".format(input_file.name)) output_file_name, file_extension = os.path.splitext(input_file.name) output_file_name += "." + "mmi" # print("output_file_name: {0}".format(output_file_name)) with open(output_file_name) as fd: output = fd.read() # output = str(output_file.read()) # print("output: {0}".format(output)) finally: if sentences is not None: os.remove(input_file.name) else: input_file.close() # os.remove(output_file.name) os.remove(output_file_name) concepts = CorpusLite.load(output.splitlines()) return concepts, error
Wrap a sqlalchemy.orm.query.Query object into a concurrent.futures.Future so that it can be yielded.
def as_future(self, query):
    """Wrap a `sqlalchemy.orm.query.Query` object into a
    `concurrent.futures.Future` so that it can be yielded.

    Parameters
    ----------
    query : sqlalchemy.orm.query.Query
        SQLAlchemy query object to execute

    Returns
    -------
    tornado.concurrent.Future
        A `Future` object wrapping the given query so that tornado can
        await/yield on it
    """
    # concurrent.futures.Future is not compatible with the "new style"
    # asyncio Future, and awaiting on such "old-style" futures does not
    # work.
    #
    # tornado includes a `run_in_executor` function to help with this
    # problem, but it's only included in version 5+. Hence, we copy a
    # little bit of code here to handle this incompatibility.
    if not self._pool:
        self._pool = ThreadPoolExecutor(max_workers=self._max_workers)
    old_future = self._pool.submit(query)
    new_future = Future()
    IOLoop.current().add_future(
        old_future, lambda f: chain_future(f, new_future)
    )
    return new_future
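A brief usage sketch in a Tornado coroutine follows. Because as_future() submits its argument to the thread pool, the argument needs to be a callable; the session, db, and User names below are illustrative assumptions, not part of the original code.

from tornado import gen

@gen.coroutine
def count_active_users(session, db):
    # `session` is assumed to expose as_future() as defined above; `db` is an
    # ordinary blocking SQLAlchemy session executed on the thread pool.
    users = yield session.as_future(
        lambda: db.query(User).filter_by(active=True).all()
    )
    return len(users)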
Utility function for forcing a login as a specific user -- be careful about calling this carelessly :)
def login_as(user, request, store_original_user=True): """ Utility function for forcing a login as specific user -- be careful about calling this carelessly :) """ # Save the original user pk before it is replaced in the login method original_user_pk = request.user.pk # Find a suitable backend. if not hasattr(user, "backend"): for backend in django_settings.AUTHENTICATION_BACKENDS: if not hasattr(load_backend(backend), "get_user"): continue if user == load_backend(backend).get_user(user.pk): user.backend = backend break else: raise ImproperlyConfigured("Could not found an appropriate authentication backend") # Add admin audit log entry if original_user_pk: change_message = "User {0} logged in as {1}.".format(request.user, user) LogEntry.objects.log_action( user_id=original_user_pk, content_type_id=ContentType.objects.get_for_model(user).pk, object_id=user.pk, object_repr=str(user), change_message=change_message, action_flag=CHANGE, ) # Log the user in. if not hasattr(user, "backend"): return if la_settings.UPDATE_LAST_LOGIN: login(request, user) else: with no_update_last_login(): login(request, user) # Set a flag on the session if store_original_user: messages.warning( request, la_settings.MESSAGE_LOGIN_SWITCH.format(username=user.__dict__[username_field]), extra_tags=la_settings.MESSAGE_EXTRA_TAGS, ) request.session[la_settings.USER_SESSION_FLAG] = signer.sign(original_user_pk)
Restore an original login session, checking the signed session.
def restore_original_login(request): """ Restore an original login session, checking the signed session """ original_session = request.session.get(la_settings.USER_SESSION_FLAG) logout(request) if not original_session: return try: original_user_pk = signer.unsign( original_session, max_age=timedelta(days=la_settings.USER_SESSION_DAYS_TIMESTAMP).total_seconds() ) user = get_user_model().objects.get(pk=original_user_pk) messages.info( request, la_settings.MESSAGE_LOGIN_REVERT.format(username=user.__dict__[username_field]), extra_tags=la_settings.MESSAGE_EXTRA_TAGS, ) login_as(user, request, store_original_user=False) if la_settings.USER_SESSION_FLAG in request.session: del request.session[la_settings.USER_SESSION_FLAG] except SignatureExpired: pass
Code to load the create-user module. Copied off django-browserid.
def _load_module(path): """Code to load create user module. Copied off django-browserid.""" i = path.rfind(".") module, attr = path[:i], path[i + 1 :] try: mod = import_module(module) except ImportError: raise ImproperlyConfigured("Error importing CAN_LOGIN_AS function: {}".format(module)) except ValueError: raise ImproperlyConfigured("Error importing CAN_LOGIN_AS" " function. Is CAN_LOGIN_AS a" " string?") try: can_login_as = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured("Module {0} does not define a {1} " "function.".format(module, attr)) return can_login_as
Yield each document in a Luminoso project in turn. Requires a client whose URL points to a project.
def iterate_docs(client, expanded=False, progress=False): """ Yield each document in a Luminoso project in turn. Requires a client whose URL points to a project. If expanded=True, it will include additional fields that Luminoso added in its analysis, such as 'terms' and 'vector'. Otherwise, it will contain only the fields necessary to reconstruct the document: 'title', 'text', and 'metadata'. Shows a progress bar if progress=True. """ # Get total number of docs from the project record num_docs = client.get()['document_count'] progress_bar = None try: if progress: progress_bar = tqdm(desc='Downloading documents', total=num_docs) for offset in range(0, num_docs, DOCS_PER_BATCH): response = client.get('docs', offset=offset, limit=DOCS_PER_BATCH) docs = response['result'] for doc in docs: # Get the appropriate set of fields for each document if expanded: for field in UNNECESSARY_FIELDS: doc.pop(field, None) else: doc = {field: doc[field] for field in CONCISE_FIELDS} if progress: progress_bar.update() yield doc finally: if progress: progress_bar.close()
Given a LuminosoClient pointing to a project and a filename to write to, retrieve all its documents in batches and write them to a JSON lines (.jsons) file with one document per line.
def download_docs(client, output_filename=None, expanded=False): """ Given a LuminosoClient pointing to a project and a filename to write to, retrieve all its documents in batches, and write them to a JSON lines (.jsons) file with one document per line. """ if output_filename is None: # Find a default filename to download to, based on the project name. projname = _sanitize_filename(client.get()['name']) output_filename = '{}.jsons'.format(projname) # If the file already exists, add .1, .2, ..., after the project name # to unobtrusively get a unique filename. counter = 0 while os.access(output_filename, os.F_OK): counter += 1 output_filename = '{}.{}.jsons'.format(projname, counter) print('Downloading project to {!r}'.format(output_filename)) with open(output_filename, 'w', encoding='utf-8') as out: for doc in iterate_docs(client, expanded=expanded, progress=True): print(json.dumps(doc, ensure_ascii=False), file=out)
Handle arguments for the 'lumi-download' command.
def _main(argv): """ Handle arguments for the 'lumi-download' command. """ parser = argparse.ArgumentParser( description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( '-b', '--base-url', default=URL_BASE, help='API root url, default: %s' % URL_BASE, ) parser.add_argument( '-e', '--expanded', help="Include Luminoso's analysis of each document, such as terms and" ' document vectors', action='store_true', ) parser.add_argument('-t', '--token', help='API authentication token') parser.add_argument( '-s', '--save-token', action='store_true', help='save --token for --base-url to ~/.luminoso/tokens.json', ) parser.add_argument( 'project_id', help='The ID of the project in the Daylight API' ) parser.add_argument( 'output_file', nargs='?', default=None, help='The JSON lines (.jsons) file to write to' ) args = parser.parse_args(argv) if args.save_token: if not args.token: raise ValueError("error: no token provided") LuminosoClient.save_token(args.token, domain=urlparse(args.base_url).netloc) client = LuminosoClient.connect(url=args.base_url, token=args.token) proj_client = client.client_for_path('projects/{}'.format(args.project_id)) download_docs(proj_client, args.output_file, args.expanded)
Convert a JSON or CSV file of input to a JSON stream (.jsons). This kind of file can be easily uploaded using luminoso_api.upload.
def transcode(input_filename, output_filename=None, date_format=None): """ Convert a JSON or CSV file of input to a JSON stream (.jsons). This kind of file can be easily uploaded using `luminoso_api.upload`. """ if output_filename is None: # transcode to standard output output = sys.stdout else: if output_filename.endswith('.json'): logger.warning("Changing .json to .jsons, because this program " "outputs a JSON stream format that is not " "technically JSON itself.") output_filename += 's' output = open(output_filename, 'w') for entry in open_json_or_csv_somehow(input_filename, date_format=date_format): output.write(json.dumps(entry, ensure_ascii=False).encode('utf-8')) output.write('\n') output.close()
Read a JSON or CSV file and convert it into a JSON stream, which will be saved in an anonymous temp file.
def transcode_to_stream(input_filename, date_format=None):
    """
    Read a JSON or CSV file and convert it into a JSON stream, which will
    be saved in an anonymous temp file.
    """
    tmp = tempfile.TemporaryFile()
    for entry in open_json_or_csv_somehow(input_filename,
                                          date_format=date_format):
        tmp.write(json.dumps(entry, ensure_ascii=False).encode('utf-8'))
        tmp.write(b'\n')
    tmp.seek(0)
    return tmp
Deduce the format of a file within reason.
def open_json_or_csv_somehow(filename, date_format=None): """ Deduce the format of a file, within reason. - If the filename ends with .csv or .txt, it's csv. - If the filename ends with .jsons, it's a JSON stream (conveniently the format we want to output). - If the filename ends with .json, it could be a legitimate JSON file, or it could be a JSON stream, following a nonstandard convention that many people including us are guilty of. In that case: - If the first line is a complete JSON document, and there is more in the file besides the first line, then it is a JSON stream. - Otherwise, it is probably really JSON. - If the filename does not end with .json, .jsons, or .csv, we have to guess whether it's still CSV or tab-separated values or something like that. If it's JSON, the first character would almost certainly have to be a bracket or a brace. If it isn't, assume it's CSV or similar. """ fileformat = None if filename.endswith('.csv'): fileformat = 'csv' elif filename.endswith('.jsons'): fileformat = 'jsons' else: with open(filename) as opened: line = opened.readline() if line[0] not in '{[' and not filename.endswith('.json'): fileformat = 'csv' else: if (line.count('{') == line.count('}') and line.count('[') == line.count(']')): # This line contains a complete JSON document. This probably # means it's in linewise JSON ('.jsons') format, unless the # whole file is on one line. char = ' ' while char.isspace(): char = opened.read() if char == '': fileformat = 'json' break if fileformat is None: fileformat = 'jsons' else: fileformat = 'json' if fileformat == 'json': stream = json.load(open(filename), encoding='utf-8') elif fileformat == 'csv': stream = open_csv_somehow(filename) else: stream = stream_json_lines(filename) return _normalize_data(stream, date_format=date_format)
This function is meant to normalize data for upload to the Luminoso Analytics system. Currently it only normalizes dates.
def _normalize_data(stream, date_format=None):
    """
    This function is meant to normalize data for upload to the Luminoso
    Analytics system. Currently it only normalizes dates.

    If date_format is not specified, or if there's no date in a particular
    doc, then the doc is yielded unchanged.
    """
    for doc in stream:
        if 'date' in doc and date_format is not None:
            try:
                doc['date'] = _convert_date(doc['date'], date_format)
            except ValueError:
                # ValueErrors cover the cases when date_format does not match
                # the actual format of the date, both for epoch and non-epoch
                # times.
                logger.exception('%s does not match the date format %s' %
                                 (doc['date'], date_format))
        yield doc
Convert a date in a given format to epoch time. Mostly a wrapper for datetime's strptime.
def _convert_date(date_string, date_format):
    """
    Convert a date in a given format to epoch time. Mostly a wrapper for
    datetime's strptime.
    """
    if date_format != 'epoch':
        return datetime.strptime(date_string, date_format).timestamp()
    else:
        return float(date_string)
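For example (no result is shown for the first call, because datetime.timestamp() interprets a naive datetime in the local timezone):

# Parse an ISO-style date string into epoch seconds.
_convert_date('2020-01-02T00:00:00+00:00', '%Y-%m-%dT%H:%M:%S+00:00')

# 'epoch' means the value is already epoch time; it is just cast to float.
_convert_date('1577923200', 'epoch')   # -> 1577923200.0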
Use ftfy to detect the encoding of a file based on a sample of its first megabyte.
def detect_file_encoding(filename): """ Use ftfy to detect the encoding of a file, based on a sample of its first megabyte. ftfy's encoding detector is limited. The only encodings it can detect are UTF-8, CESU-8, UTF-16, Windows-1252, and occasionally MacRoman. But it does much better than chardet. """ with open(filename, 'rb') as opened: sample = opened.read(2 ** 20) _, encoding = ftfy.guess_bytes(sample) return encoding
Load a JSON stream and return a generator yielding one object at a time.
def stream_json_lines(file):
    """
    Load a JSON stream and return a generator, yielding one object at a time.
    """
    if isinstance(file, string_type):
        file = open(file, 'rb')
    for line in file:
        line = line.strip()
        if line:
            if isinstance(line, bytes):
                line = line.decode('utf-8')
            yield json.loads(line)
Convert a file in some other encoding into a temporary file that's in UTF-8.
def transcode_to_utf8(filename, encoding):
    """
    Convert a file in some other encoding into a temporary file that's
    in UTF-8.
    """
    tmp = tempfile.TemporaryFile()
    for line in io.open(filename, encoding=encoding):
        tmp.write(line.strip('\uFEFF').encode('utf-8'))
    tmp.seek(0)
    return tmp
Open a CSV file using Python 2's CSV module, working around the deficiency where it can't handle the null bytes of UTF-16.
def open_csv_somehow_py2(filename): """ Open a CSV file using Python 2's CSV module, working around the deficiency where it can't handle the null bytes of UTF-16. """ encoding = detect_file_encoding(filename) if encoding.startswith('UTF-16'): csvfile = transcode_to_utf8(filename, encoding) encoding = 'UTF-8' else: csvfile = open(filename, 'rU') line = csvfile.readline() csvfile.seek(0) if '\t' in line: # tab-separated reader = csv.reader(csvfile, delimiter='\t') else: reader = csv.reader(csvfile, dialect='excel') header = reader.next() header = [cell.decode(encoding).lower().strip() for cell in header] encode_fn = lambda x: x.decode(encoding, 'replace') return _read_csv(reader, header, encode_fn)
Given a constructed CSV reader object, a header row that we've read, and a detected encoding, yield its rows as dictionaries.
def _read_csv(reader, header, encode_fn): """ Given a constructed CSV reader object, a header row that we've read, and a detected encoding, yield its rows as dictionaries. """ for row in reader: if len(row) == 0: continue row = [encode_fn(cell) for cell in row] row_list = zip(header, row) row_dict = dict(row_list) if len(row_dict['text']) == 0: continue row_dict['text'] = unicodedata.normalize( 'NFKC', row_dict['text'].strip() ) if row_dict.get('title') == '': del row_dict['title'] if 'date' in row_dict: # We handle dates further in open_json_or_csv_somehow if row_dict['date'] == '': del row_dict['date'] if 'subset' in row_dict: subsets = [cell[1] for cell in row_list if cell[1] != '' and cell[0] == 'subset'] if subsets: row_dict['subsets'] = subsets if 'subset' in row_dict: del row_dict['subset'] yield row_dict
Handle command line arguments to convert a file to a JSON stream as a script.
def main(): """ Handle command line arguments to convert a file to a JSON stream as a script. """ logging.basicConfig(level=logging.INFO) import argparse parser = argparse.ArgumentParser( description="Translate CSV or JSON input to a JSON stream, or verify " "something that is already a JSON stream." ) parser.add_argument('input', help='A CSV, JSON, or JSON stream file to read.') parser.add_argument('output', nargs='?', default=None, help="The filename to output to. Recommended extension is .jsons. " "If omitted, use standard output.") args = parser.parse_args() transcode(args.input, args.output)
When sent in an authorized REST request, only strings and integers can be transmitted accurately. Other types of data need to be encoded into JSON.
def jsonify_parameters(params):
    """
    When sent in an authorized REST request, only strings and integers can be
    transmitted accurately. Other types of data need to be encoded into JSON.
    """
    result = {}
    for param, value in params.items():
        if isinstance(value, (int, str)):
            result[param] = value
        else:
            result[param] = json.dumps(value)
    return result
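For example, scalar values pass through unchanged while structured values are encoded as JSON strings:

jsonify_parameters({'limit': 10, 'name': 'demo', 'fields': ['text', 'title']})
# -> {'limit': 10, 'name': 'demo', 'fields': '["text", "title"]'}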
Returns an object that makes requests to the API, authenticated with a saved or specified long-lived token, at URLs beginning with `url`.
def connect(cls, url=None, token_file=None, token=None): """ Returns an object that makes requests to the API, authenticated with a saved or specified long-lived token, at URLs beginning with `url`. If no URL is specified, or if the specified URL is a path such as '/projects' without a scheme and domain, the client will default to https://analytics.luminoso.com/api/v5/. If neither token nor token_file are specified, the client will look for a token in $HOME/.luminoso/tokens.json. The file should contain a single json dictionary of the format `{'root_url': 'token', 'root_url2': 'token2', ...}`. """ if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE if token is None: token_file = token_file or get_token_filename() try: with open(token_file) as tf: token_dict = json.load(tf) except FileNotFoundError: raise LuminosoAuthError('No token file at %s' % token_file) try: token = token_dict[urlparse(root_url).netloc] except KeyError: raise LuminosoAuthError('No token stored for %s' % root_url) session = requests.session() session.auth = _TokenAuth(token) return cls(session, url)
Take a long-lived API token and store it to a local file. Long-lived tokens can be retrieved through the UI. Optional arguments are the domain for which the token is valid and the file in which to store the token.
def save_token(token, domain='analytics.luminoso.com', token_file=None): """ Take a long-lived API token and store it to a local file. Long-lived tokens can be retrieved through the UI. Optional arguments are the domain for which the token is valid and the file in which to store the token. """ token_file = token_file or get_token_filename() if os.path.exists(token_file): saved_tokens = json.load(open(token_file)) else: saved_tokens = {} saved_tokens[domain] = token directory, filename = os.path.split(token_file) if directory and not os.path.exists(directory): os.makedirs(directory) with open(token_file, 'w') as f: json.dump(saved_tokens, f)
Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively.
def connect_with_username_and_password(cls, url=None, username=None, password=None): """ Returns an object that makes requests to the API, authenticated with a short-lived token retrieved from username and password. If username or password is not supplied, the method will prompt for a username and/or password to be entered interactively. See the connect method for more details about the `url` argument. PLEASE NOTE: This method is being provided as a temporary measure. We strongly encourage users of the Luminoso API to use a long-lived token instead, as explained in the V5_README file. """ from .v4_client import LuminosoClient as v4LC if username is None: username = input('Username: ') v4client = v4LC.connect(url=url, username=username, password=password) if url is None: url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE return cls(v4client.session, root_url)
Make a request via the requests module. If the result has an HTTP error status convert that to a Python exception.
def _request(self, req_type, url, **kwargs): """ Make a request via the `requests` module. If the result has an HTTP error status, convert that to a Python exception. """ logger.debug('%s %s' % (req_type, url)) result = self.session.request(req_type, url, **kwargs) try: result.raise_for_status() except requests.HTTPError: error = result.text try: error = json.loads(error) except ValueError: pass if result.status_code in (401, 403): error_class = LuminosoAuthError elif result.status_code in (400, 404, 405): error_class = LuminosoClientError elif result.status_code >= 500: error_class = LuminosoServerError else: error_class = LuminosoError raise error_class(error) return result
Make a POST request to the given path and return the JSON-decoded result.
def post(self, path='', **params): """ Make a POST request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to form values, sent in the body of the POST. POST requests are requests that cause a change on the server, especially those that ask to create and return an object of some kind. """ url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('post', url, data=json.dumps(params), headers={'Content-Type': 'application/json'})
Make a DELETE request to the given path and return the JSON-decoded result.
def delete(self, path='', **params): """ Make a DELETE request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to URL parameters. DELETE requests ask to delete the object represented by this URL. """ params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('delete', url, params=params)
Returns a new client with the same root URL and authentication, but a different specific URL. For instance, if you have a client pointed at https://analytics.luminoso.com/api/v5/ and you want new ones for Project A and Project B, you would call:
def client_for_path(self, path):
    """
    Returns a new client with the same root URL and authentication, but a
    different specific URL. For instance, if you have a client pointed at
    https://analytics.luminoso.com/api/v5/, and you want new ones for
    Project A and Project B, you would call:

        client_a = client.client_for_path('projects/<project_id_a>')
        client_b = client.client_for_path('projects/<project_id_b>')

    and your base client would remain unchanged.

    Paths with leading slashes are appended to the root url; otherwise,
    paths are set relative to the current path.
    """
    if path.startswith('/'):
        url = self.root_url + path
    else:
        url = self.url + path
    return self.__class__(self.session, url)
A deprecated alias for post(path, docs=docs), included only for backward compatibility.
def upload(self, path, docs, **params):
    """
    A deprecated alias for post(path, docs=docs), included only for
    backward compatibility.
    """
    logger.warning('The upload method is deprecated; use post instead.')
    return self.post(path, docs=docs)
A convenience method designed to inform you when a project build has completed. It polls the API every `interval` seconds until there is not a build running. At that point, it returns the last_build_info field of the project record if the build succeeded, and raises a LuminosoError with that field as its message if the build failed.
def wait_for_build(self, interval=5, path=None): """ A convenience method designed to inform you when a project build has completed. It polls the API every `interval` seconds until there is not a build running. At that point, it returns the "last_build_info" field of the project record if the build succeeded, and raises a LuminosoError with the field as its message if the build failed. If a `path` is not specified, this method will assume that its URL is the URL for the project. Otherwise, it will use the specified path (which should be "/projects/<project_id>/"). """ path = path or '' start = time.time() next_log = 0 while True: response = self.get(path)['last_build_info'] if not response: raise ValueError('This project is not building!') if response['stop_time']: if response['success']: return response else: raise LuminosoError(response) elapsed = time.time() - start if elapsed > next_log: logger.info('Still waiting (%d seconds elapsed).', next_log) next_log += 120 time.sleep(interval)
Saves binary content to a file with name filename. filename should include the appropriate file extension, such as .xlsx or .txt, e.g., filename = 'sample.xlsx'.
def save_to_file(self, path, filename, **params): """ Saves binary content to a file with name filename. filename should include the appropriate file extension, such as .xlsx or .txt, e.g., filename = 'sample.xlsx'. Useful for downloading .xlsx files. """ url = ensure_trailing_slash(self.url + path.lstrip('/')) content = self._request('get', url, params=params).content with open(filename, 'wb') as f: f.write(content)
Get the root URL for a URL as described in the LuminosoClient documentation.
def get_root_url(url, warn=True): """ Get the "root URL" for a URL, as described in the LuminosoClient documentation. """ parsed_url = urlparse(url) # Make sure it's a complete URL, not a relative one if not parsed_url.scheme: raise ValueError('Please supply a full URL, beginning with http:// ' 'or https:// .') # Issue a warning if the path didn't already start with /api/v4 root_url = '%s://%s/api/v4' % (parsed_url.scheme, parsed_url.netloc) if warn and not parsed_url.path.startswith('/api/v4'): logger.warning('Using %s as the root url' % root_url) return root_url
When sent in an authorized REST request, only strings and integers can be transmitted accurately. Other types of data need to be encoded into JSON.
def jsonify_parameters(params): """ When sent in an authorized REST request, only strings and integers can be transmitted accurately. Other types of data need to be encoded into JSON. """ result = {} for param, value in params.items(): if isinstance(value, types_not_to_encode): result[param] = value else: result[param] = json.dumps(value) return result
Returns an object that makes requests to the API, authenticated with the provided username/password, at URLs beginning with `url`.
def connect(cls, url=None, username=None, password=None, token=None, token_file=None): """ Returns an object that makes requests to the API, authenticated with the provided username/password, at URLs beginning with `url`. You can leave out the URL and get your 'default URL', a base path that is probably appropriate for creating projects on your account: client = LuminosoClient.connect(username=username) If the URL is simply a path, omitting the scheme and domain, then it will default to https://analytics.luminoso.com/api/v4/, which is probably what you want: client = LuminosoClient.connect('/projects/public', username=username) If you leave out the username, it will use your system username, which is convenient if it matches your Luminoso username: client = LuminosoClient.connect() """ auto_account = False if url is None: auto_account = True url = '/' if url.startswith('http'): root_url = get_root_url(url) else: url = URL_BASE + '/' + url.lstrip('/') root_url = URL_BASE auth = cls._get_token_auth(username, password, token, token_file, root_url) session = requests.session() session.auth = auth client = cls(session, url) if auto_account: client = client.change_path('/projects/%s' % client._get_default_account()) return client
Obtain the user's long-lived API token and save it in a local file. If the user has no long-lived API token, one will be created. Returns the token that was saved.
def save_token(self, token_file=None): """ Obtain the user's long-lived API token and save it in a local file. If the user has no long-lived API token, one will be created. Returns the token that was saved. """ tokens = self._json_request('get', self.root_url + '/user/tokens/') long_lived = [token['type'] == 'long_lived' for token in tokens] if any(long_lived): dic = tokens[long_lived.index(True)] else: # User doesn't have a long-lived token, so create one dic = self._json_request('post', self.root_url + '/user/tokens/') token = dic['token'] token_file = token_file or get_token_filename() if os.path.exists(token_file): saved_tokens = json.load(open(token_file)) else: saved_tokens = {} saved_tokens[urlparse(self.root_url).netloc] = token directory, filename = os.path.split(token_file) if directory and not os.path.exists(directory): os.makedirs(directory) with open(token_file, 'w') as f: json.dump(saved_tokens, f) return token
Make a request of the specified type and expect a JSON object in response.
def _json_request(self, req_type, url, **kwargs): """ Make a request of the specified type and expect a JSON object in response. If the result has an 'error' value, raise a LuminosoAPIError with its contents. Otherwise, return the contents of the 'result' value. """ response = self._request(req_type, url, **kwargs) try: json_response = response.json() except ValueError: logger.error("Received response with no JSON: %s %s" % (response, response.content)) raise LuminosoError('Response body contained no JSON. ' 'Perhaps you meant to use get_raw?') if json_response.get('error'): raise LuminosoAPIError(json_response.get('error')) return json_response['result']
Make a POST request to the given path and return the JSON-decoded result.
def post(self, path='', **params): """ Make a POST request to the given path, and return the JSON-decoded result. Keyword parameters will be converted to form values, sent in the body of the POST. POST requests are requests that cause a change on the server, especially those that ask to create and return an object of some kind. """ params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('post', url, data=params)
Make a POST request to the given path with data in its body. Return the JSON-decoded result.
def post_data(self, path, data, content_type, **params): """ Make a POST request to the given path, with `data` in its body. Return the JSON-decoded result. The content_type must be set to reflect the kind of data being sent, which is often `application/json`. Keyword parameters will be converted to URL parameters. This is unlike other POST requests which encode those parameters in the body, because the body is already being used. This is used by the Luminoso API to upload new documents in JSON format. """ params = jsonify_parameters(params) url = ensure_trailing_slash(self.url + path.lstrip('/')) return self._json_request('post', url, params=params, data=data, headers={'Content-Type': content_type} )
Return a new LuminosoClient for a subpath of this one.
def change_path(self, path): """ Return a new LuminosoClient for a subpath of this one. For example, you might want to start with a LuminosoClient for `https://analytics.luminoso.com/api/v4/`, then get a new one for `https://analytics.luminoso.com/api/v4/projects/myaccount/myprojectid`. You accomplish that with the following call: newclient = client.change_path('projects/myaccount/myproject_id') If you start the path with `/`, it will start from the root_url instead of the current url: project_area = newclient.change_path('/projects/myaccount') The advantage of using `.change_path` is that you will not need to re-authenticate like you would if you ran `.connect` again. You can use `.change_path` to split off as many sub-clients as you want, and you don't have to stop using the old one just because you got a new one with `.change_path`. """ if path.startswith('/'): url = self.root_url + path else: url = self.url + path return self.__class__(self.session, url)
Get the ID of an account you can use to access projects.
def _get_default_account(self): """ Get the ID of an account you can use to access projects. """ newclient = self.__class__(self.session, self.root_url) account_info = newclient.get('/accounts/') if account_info['default_account'] is not None: return account_info['default_account'] valid_accounts = [a['account_id'] for a in account_info['accounts'] if a['account_id'] != 'public'] if len(valid_accounts) == 0: raise ValueError("Can't determine your default URL. " "Please request a specific URL or ask " "Luminoso for support.") return valid_accounts[0]
Get the documentation that the server sends for the API.
def documentation(self):
    """
    Get the documentation that the server sends for the API.
    """
    newclient = self.__class__(self.session, self.root_url)
    return newclient.get_raw('/')
A convenience method for uploading a set of dictionaries representing documents. You still need to specify the URL to upload to, which will look like ROOT_URL/projects/myaccount/project_id/docs.
def upload(self, path, docs, **params):
    """
    A convenience method for uploading a set of dictionaries representing
    documents. You still need to specify the URL to upload to, which will
    look like ROOT_URL/projects/myaccount/project_id/docs.
    """
    json_data = json.dumps(list(docs))
    return self.post_data(path, json_data, 'application/json', **params)
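A usage sketch, assuming a client rooted at the v4 API; the account and project IDs are placeholders, not real identifiers:

docs = [
    {'title': 'Doc 1', 'text': 'First document.'},
    {'title': 'Doc 2', 'text': 'Second document.'},
]
# Equivalent to POSTing the JSON-encoded list to .../docs with
# Content-Type: application/json.
client.upload('projects/myaccount/my_project_id/docs', docs)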
Wait for an asynchronous task to finish.
def wait_for(self, job_id, base_path=None, interval=5): """ Wait for an asynchronous task to finish. Unlike the thin methods elsewhere on this object, this one is actually specific to how the Luminoso API works. This will poll an API endpoint to find out the status of the job numbered `job_id`, repeating every 5 seconds (by default) until the job is done. When the job is done, it will return an object representing the result of that job. In the Luminoso API, requests that may take a long time return a job ID instead of a result, so that your code can continue running in the meantime. When it needs the job to be done to proceed, it can use this method to wait. The base URL where it looks for that job is by default `jobs/id/` under the current URL, assuming that this LuminosoClient's URL represents a project. You can specify a different URL by changing `base_path`. If the job failed, will raise a LuminosoError with the job status as its message. """ if base_path is None: base_path = 'jobs/id' path = '%s%d' % (ensure_trailing_slash(base_path), job_id) start = time.time() next_log = 0 while True: response = self.get(path) if response['stop_time']: if response['success']: return response else: raise LuminosoError(response) elapsed = time.time() - start if elapsed > next_log: logger.info('Still waiting (%d seconds elapsed).', next_log) next_log += 120 time.sleep(interval)
Get the raw text of a response.
def get_raw(self, path, **params):
    """
    Get the raw text of a response.

    This is only generally useful for specific URLs, such as documentation.
    """
    url = ensure_trailing_slash(self.url + path.lstrip('/'))
    return self._request('get', url, params=params).text
Print a JSON list of JSON objects in CSV format.
def _print_csv(result):
    """Print a JSON list of JSON objects in CSV format."""
    if type(result) is not list:
        raise TypeError("output not able to be displayed as CSV.")
    first_line = result[0]
    w = csv.DictWriter(sys.stdout, fieldnames=sorted(first_line.keys()))
    w.writeheader()
    for line in result:
        w.writerow(line)
Read parameters from input file, -j, and -p arguments, in that order.
def _read_params(input_file, json_body, p_params): """Read parameters from input file, -j, and -p arguments, in that order.""" params = {} try: if input_file: params.update(json.load(input_file)) if json_body is not None: params.update(json.loads(json_body)) except ValueError as e: raise ValueError("input is not valid JSON: %s" % e) try: params.update({p.split('=', 1)[0]: p.split('=', 1)[1] for p in p_params}) except IndexError: raise ValueError("--param arguments must have key=value format") return params
Take an iterator and yield its contents in groups of size items.
def _batches(iterable, size):
    """
    Take an iterator and yield its contents in groups of `size` items.
    """
    sourceiter = iter(iterable)
    while True:
        try:
            batchiter = islice(sourceiter, size)
            yield chain([next(batchiter)], batchiter)
        except StopIteration:
            return
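For example, each yielded batch is itself an iterator, so it has to be consumed before moving on:

for batch in _batches(range(7), 3):
    print(list(batch))
# [0, 1, 2]
# [3, 4, 5]
# [6]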
Limit a document to just the three fields we should upload.
def _simplify_doc(doc):
    """
    Limit a document to just the three fields we should upload.
    """
    # Mutate a copy of the document to fill in missing fields
    doc = dict(doc)
    if 'text' not in doc:
        raise ValueError("The document {!r} has no text field".format(doc))
    return {
        'text': doc['text'],
        'metadata': doc.get('metadata', []),
        'title': doc.get('title', '')
    }
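For example, extra analysis fields are dropped and missing optional fields get default values:

_simplify_doc({'text': 'hello', 'terms': ['hello'], 'vector': 'AAAA'})
# -> {'text': 'hello', 'metadata': [], 'title': ''}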
Given an iterator of documents, upload them as a Luminoso project.
def create_project_with_docs( client, docs, language, name, account=None, progress=False ): """ Given an iterator of documents, upload them as a Luminoso project. """ description = 'Uploaded using lumi-upload at {}'.format(time.asctime()) if account is not None: proj_record = client.post( 'projects', name=name, language=language, description=description, account_id=account, ) else: proj_record = client.post( 'projects', name=name, language=language, description=description ) proj_id = proj_record['project_id'] proj_client = client.client_for_path('projects/' + proj_id) try: if progress: progress_bar = tqdm(desc='Uploading documents') else: progress_bar = None for batch in _batches(docs, BATCH_SIZE): docs_to_upload = [_simplify_doc(doc) for doc in batch] proj_client.post('upload', docs=docs_to_upload) if progress: progress_bar.update(BATCH_SIZE) finally: if progress: progress_bar.close() print('The server is building project {!r}.'.format(proj_id)) proj_client.post('build') while True: time.sleep(10) proj_status = proj_client.get() build_info = proj_status['last_build_info'] if 'success' in build_info: if not build_info['success']: raise LuminosoServerError(build_info['reason']) return proj_status
Given a LuminosoClient pointing to the root of the API and a filename to read JSON lines from, create a project from the documents in that file.
def upload_docs( client, input_filename, language, name, account=None, progress=False ): """ Given a LuminosoClient pointing to the root of the API, and a filename to read JSON lines from, create a project from the documents in that file. """ docs = iterate_json_lines(input_filename) return create_project_with_docs( client, docs, language, name, account, progress=progress )
Handle arguments for the 'lumi-upload' command.
def _main(argv): """ Handle arguments for the 'lumi-upload' command. """ parser = argparse.ArgumentParser( description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( '-b', '--base-url', default=URL_BASE, help='API root url, default: %s' % URL_BASE, ) parser.add_argument( '-a', '--account-id', default=None, help='Account ID that should own the project, if not the default', ) parser.add_argument( '-l', '--language', default='en', help='The language code for the language the text is in. Default: en', ) parser.add_argument('-t', '--token', help="API authentication token") parser.add_argument( '-s', '--save-token', action='store_true', help='save --token for --base-url to ~/.luminoso/tokens.json', ) parser.add_argument( 'input_filename', help='The JSON-lines (.jsons) file of documents to upload', ) parser.add_argument( 'project_name', nargs='?', default=None, help='What the project should be called', ) args = parser.parse_args(argv) if args.save_token: if not args.token: raise ValueError("error: no token provided") LuminosoClient.save_token(args.token, domain=urlparse(args.base_url).netloc) client = LuminosoClient.connect(url=args.base_url, token=args.token) name = args.project_name if name is None: name = input('Enter a name for the project: ') if not name: print('Aborting because no name was provided.') return result = upload_docs( client, args.input_filename, args.language, name, account=args.account_id, progress=True, ) print( 'Project {!r} created with {} documents'.format( result['project_id'], result['document_count'] ) )
Take an iterator and yield its contents in groups of size items.
def batches(iterable, size):
    """
    Take an iterator and yield its contents in groups of `size` items.
    """
    sourceiter = iter(iterable)
    while True:
        batchiter = islice(sourceiter, size)
        try:
            # next() raises StopIteration when the source is exhausted;
            # return instead of letting it escape the generator.
            yield chain([next(batchiter)], batchiter)
        except StopIteration:
            return
Given a file-like object containing a JSON stream, upload it to Luminoso with the given account name and project name.
def upload_stream(stream, server, account, projname, language=None, username=None, password=None, append=False, stage=False): """ Given a file-like object containing a JSON stream, upload it to Luminoso with the given account name and project name. """ client = LuminosoClient.connect(server, username=username, password=password) if not append: # If we're not appending to an existing project, create new project. info = client.post('/projects/' + account, name=projname) project_id = info['project_id'] print('New project ID:', project_id) else: projects = client.get('/projects/' + account, name=projname) if len(projects) == 0: print('No such project exists!') return if len(projects) > 1: print('Warning: Multiple projects with name "%s". ' % projname, end='') project_id = projects[0]['project_id'] print('Using existing project with id %s.' % project_id) project = client.change_path('/projects/' + account + '/' + project_id) counter = 0 for batch in batches(stream, 1000): counter += 1 documents = list(batch) project.upload('docs', documents) print('Uploaded batch #%d' % (counter)) if not stage: # Calculate the docs into the assoc space. print('Calculating.') kwargs = {} if language is not None: kwargs = {'language': language} job_id = project.post('docs/recalculate', **kwargs) project.wait_for(job_id)
Upload a file to Luminoso with the given account and project name.
def upload_file(filename, server, account, projname, language=None, username=None, password=None, append=False, stage=False, date_format=None): """ Upload a file to Luminoso with the given account and project name. Given a file containing JSON, JSON stream, or CSV data, this verifies that we can successfully convert it to a JSON stream, then uploads that JSON stream. """ stream = transcode_to_stream(filename, date_format) upload_stream(stream_json_lines(stream), server, account, projname, language=language, username=username, password=password, append=append, stage=stage)
Handle command line arguments to upload a file to a Luminoso project as a script.
def main(): """ Handle command line arguments, to upload a file to a Luminoso project as a script. """ import argparse parser = argparse.ArgumentParser() parser.add_argument('filename') parser.add_argument('account') parser.add_argument('project_name') parser.add_argument( '--append', help=("If append flag is used, upload documents to existing project, " "rather than creating a new project."), action="store_true" ) parser.add_argument( '-s', '--stage', help="If stage flag is used, just upload docs, don't recalculate.", action="store_true" ) parser.add_argument( '-a', '--api-url', help="Specify an alternate API url", default=URL_BASE ) parser.add_argument( '-l', '--language', help=("Two-letter language code to use when recalculating (e.g. 'en' " "or 'ja')") ) parser.add_argument( '-u', '--username', default=None, help="username (defaults to your username on your computer)" ) parser.add_argument( '-p', '--password', default=None, help="password (you can leave this out and type it in later)" ) parser.add_argument( '-d', '--date-format', default='iso', help=("format string for parsing dates, following " "http://strftime.org/. Default is 'iso', which is " "'%%Y-%%m-%%dT%%H:%%M:%%S+00:00'. Other shortcuts are 'epoch' " "for epoch time or 'us-standard' for '%%m/%%d/%%y'") ) args = parser.parse_args() # Implement some human-understandable shortcuts for date_format date_format_lower = args.date_format.lower() if date_format_lower == 'iso': date_format = '%Y-%m-%dT%H:%M:%S+00:00' elif date_format_lower in ['unix', 'epoch']: date_format = 'epoch' elif date_format_lower == 'us-standard': date_format = '%m/%d/%y' else: date_format = args.date_format upload_file(args.filename, args.api_url, args.account, args.project_name, language=args.language, username=args.username, password=args.password, append=args.append, stage=args.stage, date_format=date_format)
Obtain a short-lived token using a username and password, and use that token to create an auth object.
def from_user_creds(cls, username, password, url=URL_BASE): """ Obtain a short-lived token using a username and password, and use that token to create an auth object. """ session = requests.session() token_resp = session.post(url.rstrip('/') + '/user/login/', data={'username': username, 'password': password}) if token_resp.status_code != 200: error = token_resp.text try: error = json.loads(error)['error'] except (KeyError, ValueError): pass raise LuminosoLoginError(error) return cls(token_resp.json()['result']['token'])
Set http session.
def login(self):
    """Set http session."""
    if self._session is None:
        self._session = requests.session()
        # adding fake user-agent header
        self._session.headers.update({'User-agent': str(UserAgent().random)})
    return self._post_login_page()
Login to enedis.
def _post_login_page(self): """Login to enedis.""" data = { 'IDToken1': self.username, 'IDToken2': self.password, 'SunQueryParamsString': base64.b64encode(b'realm=particuliers'), 'encoded': 'true', 'gx_charset': 'UTF-8' } try: self._session.post(LOGIN_URL, data=data, allow_redirects=False, timeout=self._timeout) except OSError: raise PyLinkyError("Can not submit login form") if 'iPlanetDirectoryPro' not in self._session.cookies: raise PyLinkyError("Login error: Please check your username/password.") return True
Get data.
def _get_data(self, p_p_resource_id, start_date=None, end_date=None):
    """Get data."""
    data = {
        '_' + REQ_PART + '_dateDebut': start_date,
        '_' + REQ_PART + '_dateFin': end_date
    }
    params = {
        'p_p_id': REQ_PART,
        'p_p_lifecycle': 2,
        'p_p_state': 'normal',
        'p_p_mode': 'view',
        'p_p_resource_id': p_p_resource_id,
        'p_p_cacheability': 'cacheLevelPage',
        'p_p_col_id': 'column-1',
        'p_p_col_pos': 1,
        'p_p_col_count': 3
    }
    try:
        raw_res = self._session.post(DATA_URL, data=data, params=params,
                                     allow_redirects=False, timeout=self._timeout)
        if 300 <= raw_res.status_code < 400:
            # Retry once on a redirect status
            raw_res = self._session.post(DATA_URL, data=data, params=params,
                                         allow_redirects=False, timeout=self._timeout)
    except OSError as e:
        raise PyLinkyError("Could not access enedis.fr: " + str(e))

    if raw_res.text == "":
        raise PyLinkyError("No data")

    if raw_res.status_code == 302 and "/messages/maintenance.html" in raw_res.text:
        raise PyLinkyError("Site in maintenance")

    try:
        json_output = raw_res.json()
    except (OSError, json.decoder.JSONDecodeError,
            simplejson.errors.JSONDecodeError) as e:
        raise PyLinkyError("Impossible to decode response: " + str(e) +
                           "\nResponse was: " + str(raw_res.text))

    if json_output.get('etat').get('valeur') == 'erreur':
        raise PyLinkyError("Enedis.fr answered with an error: " + str(json_output))

    return json_output.get('graphe')
Get the latest data from Enedis.
def fetch_data(self):
    """Get the latest data from Enedis."""
    for t in [HOURLY, DAILY, MONTHLY, YEARLY]:
        self._data[t] = self.get_data_per_period(t)
Main function
def main(): """Main function""" parser = argparse.ArgumentParser() parser.add_argument('-u', '--username', required=True, help='enedis username') parser.add_argument('-p', '--password', required=True, help='Password') args = parser.parse_args() client = LinkyClient(args.username, args.password) try: client.login() client.fetch_data() except BaseException as exp: print(exp) return 1 finally: client.close_session() print(json.dumps(client.get_data(), indent=2))
Load the view on first load
def prepare(self): """ Load the view on first load """ if self.__class__.view: return #: Load the View class from the dotted view name with enaml.imports(): View = pydoc.locate(self.page.view) assert View, "Failed to import View: {}".format(self.page.view) #: Set initial view properties self.__class__.view = View( site=self.site, page=self.page, request=self.request, )
Load the view on first load; could also load based on session, group, etc.
def initialize(self):
    """ Load the view on first load; could also load based on session,
    group, etc.

    """
    if self.__class__.view:
        self.view.handler = self
        self.view.request = self.request
        return

    #: Load the View class
    with enaml.imports():
        from views.index import View

    #: Set initial view properties
    self.__class__.view = View(
        company=current_company,
        request=self.request,
        handler=self,
    )
Execute the correct handler depending on what is connecting.
def get(self, *args, **kwargs): #: Render view for get request, view is cached for websocket """ Execute the correct handler depending on what is connecting. """ if self.is_websocket(): return super(DemoHandler, self).get(*args, **kwargs) else: #return tornado.web.RequestHandler.get(self, *args, **kwargs) self.write(self.view.render())
When enaml.js sends a message
def on_message(self, message): """ When enaml.js sends a message """ #: Decode message change = tornado.escape.json_decode(message) #print change #: Get the owner ID ref = change.get('ref') if not ref: return #: Get the server side representation of the node #: If found will return the View declaration node node = self.view.xpath('//*[@ref="{}"]'.format(ref), first=True) if node is None: return #: Handle the event if change.get('type') and change.get('name'): if change['type'] == 'event': #: Trigger the event trigger = getattr(node, change['name']) trigger() if change['type'] == 'update': #: Trigger the update setattr(node, change['name'], change['value'])
When pages change, update the menus
def _update_menus(self,change): """ When pages change, update the menus""" menus = {} #: Get all links links = [p.link for p in self.pages if p.link] + self.links #: Put all links in the correct menu for link in links: for menu in link.menus: if menu not in menus: menus[menu] = [] menus[menu].append(link) #: Update the menus for name,menu in menus.items(): k = '{}_menu'.format(name) if hasattr(self,k): setattr(self,k,menu)
Generate the handlers for this site
def _default_handlers(self): """ Generate the handlers for this site """ static_path = os.path.abspath(os.path.join(os.path.dirname(__file__),"static")) urls = [ (r"/static/(.*)", cyclone.web.StaticFileHandler, {"path": static_path}), ] for p in self.pages: handler = p.handler handler.site = self handler.page = p urls.append((p.link.url,handler)) return urls
When we get an event from js, look up the node and invoke the action on the enaml node.
def on_message(self, message): """ When we get an event from js, lookup the node and invoke the action on the enaml node. """ change = json.loads(message) log.debug(f'Update from js: {change}') # Lookup the node ref = change.get('ref') if not ref: return nodes = self.viewer.xpath('//*[@ref=$ref]', ref=ref) if not nodes: return # Unknown node node = nodes[0] # Trigger the change on the enaml node if change.get('type') and change.get('name'): if change['type'] == 'event': trigger = getattr(node, change['name']) trigger() elif change['type'] == 'update': # Trigger the update setattr(node, change['name'], change['value']) else: log.warning(f"Unhandled event {self} {node}: {change}")
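For reference, the browser-side messages this handler expects are small JSON objects carrying a node ref plus either an event name or an attribute update. Illustrative payloads (the ref values are made up):

import json

event_msg = json.dumps({'ref': 'a1b2c3', 'type': 'event', 'name': 'clicked'})
update_msg = json.dumps({'ref': 'a1b2c3', 'type': 'update',
                         'name': 'value', 'value': 'hello'})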
When an event from enaml occurs, send it out over the websocket so the client's browser can update accordingly.
def on_dom_modified(self, change): """ When an event from enaml occurs, send it out the websocket so the client's browser can update accordingly. """ log.debug(f'Update from enaml: {change}') self.write_message(json.dumps(change['value']))
Create the toolkit widget for the proxy object.
def create_widget(self): """ Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute. """ self.widget = SubElement(self.parent_widget(), self.declaration.tag)
Initialize the state of the toolkit widget.
def init_widget(self): """ Initialize the state of the toolkit widget. This method is called during the top-down pass, just after the 'create_widget()' method is called. This method should init the state of the widget. The child widgets will not yet be created. """ widget = self.widget d = self.declaration #: Save ref id ref = d.ref CACHE[ref] = atomref(self) widget.set('ref', ref) if d.text: self.set_text(d.text) if d.tail: self.set_tail(d.tail) if d.style: self.set_style(d.style) if d.cls: self.set_cls(d.cls) if d.attrs: self.set_attrs(d.attrs) if d.id: widget.set('id', d.id) if d.draggable: self.set_draggable(d.draggable) # Set any attributes that may be defined for name, member in d.members().items(): if not member.metadata: continue meta = member.metadata # Exclude any attr tags if not (meta.get('d_member') and meta.get('d_final')): continue # Skip any items with attr=false elif not meta.get('attr', True): continue elif isinstance(member, Event): continue value = getattr(d, name) if value: self.set_attribute(name, value)
A reimplemented destructor.
def destroy(self): """ A reimplemented destructor. This destructor will clear the reference to the toolkit widget and set its parent to None. """ widget = self.widget if widget is not None: parent = widget.getparent() if parent is not None: parent.remove(widget) del self.widget d = self.declaration try: del CACHE[d.ref] except KeyError: pass super(WebComponent, self).destroy()
Handle the child added event from the declaration.
def child_added(self, child):
    """ Handle the child added event from the declaration.

    This handler will insert the child toolkit widget in the correct
    position. Subclasses which need more control should reimplement this
    method.

    """
    super(WebComponent, self).child_added(child)
    if child.widget is not None:
        # Use insert to put the new widget in the correct spot
        for i, c in enumerate(self.children()):
            if c == child:
                self.widget.insert(i, child.widget)
                break
Handle the child removed event from the declaration.
def child_removed(self, child): """ Handle the child removed event from the declaration. This handler will unparent the child toolkit widget. Subclasses which need more control should reimplement this method. """ super(WebComponent, self).child_removed(child) if child.widget is not None: for i, c in enumerate(self.children()): if c == child: del self.widget[i] break
Get the node(s) matching the query
def find(self, query, **kwargs): """ Get the node(s) matching the query""" nodes = self.widget.xpath(query, **kwargs) if not nodes: return [] matches = [] for node in nodes: aref = CACHE.get(node.attrib.get('ref')) obj = aref() if aref else None if obj is None: continue matches.append(obj) return matches
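A hypothetical lookup using find(); the $ref variable syntax is the same xpath convention already used by the websocket handlers above, and component is assumed to be an instance of this proxy class.

# Find the proxy object whose rendered element carries a given ref.
matches = component.find('//*[@ref=$ref]', ref='a1b2c3')
proxy = matches[0] if matches else None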
Get the child toolkit widgets for this object.
def child_widgets(self):
    """ Get the child toolkit widgets for this object.

    Returns
    -------
    result : iterable
        The child toolkit (element) widgets defined for this object.

    """
    for child in self.children():
        w = child.widget
        if w is not None:
            yield w
Default handler for those not explicitly defined
def set_attribute(self, name, value):
    """ Default handler for those not explicitly defined """
    if value is True:
        self.widget.set(name, name)
    elif value is False:
        # Drop the attribute entirely; pop() avoids a KeyError if it was
        # never set in the first place.
        self.widget.attrib.pop(name, None)
    else:
        self.widget.set(name, str(value))
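A standalone illustration of the boolean-attribute convention implemented above (assuming an lxml element, as used elsewhere in this code): True renders as name="name", False removes the attribute.

from lxml import etree

el = etree.Element('input')
el.set('disabled', 'disabled')      # what set_attribute('disabled', True) produces
print(etree.tostring(el))           # b'<input disabled="disabled"/>'
el.attrib.pop('disabled', None)     # what set_attribute('disabled', False) produces
print(etree.tostring(el))           # b'<input/>'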
Update the proxy widget when the Widget data changes.
def _update_proxy(self, change): """ Update the proxy widget when the Widget data changes. """ #: Try default handler if change['type'] == 'update' and self.proxy_is_active: handler = getattr(self.proxy, 'set_' + change['name'], None) if handler is not None: handler(change['value']) else: self.proxy.set_attribute(change['name'], change['value']) self._notify_modified(change)
If a change occurs while we have an active websocket connection, notify the websocket client of the change.
def _notify_modified(self, change): """ If a change occurs when we have a websocket connection active notify the websocket client of the change. """ root = self.root_object() if isinstance(root, Html): name = change['name'] change = { 'ref': self.ref, 'type': change['type'], 'name': change['name'], 'value': change['value'] } root.modified(change)
Find nodes matching the given xpath query
def xpath(self, query, **kwargs): """ Find nodes matching the given xpath query """ nodes = self.proxy.find(query, **kwargs) return [n.declaration for n in nodes]
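A hypothetical call from application code: query the rendered tree and get back declaration objects rather than raw elements. Assumes view is the root declaration (e.g. an Html instance).

# Find the declaration node whose rendered element has the given ref.
nodes = view.xpath('//*[@ref=$ref]', ref='a1b2c3')
node = nodes[0] if nodes else None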
Prepare for rendering
def prepare(self, **kwargs): """ Prepare for rendering """ for k, v in kwargs.items(): setattr(self, k, v) if not self.is_initialized: self.initialize() if not self.proxy_is_active: self.activate_proxy()
Initialize the widget with the source.
def init_widget(self): """ Initialize the widget with the source. """ d = self.declaration if d.source: self.set_source(d.source) else: super(RawComponent, self).init_widget()
Set the source by parsing the source and inserting it into the component.
def set_source(self, source): """ Set the source by parsing the source and inserting it into the component. """ self.widget.clear() html = etree.HTML(source) self.widget.extend(html[0]) # Clear removes everything so it must be reinitialized super(RawComponent, self).init_widget()
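A standalone sketch of what the parsing step above does, assuming lxml: etree.HTML() wraps a fragment in an html/body tree, so html[0] is the element whose children end up copied into the component.

from lxml import etree

html = etree.HTML('<p>Hello <b>world</b></p>')
print(html.tag, html[0].tag)              # html body
print([child.tag for child in html[0]])   # ['p']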
If the mode changes, refresh the items.
def _observe_mode(self, change): """ If the mode changes. Refresh the items. """ block = self.block if block and self.is_initialized and change['type'] == 'update': if change['oldvalue'] == 'replace': raise NotImplementedError for c in self.children: block.children.remove(c) c.set_parent(None) self.refresh_items()
A change handler for the objects list of the Include.
def _observe_block(self, change): """ A change handler for the 'objects' list of the Include. If the object is initialized objects which are removed will be unparented and objects which are added will be reparented. Old objects will be destroyed if the 'destroy_old' flag is True. """ if self.is_initialized and change['type'] == 'update': old_block = change['oldvalue'] for c in self.children: old_block.children.remove(c) c.set_parent(None) self.refresh_items()
When the children of the block change, update the referenced block.
def _observe__children(self, change): """ When the children of the block change. Update the referenced block. """ if not self.is_initialized or change['type'] != 'update': return block = self.block new_children = change['value'] old_children = change['oldvalue'] for c in old_children: if c not in new_children and not c.is_destroyed: c.destroy() else: c.set_parent(None) if block: # This block is inserting into another block before = None if self.mode == 'replace': block.children = [] if self.mode == 'prepend' and block.children: before = block.children[0] block.insert_children(before, new_children) else: # This block is a placeholder self.parent.insert_children(self, new_children)
Read the contents of a file located relative to setup.py
def read(*pathcomponents): """Read the contents of a file located relative to setup.py""" with open(join(abspath(dirname(__file__)), *pathcomponents)) as thefile: return thefile.read()
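Typical use of the helper inside setup.py, sketched here with hypothetical file names and package metadata:

from setuptools import setup

setup(
    name='example-package',                # hypothetical
    version='0.0.1',                       # hypothetical
    long_description=read('README.rst'),   # file name assumed
)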
Print the dict returned by a MongoDB Query in the standard output.
def print_obj(obj, verbose, metadata, mongo_version):
    """
    Print the dict returned by a MongoDB Query in the standard output.
    """
    if verbose:
        sys.stdout.write(json_encoder.encode(obj) + '\n')
        sys.stdout.flush()
    else:
        try:
            ts_time = obj['ts']
            operation = obj['op']
            doc = None
            if operation == 'query':
                if mongo_version < "3.2":
                    doc = obj['ns'].split(".")[-1]
                    query = json_encoder.encode(obj['query']) if 'query' in obj else "{}"
                else:
                    if "query" in obj:
                        cmd = obj['query']    # Mongo 3.2 - 3.4
                    else:
                        cmd = obj['command']  # Mongo 3.6+
                    doc = cmd['find']
                    query = json_encoder.encode(cmd['filter']) if 'filter' in cmd else "{}"
                    if 'sort' in cmd:
                        query += ', sort: ' + json_encoder.encode(cmd['sort'])
                query += '. %s returned.' % obj['nreturned']
            elif operation == 'update':
                doc = obj['ns'].split(".")[-1]
                if mongo_version < "3.6":
                    query = json_encoder.encode(obj['query']) if 'query' in obj else "{}"
                    query += ', ' + json_encoder.encode(obj['updateobj'])
                else:
                    query = json_encoder.encode(obj['command']['q']) if 'command' in obj and 'q' in obj['command'] else "{}"
                    query += ', ' + json_encoder.encode(obj['command']['u'])
                if 'nModified' in obj:
                    query += '. %s updated.' % obj['nModified']
                elif 'nMatched' in obj:
                    query += '. %s updated.' % obj['nMatched']
            elif operation == 'insert':
                if mongo_version < "3.2":
                    doc = obj['ns'].split(".")[-1]
                    query = json_encoder.encode(obj['query']) if 'query' in obj else "{}"
                else:
                    if 'query' in obj:
                        doc = obj['query']['insert']
                        if 'documents' in obj['query']:
                            if isinstance(obj['query']['documents'], collections.Iterable) \
                                    and len(obj['query']['documents']) > 1:
                                query = json_encoder.encode(obj['query']['documents']) + ". "
                            else:
                                query = json_encoder.encode(obj['query']['documents'][0]) + ". "
                        else:
                            query = ""
                    else:
                        # The Mongo 3.6+ profiler doesn't seem to record insert details (the document
                        # object), and some tools like Robo 3T (formerly Robomongo) allow duplicating
                        # collections, but the profiler doesn't record the inserted element
                        doc = obj['ns'].split(".")[-1]
                        query = ""
                query += '%s inserted.' % obj['ninserted']
            elif operation == 'remove':
                doc = obj['ns'].split(".")[-1]
                if mongo_version < "3.6":
                    query = json_encoder.encode(obj['query']) if 'query' in obj else "{}"
                else:
                    query = json_encoder.encode(obj['command']['q']) if 'command' in obj and 'q' in obj['command'] else "{}"
                query += '. %s deleted.' % obj['ndeleted']
            elif operation == "command":
                if 'count' in obj["command"]:
                    operation = "count"
                    query = json_encoder.encode(obj['command']['query'])
                elif 'aggregate' in obj["command"]:
                    operation = "aggregate"
                    query = json_encoder.encode(obj['command']['pipeline'])
                elif 'distinct' in obj["command"]:
                    operation = "distinct"
                    query = json_encoder.encode(obj['command']['query'])
                    query = '"%s", %s' % (obj['command']['key'], query)
                elif 'drop' in obj["command"]:
                    operation = "drop"
                    query = ""
                elif 'findandmodify' in obj["command"]:
                    operation = "findandmodify"
                    query = "query: " + json_encoder.encode(obj['command']['query'])
                    if 'sort' in obj["command"]:
                        query += ", sort: " + json_encoder.encode(obj['command']['sort'])
                    if 'update' in obj["command"]:
                        query += ", update: " + json_encoder.encode(obj['command']['update'])
                    if 'remove' in obj["command"]:
                        query += ", remove: " + str(obj['command']['remove']).lower()
                    if 'fields' in obj["command"]:
                        query += ", fields: " + json_encoder.encode(obj['command']['fields'])
                    if 'upsert' in obj["command"]:
                        query += ", upsert: " + str(obj['command']['upsert']).lower()
                    if 'new' in obj["command"]:
                        query += ", new: " + str(obj['command']['new']).lower()
                elif 'group' in obj["command"]:
                    operation = "group"
                    doc = obj["command"]['group']["ns"]
                    if 'key' in obj['command']['group']:
                        key = "key: " + json_encoder.encode(obj['command']['group']['key'])
                    else:
                        key = None
                    if 'initial' in obj['command']['group']:
                        initial = "initial: " + json_encoder.encode(obj['command']['group']['initial'])
                    else:
                        initial = None
                    if 'cond' in obj['command']['group']:
                        cond = "cond: " + json_encoder.encode(obj['command']['group']['cond'])
                    else:
                        cond = None
                    if '$keyf' in obj['command']['group']:
                        key_function = "keyf: " + min_script(obj['command']['group']['$keyf'])
                    else:
                        key_function = None
                    if '$reduce' in obj['command']['group']:
                        reduce_func = "reduce: " + min_script(obj['command']['group']['$reduce'])
                    else:
                        reduce_func = None
                    if 'finalize' in obj['command']['group']:
                        finalize_func = "finalize: " + min_script(obj['command']['group']['finalize'])
                    else:
                        finalize_func = None
                    query = ", ".join(list(filter(lambda x: x,
                                                  (key, reduce_func, initial, key_function, cond, finalize_func))))
                elif 'map' in obj["command"]:
                    operation = "map"
                    doc = obj["command"]["mapreduce"]
                    del obj["command"]["mapreduce"]
                    map_func = min_script(obj['command']["map"])
                    del obj['command']["map"]
                    reduce_func = min_script(obj['command']["reduce"])
                    del obj['command']["reduce"]
                    query = "{%s, %s, %s}" % (map_func, reduce_func, json_encoder.encode(obj['command']))
                else:
                    warn('Unknown command operation\nDump: %s' % json_encoder.encode(obj))
                if not doc:
                    doc = obj["command"][operation]
            else:
                warn('Unknown operation "%s"\nDump: %s' % (operation, json_encoder.encode(obj)))
            if metadata:
                met = []
                for m in metadata:
                    if m in obj and obj[m] != {}:
                        q = m + ": "
                        if isinstance(obj[m], str):
                            q += '"%s"' % obj[m]
                        elif isinstance(obj[m], dict):
                            q += json_encoder.encode(obj[m])
                        else:
                            q += str(obj[m])
                        met.append(q)
                if met:
                    if not query.endswith("."):
                        query += ". "
                    if not query.endswith(" "):
                        query += " "
                    query += ", ".join(met)
            sys.stdout.write("%s %s [%s] : %s\n" % (ts_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
                                                    operation.upper().ljust(9), doc, query))
            sys.stdout.flush()  # Allows piping the output to other tools like 'grep' during execution
        except (KeyError, TypeError):
            warn('Unknown registry\nDump: %s' % json_encoder.encode(obj))
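The write at the end of print_obj() produces one log line per profile entry: a millisecond timestamp, the padded operation name, the collection, and the rendered query. A self-contained sketch of that format with made-up values:

from datetime import datetime

ts_time = datetime(2024, 1, 1, 12, 0, 0)
operation, doc = 'query', 'users'
query = '{"age": {"$gt": 21}}. 3 returned.'
print("%s %s [%s] : %s" % (ts_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
                           operation.upper().ljust(9), doc, query))
# 2024-01-01 12:00:00.000 QUERY     [users] : {"age": {"$gt": 21}}. 3 returned.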
Connect with address and return a tuple with a :class:`~pymongo.MongoClient` and a :class:`~pymongo.database.Database` object. :param address: a string representation of the db address. :param args: connection arguments: username (optional); password (asked from tty if username is given without one); auth_database (database to authenticate against, defaults to the database in address); and the SSL options ssl, ssl_certfile, ssl_keyfile, ssl_cert_reqs, ssl_ca_certs. :return: a tuple with (client, db)
def connect(address, args): """ Connect with `address`, and return a tuple with a :class:`~pymongo.MongoClient`, and a :class:`~pymongo.database.Database` object. :param address: a string representation with the db address :param args: connection arguments: - username: username for authentication (optional) - password: password for authentication. If username is given and password isn't, it's asked from tty. - auth_database: authenticate the username and password against that database (optional). If not specified, the database specified in address will be used. - ssl, ssl_certfile, ssl_keyfile, ssl_cert_reqs, ssl_ca_certs: SSL authentication options :return: a tuple with ``(client, db)`` """ try: host, port, dbname = get_res_address(address) except AddressError as e: error_parsing(str(e).replace("resource", "database")) try: options = {} if args.ssl: options["ssl"] = True options["ssl_certfile"] = args.ssl_cert_file options["ssl_keyfile"] = args.ssl_key_file options["ssl_cert_reqs"] = args.ssl_cert_reqs options["ssl_ca_certs"] = args.ssl_ca_certs client = MongoClient(host=host, port=port, **options) except Exception as e: error("Error trying to connect: %s" % str(e), ECONNREFUSED) username = args.username password = args.password auth_database = args.auth_database if username: if password is None: password = getpass.getpass() if auth_database is None: auth_database = dbname try: auth_db = client[auth_database] auth_db.authenticate(username, password) except Exception as e: error("Error trying to authenticate: %s" % str(e), -3) db = client[dbname] return client, db
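A hypothetical call to connect(); the "host:port/database" address form handled by get_res_address() is assumed here, and the args namespace mimics the attributes the function reads when no SSL or authentication is involved.

from argparse import Namespace

args = Namespace(username=None, password=None, auth_database=None, ssl=False)
client, db = connect('localhost:27017/mydb', args)
print(db.name)   # mydb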
Print msg error and exit with status exit_code
def error(msg, exit_code): """ Print `msg` error and exit with status `exit_code` """ sys.stderr.write("%s\ntry 'mongotail --help' for more information\n" % msg) sys.stderr.flush() exit(exit_code)