INSTRUCTION
RESPONSE
Convert Result to dict.
def asdict(self, rawkey=False): r"""Convert Result to dict. Parameters: rawkey(bool): * True: dict key is Descriptor instance * False: dict key is str Returns: dict """ if rawkey: return dict(self.items()) else: return { str(k): v for k, v in self.items() }
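A minimal usage sketch for the conversion above, assuming the mordred and rdkit packages are installed; the molecule and the printed value follow the doctest shown in the next entry.
from rdkit import Chem
from mordred import Calculator, descriptors

# Build a calculator with all registered descriptors and run it on cyclohexane.
calc = Calculator(descriptors)
result = calc(Chem.MolFromSmiles("C1CCCCC1"))

# With the default rawkey=False the keys are descriptor names (str);
# with rawkey=True they are the Descriptor instances themselves.
by_name = result.asdict()
by_instance = result.asdict(rawkey=True)
print(by_name["C2SP3"])  # 6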
Access descriptor value by descriptor name or instance.
def name(self): r"""Access descriptor value by descriptor name or instance. >>> from mordred import Calculator, descriptors >>> from rdkit import Chem >>> result = Calculator(descriptors)(Chem.MolFromSmiles("C1CCCCC1")) >>> result.name["C2SP3"] 6 """ if self._name_to_value is None: self._name_to_value = {str(d): v for d, v in zip(self._descriptors, self._values)} return GetValueByName(self._name_to_value)
Decorator to log function calls.
def log_calls(func): '''Decorator to log function calls.''' def wrapper(*args, **kargs): callStr = "%s(%s)" % (func.__name__, ", ".join([repr(p) for p in args] + ["%s=%s" % (k, repr(v)) for (k, v) in list(kargs.items())])) debug(">> %s", callStr) ret = func(*args, **kargs) debug("<< %s: %s", callStr, repr(ret)) return ret return wrapper
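A short, self-contained sketch of how the decorator above behaves; here the module-level debug() it relies on is stood in by logging.debug, which is an assumption about the original module's imports.
import logging
from logging import debug

logging.basicConfig(level=logging.DEBUG, format='%(message)s')

def log_calls(func):
    '''Decorator to log function calls.'''
    def wrapper(*args, **kargs):
        call_str = "%s(%s)" % (func.__name__,
                               ", ".join([repr(p) for p in args] +
                                         ["%s=%r" % (k, v) for k, v in kargs.items()]))
        debug(">> %s", call_str)
        ret = func(*args, **kargs)
        debug("<< %s: %r", call_str, ret)
        return ret
    return wrapper

@log_calls
def add(a, b=0):
    return a + b

add(1, b=2)
# >> add(1, b=2)
# << add(1, b=2): 3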
Decorator to synchronize function.
def synchronized(func): '''Decorator to synchronize function.''' func.__lock__ = threading.Lock() def synced_func(*args, **kargs): with func.__lock__: return func(*args, **kargs) return synced_func
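A hedged usage sketch of the decorator above: several threads bump a shared counter through a synchronized-wrapped function, so the per-function lock serializes the read-modify-write.
import threading

def synchronized(func):
    '''Decorator to synchronize function.'''
    func.__lock__ = threading.Lock()
    def synced_func(*args, **kargs):
        with func.__lock__:
            return func(*args, **kargs)
    return synced_func

counter = {'value': 0}

@synchronized
def bump():
    # Without the lock this read-modify-write could interleave between threads.
    counter['value'] += 1

threads = [threading.Thread(target=lambda: [bump() for _ in range(1000)])
           for _ in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter['value'])  # 8000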
Show current progress message to stderr. This function remembers the previous message so that next time it can clear the previous message before showing the next one.
def progress(msg, *args): '''Show current progress message to stderr. This function will remember the previous message so that next time, it will clear the previous message before showing next one. ''' # Don't show any progress if the output is directed to a file. if not (sys.stdout.isatty() and sys.stderr.isatty()): return text = (msg % args) if progress.prev_message: sys.stderr.write(' ' * len(progress.prev_message) + '\r') sys.stderr.write(text + '\r') progress.prev_message = text
Program message output.
def message(msg, *args): '''Program message output.''' clear_progress() text = (msg % args) sys.stdout.write(text + '\n')
Utility function to handle runtime failures gracefully. Show concise information if possible, then terminate the program.
def fail(message, exc_info=None, status=1, stacktrace=False): '''Utility function to handle runtime failures gracefully. Show concise information if possible, then terminate program. ''' text = message if exc_info: text += str(exc_info) error(text) if stacktrace: error(traceback.format_exc()) clean_tempfiles() if __name__ == '__main__': sys.exit(status) else: raise RuntimeError(status)
Get a temp filename for atomic download.
def tempfile_get(target): '''Get a temp filename for atomic download.''' fn = '%s-%s.tmp' % (target, ''.join(random.Random().sample("0123456789abcdefghijklmnopqrstuvwxyz", 15))) TEMP_FILES.add(fn) return fn
Atomically rename and clean tempfile
def tempfile_set(tempfile, target): '''Atomically rename and clean tempfile''' if target: os.rename(tempfile, target) else: os.unlink(tempfile) if target in TEMP_FILES: TEMP_FILES.remove(tempfile)
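The two helpers above implement an atomic-download pattern: write into a randomly named temp file, then os.rename() it onto the target in one step so readers never see a half-written file. A minimal standalone sketch of the same idea (the file name and payload are hypothetical):
import os
import random

def atomic_write(target, data):
    # Write into a temp file next to the target...
    suffix = ''.join(random.Random().sample("0123456789abcdef", 8))
    tmp = '%s-%s.tmp' % (target, suffix)
    with open(tmp, 'wb') as f:
        f.write(data)
    # ...then rename in one step; on POSIX this replaces the target atomically.
    os.rename(tmp, target)

atomic_write('example.bin', b'payload')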
Clean up temp files
def clean_tempfiles(): '''Clean up temp files''' for fn in TEMP_FILES: if os.path.exists(fn): os.unlink(fn)
Return a list of the logger methods: (debug, info, warn, error)
def get_loggers(self): '''Return a list of the logger methods: (debug, info, warn, error)''' return self.log.debug, self.log.info, self.log.warn, self.log.error
Get the fixed part of the path without wildcard
def get_fixed_path(self): '''Get the fixed part of the path without wildcard''' pi = self.path.split(PATH_SEP) fi = [] for p in pi: if '*' in p or '?' in p: break fi.append(p) return PATH_SEP.join(fi)
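A standalone sketch of the fixed-prefix logic above: keep path components up to the first one containing a wildcard. PATH_SEP is assumed to be '/'.
PATH_SEP = '/'

def get_fixed_path(path):
    '''Return the part of the path before the first wildcard component.'''
    fixed = []
    for part in path.split(PATH_SEP):
        if '*' in part or '?' in part:
            break
        fixed.append(part)
    return PATH_SEP.join(fixed)

print(get_fixed_path('logs/2024-*/app.log'))  # logs
print(get_fixed_path('logs/2024/app.log'))    # logs/2024/app.log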
Given an API name, list all legal parameters using the boto3 service model.
def get_legal_params(self, method): '''Given a API name, list all legal parameters using boto3 service model.''' if method not in self.client.meta.method_to_api_mapping: # Injected methods. Ignore. return [] api = self.client.meta.method_to_api_mapping[method] shape = self.client.meta.service_model.operation_model(api).input_shape if shape is None: # No params needed for this API. return [] return shape.members.keys()
Combine existing parameters with extra options supplied from command-line options. Carefully merge special parameter types if needed.
def merge_opt_params(self, method, kargs): '''Combine existing parameters with extra options supplied from command line options. Carefully merge special type of parameter if needed. ''' for key in self.legal_params[method]: if not hasattr(self.opt, key) or getattr(self.opt, key) is None: continue if key in kargs and type(kargs[key]) == dict: assert(type(getattr(self.opt, key)) == dict) # Merge two dictionaries. for k, v in getattr(self.opt, key).iteritems(): kargs[key][k] = v else: # Overwrite values. kargs[key] = getattr(self.opt, key) return kargs
Add the whole list of API parameters into optparse.
def add_options(parser): '''Add the whole list of API parameters into optparse.''' for param, param_type, param_doc in BotoClient.EXTRA_CLIENT_PARAMS: parser.add_option('--API-' + param, help=param_doc, type=param_type, dest=param)
Override the original join() with a timeout and handle keyboard interrupts.
def join(self): '''Override original join() with a timeout and handle keyboard interrupt.''' self.all_tasks_done.acquire() try: while self.unfinished_tasks: self.all_tasks_done.wait(1000) # Child thread has exceptions, fail main thread too. if self.exc_info: fail('[Thread Failure] ', exc_info=self.exc_info) except KeyboardInterrupt: raise Failure('Interrupted by user') finally: self.all_tasks_done.release()
Terminate all threads by deleting the queue and forcing the child threads to quit.
def terminate(self, exc_info=None): '''Terminate all threads by deleting the queue and forcing the child threads to quit. ''' if exc_info: self.exc_info = exc_info try: while self.get_nowait(): self.task_done() except Queue.Empty: pass
Utility function to add a single task to the task queue
def add_task(self, func_name, *args, **kargs): '''Utility function to add a single task into task queue''' self.tasks.put((func_name, 0, args, kargs))
Utility function to wait for all tasks to complete
def join(self): '''Utility function to wait all tasks to complete''' self.tasks.join() # Force each thread to break loop. for worker in self.workers: self.tasks.put(None) # Wait for all thread to terminate. for worker in self.workers: worker.join() worker.s3 = None
Increment the processed-task counter and show a progress message
def processed(self): '''Increase the processed task counter and show progress message''' self.processed_tasks += 1 qsize = self.tasks.qsize() if qsize > 0: progress('[%d task(s) completed, %d remaining, %d thread(s)]', self.processed_tasks, qsize, len(self.workers)) else: progress('[%d task(s) completed, %d thread(s)]', self.processed_tasks, len(self.workers))
Retrieve S3 access keys from the environment, or return None if not present.
def s3_keys_from_env(): '''Retrieve S3 access keys from the environment, or None if not present.''' env = os.environ if S3_ACCESS_KEY_NAME in env and S3_SECRET_KEY_NAME in env: keys = (env[S3_ACCESS_KEY_NAME], env[S3_SECRET_KEY_NAME]) debug("read S3 keys from environment") return keys else: return None
Retrieve S3 access keys from the command line, or return None if not present.
def s3_keys_from_cmdline(opt): '''Retrieve S3 access keys from the command line, or None if not present.''' if opt.access_key != None and opt.secret_key != None: keys = (opt.access_key, opt.secret_key) debug("read S3 keys from commandline") return keys else: return None
Retrieve S3 access key settings from s3cmd's config file if present; otherwise return None.
def s3_keys_from_s3cfg(opt): '''Retrieve S3 access key settings from s3cmd's config file, if present; otherwise return None.''' try: if opt.s3cfg != None: s3cfg_path = "%s" % opt.s3cfg else: s3cfg_path = "%s/.s3cfg" % os.environ["HOME"] if not os.path.exists(s3cfg_path): return None config = ConfigParser.ConfigParser() config.read(s3cfg_path) keys = config.get("default", "access_key"), config.get("default", "secret_key") debug("read S3 keys from %s file", s3cfg_path) return keys except Exception as e: info("could not read S3 keys from %s file; skipping (%s)", s3cfg_path, e) return None
Initialize s3 access keys from environment variable or s3cfg config file.
def init_s3_keys(opt): '''Initialize s3 access keys from environment variable or s3cfg config file.''' S3Handler.S3_KEYS = S3Handler.s3_keys_from_cmdline(opt) or S3Handler.s3_keys_from_env() \ or S3Handler.s3_keys_from_s3cfg(opt)
Connect to S3 storage
def connect(self): '''Connect to S3 storage''' try: if S3Handler.S3_KEYS: self.s3 = BotoClient(self.opt, S3Handler.S3_KEYS[0], S3Handler.S3_KEYS[1]) else: self.s3 = BotoClient(self.opt) except Exception as e: raise RetryFailure('Unable to connect to s3: %s' % e)
List all buckets
def list_buckets(self): '''List all buckets''' result = [] for bucket in self.s3.list_buckets().get('Buckets') or []: result.append({ 'name': S3URL.combine('s3', bucket['Name'], ''), 'is_dir': True, 'size': 0, 'last_modified': bucket['CreationDate'] }) return result
Walk through an S3 directory. This function initiates a walk from a basedir and supports multiple wildcards.
def s3walk(self, basedir, show_dir=None): '''Walk through a S3 directory. This function initiate a walk with a basedir. It also supports multiple wildcards. ''' # Provide the default value from command line if no override. if not show_dir: show_dir = self.opt.show_dir # trailing slash normalization, this is for the reason that we want # ls 's3://foo/bar/' has the same result as 's3://foo/bar'. Since we # call partial_match() to check wildcards, we need to ensure the number # of slashes stays the same when we do this. if basedir[-1] == PATH_SEP: basedir = basedir[0:-1] s3url = S3URL(basedir) result = [] pool = ThreadPool(ThreadUtil, self.opt) pool.s3walk(s3url, s3url.get_fixed_path(), s3url.path, result) pool.join() # automatic directory detection if not show_dir and len(result) == 1 and result[0]['is_dir']: path = result[0]['name'] s3url = S3URL(path) result = [] pool = ThreadPool(ThreadUtil, self.opt) pool.s3walk(s3url, s3url.get_fixed_path(), s3url.path, result) pool.join() def compare(x, y): '''Comparator for ls output''' result = -cmp(x['is_dir'], y['is_dir']) if result != 0: return result return cmp(x['name'], y['name']) return sorted(result, key=cmp_to_key(compare))
Walk through local directories from root basedir
def local_walk(self, basedir): '''Walk through local directories from root basedir''' result = [] for root, dirs, files in os.walk(basedir): for f in files: result.append(os.path.join(root, f)) return result
Unix-style basename. This function returns 'bar' for '/foo/bar/' instead of an empty string. It is used to normalize a trailing slash in the input.
def get_basename(self, path): '''Unix style basename. This function will return 'bar' for '/foo/bar/' instead of empty string. It is used to normalize the input trailing slash. ''' if path[-1] == PATH_SEP: path = path[0:-1] return os.path.basename(path)
Expand the wildcards for an S3 path. This emulates shell expansion of wildcards if the input is a local path.
def source_expand(self, source): '''Expand the wildcards for an S3 path. This emulates the shall expansion for wildcards if the input is local path. ''' result = [] if not isinstance(source, list): source = [source] for src in source: # XXX Hacky: We need to disable recursive when we expand the input # parameters, need to pass this as an override parameter if # provided. tmp = self.opt.recursive self.opt.recursive = False result += [f['name'] for f in self.s3walk(src, True)] self.opt.recursive = tmp if (len(result) == 0) and (not self.opt.ignore_empty_source): fail("[Runtime Failure] Source doesn't exist.") return result
Upload a single file or a directory by adding a task to the queue
def put_single_file(self, pool, source, target): '''Upload a single file or a directory by adding a task into queue''' if os.path.isdir(source): if self.opt.recursive: for f in (f for f in self.local_walk(source) if not os.path.isdir(f)): target_url = S3URL(target) # deal with ./ or ../ here by normalizing the path. joined_path = os.path.normpath(os.path.join(target_url.path, os.path.relpath(f, source))) pool.upload(f, S3URL.combine('s3', target_url.bucket, joined_path)) else: message('omitting directory "%s".' % source) else: pool.upload(source, target)
Upload files to S3. This function can handle multiple file uploads if source is a list. It also works in recursive mode, which copies all files and keeps the directory structure under the given source directory.
def put_files(self, source, target): '''Upload files to S3. This function can handle multiple file upload if source is a list. Also, it works for recursive mode which copy all files and keep the directory structure under the given source directory. ''' pool = ThreadPool(ThreadUtil, self.opt) if not isinstance(source, list): source = [source] if target[-1] == PATH_SEP: for src in source: self.put_single_file(pool, src, os.path.join(target, self.get_basename(src))) else: if len(source) == 1: self.put_single_file(pool, source[0], target) else: raise Failure('Target "%s" is not a directory (with a trailing slash).' % target) pool.join()
Use the create_bucket API to create a new bucket
def create_bucket(self, source): '''Use the create_bucket API to create a new bucket''' s3url = S3URL(source) message('Creating %s', source) if not self.opt.dry_run: resp = self.s3.create_bucket(Bucket=s3url.bucket) if resp['ResponseMetadata']["HTTPStatusCode"] == 200: message('Done.') else: raise Failure('Unable to create bucket %s' % source)
Get privileges from the metadata of the source in S3 and apply them to the target
def update_privilege(self, obj, target): '''Get privileges from metadata of the source in s3, and apply them to target''' if 'privilege' in obj['Metadata']: os.chmod(target, int(obj['Metadata']['privilege'], 8))
Print out a series of files
def print_files(self, source): '''Print out a series of files''' sources = self.source_expand(source) for source in sources: s3url = S3URL(source) response = self.s3.get_object(Bucket=s3url.bucket, Key=s3url.path) message('%s', response['Body'].read())
Download a single file or a directory by adding a task to the queue
def get_single_file(self, pool, source, target): '''Download a single file or a directory by adding a task into queue''' if source[-1] == PATH_SEP: if self.opt.recursive: basepath = S3URL(source).path for f in (f for f in self.s3walk(source) if not f['is_dir']): pool.download(f['name'], os.path.join(target, os.path.relpath(S3URL(f['name']).path, basepath))) else: message('omitting directory "%s".' % source) else: pool.download(source, target)
Download files. This function can handle multiple files if the source S3 URL has wildcard characters. It also handles recursive mode by downloading all files and keeping the directory structure.
def get_files(self, source, target): '''Download files. This function can handle multiple files if source S3 URL has wildcard characters. It also handles recursive mode by download all files and keep the directory structure. ''' pool = ThreadPool(ThreadUtil, self.opt) source = self.source_expand(source) if os.path.isdir(target): for src in source: self.get_single_file(pool, src, os.path.join(target, self.get_basename(S3URL(src).path))) else: if len(source) > 1: raise Failure('Target "%s" is not a directory.' % target) # Get file if it exists on s3 otherwise do nothing elif len(source) == 1: self.get_single_file(pool, source[0], target) else: #Source expand may return empty list only if ignore-empty-source is set to true pass pool.join()
Remove remote files that are not present in the local source. (Obsolete) It is only used by the old sync command now.
def delete_removed_files(self, source, target): '''Remove remote files that are not present in the local source. (Obsolete) It is used for old sync command now. ''' message("Deleting files found in %s and not in %s", source, target) if os.path.isdir(source): unecessary = [] basepath = S3URL(target).path for f in [f for f in self.s3walk(target) if not f['is_dir']]: local_name = os.path.join(source, os.path.relpath(S3URL(f['name']).path, basepath)) if not os.path.isfile(local_name): message("%s not found locally, adding to delete queue", local_name) unecessary.append(f['name']) if len(unecessary) > 0: pool = ThreadPool(ThreadUtil, self.opt) for del_file in unecessary: pool.delete(del_file) pool.join() else: raise Failure('Source "%s" is not a directory.' % target)
Copy a single file or a directory by adding a task to the queue
def cp_single_file(self, pool, source, target, delete_source): '''Copy a single file or a directory by adding a task into queue''' if source[-1] == PATH_SEP: if self.opt.recursive: basepath = S3URL(source).path for f in (f for f in self.s3walk(source) if not f['is_dir']): pool.copy(f['name'], os.path.join(target, os.path.relpath(S3URL(f['name']).path, basepath)), delete_source=delete_source) else: message('omitting directory "%s".' % source) else: pool.copy(source, target, delete_source=delete_source)
Copy files. This function can handle multiple files if the source S3 URL has wildcard characters. It also handles recursive mode by copying all files and keeping the directory structure.
def cp_files(self, source, target, delete_source=False): '''Copy files This function can handle multiple files if source S3 URL has wildcard characters. It also handles recursive mode by copying all files and keep the directory structure. ''' pool = ThreadPool(ThreadUtil, self.opt) source = self.source_expand(source) if target[-1] == PATH_SEP: for src in source: self.cp_single_file(pool, src, os.path.join(target, self.get_basename(S3URL(src).path)), delete_source) else: if len(source) > 1: raise Failure('Target "%s" is not a directory (with a trailing slash).' % target) # Copy file if it exists otherwise do nothing elif len(source) == 1: self.cp_single_file(pool, source[0], target, delete_source) else: # Source expand may return empty list only if ignore-empty-source is set to true pass pool.join()
Delete files on S3
def del_files(self, source): '''Delete files on S3''' src_files = [] for obj in self.s3walk(source): if not obj['is_dir']: # ignore directories src_files.append(obj['name']) pool = ThreadPool(ThreadUtil, self.opt) pool.batch_delete(src_files) pool.join()
Generic version of directory walk. Return file list without base path for comparison.
def relative_dir_walk(self, dir): '''Generic version of directory walk. Return file list without base path for comparison. ''' result = [] if S3URL.is_valid(dir): basepath = S3URL(dir).path for f in (f for f in self.s3walk(dir) if not f['is_dir']): result.append(os.path.relpath(S3URL(f['name']).path, basepath)) else: for f in (f for f in self.local_walk(dir) if not os.path.isdir(f)): result.append(os.path.relpath(f, dir)) return result
Sync directory to directory.
def dsync_files(self, source, target): '''Sync directory to directory.''' src_s3_url = S3URL.is_valid(source) dst_s3_url = S3URL.is_valid(target) source_list = self.relative_dir_walk(source) if len(source_list) == 0 or '.' in source_list: raise Failure('Sync command need to sync directory to directory.') sync_list = [(os.path.join(source, f), os.path.join(target, f)) for f in source_list] pool = ThreadPool(ThreadUtil, self.opt) if src_s3_url and not dst_s3_url: for src, dest in sync_list: pool.download(src, dest) elif not src_s3_url and dst_s3_url: for src, dest in sync_list: pool.upload(src, dest) elif src_s3_url and dst_s3_url: for src, dest in sync_list: pool.copy(src, dest) else: raise InvalidArgument('Cannot sync two local directories.') pool.join() if self.opt.delete_removed: target_list = self.relative_dir_walk(target) remove_list = [os.path.join(target, f) for f in (set(target_list) - set(source_list))] if S3URL.is_valid(target): pool = ThreadPool(ThreadUtil, self.opt) pool.batch_delete(remove_list) pool.join() else: for f in remove_list: try: os.unlink(f) message('Delete %s', f) except: pass
Sync files to S3. Deletions are implemented only when syncing TO S3. Currently identical to get/put -r -f --sync-check, with the exception of deletions.
def sync_files(self, source, target): '''Sync files to S3. Does implement deletions if syncing TO s3. Currently identical to get/put -r -f --sync-check with exception of deletions. ''' src_s3_url = S3URL.is_valid(source) dst_s3_url = S3URL.is_valid(target) if src_s3_url and not dst_s3_url: self.get_files(source, target) elif not src_s3_url and dst_s3_url: self.put_files(source, target) if self.opt.delete_removed: self.delete_removed_files(source, target) elif src_s3_url and dst_s3_url: self.cp_files(source, target) else: raise InvalidArgument('No S3 URI provided')
Get the size of the given s3url. If it is a directory, combine the sizes of all files under that directory. Subdirectories are not counted unless the --recursive option is set.
def size(self, source): '''Get the size component of the given s3url. If it is a directory, combine the sizes of all the files under that directory. Subdirectories will not be counted unless --recursive option is set. ''' result = [] for src in self.source_expand(source): size = 0 for f in self.s3walk(src): size += f['size'] result.append((src, size)) return result
Calculate MD5 hash code for a local file
def file_hash(self, filename, block_size=2**20): '''Calculate MD5 hash code for a local file''' m = hashlib.md5() with open(filename, 'rb') as f: while True: data = f.read(block_size) if not data: break m.update(data) return m.hexdigest()
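The chunked-read pattern above keeps memory flat for large files. A hedged standalone version of the same idea (the file path is hypothetical, so the call is commented out):
import hashlib

def file_hash(filename, block_size=2**20):
    '''MD5 of a file, read in 1 MiB blocks so memory use stays constant.'''
    m = hashlib.md5()
    with open(filename, 'rb') as f:
        for block in iter(lambda: f.read(block_size), b''):
            m.update(block)
    return m.hexdigest()

# print(file_hash('some_local_file.bin'))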
Get or calculate MD5 value of the local file.
def get_md5(self): '''Get or calculate MD5 value of the local file.''' if self.md5 is None: self.md5 = self.file_hash(self.filename) return self.md5
Ensure all directories are created for a given target file.
def mkdirs(self, target): '''Ensure all directories are created for a given target file.''' path = os.path.dirname(target) if path and path != PATH_SEP and not os.path.isdir(path): # Multi-threading means there will be intervleaved execution # between the check and creation of the directory. try: os.makedirs(path) except OSError as ose: if ose.errno != errno.EEXIST: raise Failure('Unable to create directory (%s)' % (path,))
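On Python 3 the EEXIST dance above can be avoided with exist_ok=True, which is safe under the same multi-threaded race the comment describes; a hedged equivalent sketch:
import os

def mkdirs(target):
    '''Ensure all parent directories of target exist (race-safe on Python 3).'''
    path = os.path.dirname(target)
    if path:
        # exist_ok=True swallows the race where another thread creates the
        # directory between a check and the makedirs call.
        os.makedirs(path, exist_ok=True)

mkdirs('downloads/nested/dir/file.bin')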
Check MD5 for a local file and a remote file. Return True if they have the same MD5 hash, otherwise False.
def sync_check(self, md5cache, remoteKey): '''Check MD5 for a local file and a remote file. Return True if they have the same md5 hash, otherwise False. ''' if not remoteKey: return False if not os.path.exists(md5cache.filename): return False localmd5 = md5cache.get_md5() # check multiple md5 locations return ('ETag' in remoteKey and remoteKey['ETag'] == '"%s"' % localmd5) or \ ('md5' in remoteKey and remoteKey['md5'] == localmd5) or \ ('md5' in remoteKey['Metadata'] and remoteKey['Metadata']['md5'] == localmd5)
Partially match a path against a filter_path with wildcards. This function returns True if the path partially matches the filter path. It is used for walking through directories with multi-level wildcards.
def partial_match(self, path, filter_path): '''Partially match a path and a filter_path with wildcards. This function will return True if this path partially match a filter path. This is used for walking through directories with multiple level wildcard. ''' if not path or not filter_path: return True # trailing slash normalization if path[-1] == PATH_SEP: path = path[0:-1] if filter_path[-1] == PATH_SEP: filter_path += '*' pi = path.split(PATH_SEP) fi = filter_path.split(PATH_SEP) # Here, if we are in recursive mode, we allow the pi to be longer than fi. # Otherwise, length of pi should be equal or less than the lenght of fi. min_len = min(len(pi), len(fi)) matched = fnmatch.fnmatch(PATH_SEP.join(pi[0:min_len]), PATH_SEP.join(fi[0:min_len])) return matched and (self.opt.recursive or len(pi) <= len(fi))
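A trimmed standalone sketch of the partial-match idea above: compare only as many path components as the shorter side has, using fnmatch; the recursive flag is passed explicitly here instead of being read from self.opt.
import fnmatch

PATH_SEP = '/'

def partial_match(path, filter_path, recursive=False):
    '''True if path matches filter_path up to the shorter component count.'''
    if not path or not filter_path:
        return True
    pi = path.rstrip(PATH_SEP).split(PATH_SEP)
    if filter_path.endswith(PATH_SEP):
        filter_path += '*'
    fi = filter_path.split(PATH_SEP)
    min_len = min(len(pi), len(fi))
    matched = fnmatch.fnmatch(PATH_SEP.join(pi[:min_len]), PATH_SEP.join(fi[:min_len]))
    # Without recursion, the candidate may not be deeper than the filter.
    return matched and (recursive or len(pi) <= len(fi))

print(partial_match('bucket/2024', 'bucket/20*/logs/*.gz'))            # True (partial)
print(partial_match('bucket/2024/logs/a.gz', 'bucket/20*/logs/*.gz'))  # True (full)
print(partial_match('bucket/2025/other', 'bucket/20*/logs/*.gz'))      # False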
Thread worker for s3walk. Recursively walk into all subdirectories if they still match the filter path partially.
def s3walk(self, s3url, s3dir, filter_path, result): '''Thread worker for s3walk. Recursively walk into all subdirectories if they still match the filter path partially. ''' paginator = self.s3.get_paginator('list_objects') filter_path_level = filter_path.count(PATH_SEP) for page in paginator.paginate(Bucket=s3url.bucket, Prefix=s3dir, Delimiter=PATH_SEP, PaginationConfig={'PageSize': 1000}): # Get subdirectories first. for obj in page.get('CommonPrefixes') or []: obj_name = obj['Prefix'] if not self.partial_match(obj_name, filter_path): continue if self.opt.recursive or (obj_name.count(PATH_SEP) != filter_path_level + 1): self.pool.s3walk(s3url, obj_name, filter_path, result) else: self.conditional(result, { 'name': S3URL.combine(s3url.proto, s3url.bucket, obj_name), 'is_dir': True, 'size': 0, 'last_modified': None }) # Then get all items in this folder. for obj in page.get('Contents') or []: obj_name = obj['Key'] if not self.partial_match(obj_name, filter_path): continue if self.opt.recursive or obj_name.count(PATH_SEP) == filter_path_level: self.conditional(result, { 'name': S3URL.combine(s3url.proto, s3url.bucket, obj_name), 'is_dir': False, 'size': obj['Size'], 'last_modified': obj['LastModified'] })
Check a file item against the given conditions.
def conditional(self, result, obj): '''Check all file item with given conditions.''' fileonly = (self.opt.last_modified_before is not None) or (self.opt.last_modified_after is not None) if obj['is_dir']: if not fileonly: result.append(obj) return if (self.opt.last_modified_before is not None) and obj['last_modified'] >= self.opt.last_modified_before: return if (self.opt.last_modified_after is not None) and obj['last_modified'] <= self.opt.last_modified_after: return result.append(obj)
Get file splits for an upload/download/copy operation.
def get_file_splits(self, id, source, target, fsize, splitsize): '''Get file splits for upload/download/copy operation.''' pos = 0 part = 1 # S3 part id starts from 1 mpi = ThreadUtil.MultipartItem(id) splits = [] while pos < fsize: chunk = min(splitsize, fsize - pos) assert(chunk > 0) splits.append((source, target, mpi, pos, chunk, part)) part += 1 pos += chunk mpi.total = len(splits) return splits
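A quick standalone check of the splitting arithmetic: a file size is cut into (pos, chunk, part) tuples, with S3 part numbers starting at 1 and the last chunk possibly shorter than the split size.
def file_splits(fsize, splitsize):
    '''Yield (pos, chunk, part) tuples covering the whole file.'''
    pos, part = 0, 1  # S3 part numbers start at 1
    while pos < fsize:
        chunk = min(splitsize, fsize - pos)
        yield pos, chunk, part
        pos += chunk
        part += 1

print(list(file_splits(fsize=25, splitsize=10)))
# [(0, 10, 1), (10, 10, 2), (20, 5, 3)]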
Get privileges of a local file
def get_file_privilege(self, source): '''Get privileges of a local file''' try: return str(oct(os.stat(source).st_mode)[-3:]) except Exception as e: raise Failure('Could not get stat for %s, error_message = %s', source, e)
Get the S3 object for the given S3 URL. Return None if it does not exist.
def lookup(self, s3url): '''Get the s3 object with the S3 URL. Return None if not exist.''' try: return self.s3.head_object(Bucket=s3url.bucket, Key=s3url.path) except BotoClient.ClientError as e: if e.response['ResponseMetadata']['HTTPStatusCode'] == 404: return None else: raise e
Read local file chunk
def read_file_chunk(self, source, pos, chunk): '''Read local file chunk''' if chunk==0: return StringIO() data = None with open(source, 'rb') as f: f.seek(pos) data = f.read(chunk) if not data: raise Failure('Unable to read data from source: %s' % source) return StringIO(data)
Thread worker for upload operation.
def upload(self, source, target, mpi=None, pos=0, chunk=0, part=0): '''Thread worker for upload operation.''' s3url = S3URL(target) obj = self.lookup(s3url) # Initialization: Set up multithreaded uploads. if not mpi: fsize = os.path.getsize(source) md5cache = LocalMD5Cache(source) # optional checks if self.opt.dry_run: message('%s => %s', source, target) return elif self.opt.sync_check and self.sync_check(md5cache, obj): message('%s => %s (synced)', source, target) return elif not self.opt.force and obj: raise Failure('File already exists: %s' % target) if fsize < self.opt.max_singlepart_upload_size: data = self.read_file_chunk(source, 0, fsize) self.s3.put_object(Bucket=s3url.bucket, Key=s3url.path, Body=data, Metadata={'md5': md5cache.get_md5(), 'privilege': self.get_file_privilege(source)}) message('%s => %s', source, target) return # Here we need to have our own md5 value because multipart upload calculates # different md5 values. response = self.s3.create_multipart_upload(Bucket=s3url.bucket, Key=s3url.path, Metadata={'md5': md5cache.get_md5(), 'privilege': self.get_file_privilege(source)}) upload_id = response['UploadId'] for args in self.get_file_splits(upload_id, source, target, fsize, self.opt.multipart_split_size): self.pool.upload(*args) return data = self.read_file_chunk(source, pos, chunk) response = self.s3.upload_part(Bucket=s3url.bucket, Key=s3url.path, UploadId=mpi.id, Body=data, PartNumber=part) # Finalize if mpi.complete({'ETag': response['ETag'], 'PartNumber': part}): try: self.s3.complete_multipart_upload(Bucket=s3url.bucket, Key=s3url.path, UploadId=mpi.id, MultipartUpload={'Parts': mpi.sorted_parts()}) message('%s => %s', source, target) except Exception as e: message('Unable to complete upload: %s', str(e)) self.s3.abort_multipart_upload(Bucket=s3url.bucket, Key=s3url.path, UploadId=mpi.id) raise RetryFailure('Upload failed: Unable to complete upload %s.' % source)
Verify the file size of the downloaded file.
def _verify_file_size(self, obj, downloaded_file): '''Verify the file size of the downloaded file.''' file_size = os.path.getsize(downloaded_file) if int(obj['ContentLength']) != file_size: raise RetryFailure('Downloaded file size inconsistent: %s' % (repr(obj)))
Write local file chunk
def write_file_chunk(self, target, pos, chunk, body): '''Write local file chunk''' fd = os.open(target, os.O_CREAT | os.O_WRONLY) try: os.lseek(fd, pos, os.SEEK_SET) data = body.read(chunk) num_bytes_written = os.write(fd, data) if(num_bytes_written != len(data)): raise RetryFailure('Number of bytes written inconsistent: %s != %s' % (num_bytes_written, sys.getsizeof(data))) finally: os.close(fd)
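The low-level os.open/lseek/write combination above lets several workers write different byte ranges of the same file; a compact standalone sketch (file name and payloads are made up):
import os

def write_chunk(target, pos, data):
    '''Write data at byte offset pos, creating the file if needed.'''
    fd = os.open(target, os.O_CREAT | os.O_WRONLY)
    try:
        os.lseek(fd, pos, os.SEEK_SET)
        written = os.write(fd, data)
        if written != len(data):
            raise IOError('short write: %d of %d bytes' % (written, len(data)))
    finally:
        os.close(fd)

write_chunk('parts.bin', 0, b'first-part')
write_chunk('parts.bin', 10, b'second-part')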
Thread worker for download operation.
def download(self, source, target, mpi=None, pos=0, chunk=0, part=0): '''Thread worker for download operation.''' s3url = S3URL(source) obj = self.lookup(s3url) if obj is None: raise Failure('The obj "%s" does not exists.' % (s3url.path,)) # Initialization: Set up multithreaded downloads. if not mpi: # optional checks if self.opt.dry_run: message('%s => %s', source, target) return elif self.opt.sync_check and self.sync_check(LocalMD5Cache(target), obj): message('%s => %s (synced)', source, target) return elif not self.opt.force and os.path.exists(target): raise Failure('File already exists: %s' % target) fsize = int(obj['ContentLength']) # Small file optimization. if fsize < self.opt.max_singlepart_download_size: # Create a single part to chain back main download operation. mpi = ThreadUtil.MultipartItem(tempfile_get(target)) mpi.total = 1 pos = 0 chunk = fsize # Continue as one part download. else: # Here we use temp filename as the id of mpi. for args in self.get_file_splits(tempfile_get(target), source, target, fsize, self.opt.multipart_split_size): self.pool.download(*args) return tempfile = mpi.id if self.opt.recursive: self.mkdirs(tempfile) # Download part of the file, range is inclusive. response = self.s3.get_object(Bucket=s3url.bucket, Key=s3url.path, Range='bytes=%d-%d' % (pos, pos + chunk - 1)) self.write_file_chunk(tempfile, pos, chunk, response['Body']) # Finalize if mpi.complete({'PartNumber': part}): try: self.update_privilege(obj, tempfile) self._verify_file_size(obj, tempfile) tempfile_set(tempfile, target) message('%s => %s', source, target) except Exception as e: # Note that we don't retry in this case, because # We are going to remove the temp file, and if we # retry here with original parameters (wrapped in # the task item), it would fail anyway tempfile_set(tempfile, None) raise Failure('Download Failure: %s, Source: %s.' % (e.message, source))
Copy a single file from source to target using boto S3 library.
def copy(self, source, target, mpi=None, pos=0, chunk=0, part=0, delete_source=False): '''Copy a single file from source to target using boto S3 library.''' if self.opt.dry_run: message('%s => %s' % (source, target)) return source_url = S3URL(source) target_url = S3URL(target) if not mpi: obj = self.lookup(source_url) fsize = int(obj['ContentLength']) if fsize < self.opt.max_singlepart_copy_size: self.s3.copy_object(Bucket=target_url.bucket, Key=target_url.path, CopySource={'Bucket': source_url.bucket, 'Key': source_url.path}) message('%s => %s' % (source, target)) if delete_source: self.delete(source) return response = self.s3.create_multipart_upload(Bucket=target_url.bucket, Key=target_url.path, Metadata=obj['Metadata']) upload_id = response['UploadId'] for args in self.get_file_splits(upload_id, source, target, fsize, self.opt.multipart_split_size): self.pool.copy(*args, delete_source=delete_source) return response = self.s3.upload_part_copy(Bucket=target_url.bucket, Key=target_url.path, CopySource={'Bucket': source_url.bucket, 'Key': source_url.path}, CopySourceRange='bytes=%d-%d' % (pos, pos + chunk - 1), UploadId=mpi.id, PartNumber=part) if mpi.complete({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part}): try: # Finalize copy operation. self.s3.complete_multipart_upload(Bucket=target_url.bucket, Key=target_url.path, UploadId=mpi.id, MultipartUpload={'Parts': mpi.sorted_parts()}) if delete_source: self.delete(source) message('%s => %s' % (source, target)) except Exception as e: message('Unable to complete upload: %s', str(e)) self.s3.abort_multipart_upload(Bucket=source_url.bucket, Key=source_url.path, UploadId=mpi.id) raise RetryFailure('Copy failed: Unable to complete copy %s.' % source)
Thread worker for delete operation.
def delete(self, source): '''Thread worker for delete operation.''' s3url = S3URL(source) message('Delete %s', source) if not self.opt.dry_run: self.s3.delete_object(Bucket=s3url.bucket, Key=s3url.path)
Delete a list of files in batches of batch_delete_size (default: 1000).
def batch_delete(self, sources): '''Delete a list of files in batch of batch_delete_size (default=1000).''' assert(type(sources) == list) if len(sources) == 0: return elif len(sources) == 1: self.delete(sources[0]) elif len(sources) > self.opt.batch_delete_size: for i in range(0, len(sources), self.opt.batch_delete_size): self.pool.batch_delete(sources[i:i+self.opt.batch_delete_size]) else: bucket = S3URL(sources[0]).bucket deletes = [] for source in sources: s3url = S3URL(source) if s3url.bucket != bucket: raise Failure('Unable to delete keys in different bucket %s and %s.' % (s3url.bucket, bucket)) deletes.append({'Key': s3url.path}) response = self.s3.delete_objects(Bucket=bucket, Delete={'Objects': deletes}) # Output result of deletion. for res in response.get('Deleted') or []: message('Delete %s', S3URL.combine('s3', bucket, res['Key'])) for err in response.get('Errors') or []: message('Error deleting %s, code(%s) %s', S3URL.combine('s3', bucket, res['Key']), err['Code'], err['Message']) if response.get('Errors') is not None: raise RetryFailure('Unable to complete deleting %d files.' % len(response.get('Errors')))
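The key idea above is chunking the key list to stay under the 1000-key limit of S3's delete_objects call. A hedged sketch of the same batching with boto3 directly; the bucket and keys are hypothetical.
import boto3

def batch_delete(bucket, keys, batch_size=1000):
    '''Delete keys in batches, honoring the 1000-key limit of delete_objects.'''
    s3 = boto3.client('s3')
    for i in range(0, len(keys), batch_size):
        batch = keys[i:i + batch_size]
        resp = s3.delete_objects(
            Bucket=bucket,
            Delete={'Objects': [{'Key': k} for k in batch]})
        for err in resp.get('Errors', []):
            print('Error deleting %s: %s' % (err['Key'], err['Message']))

# batch_delete('my-bucket', ['logs/a.gz', 'logs/b.gz'])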
Main entry to handle commands. Dispatch to individual command handler.
def run(self, args): '''Main entry to handle commands. Dispatch to individual command handler.''' if len(args) == 0: raise InvalidArgument('No command provided') cmd = args[0] if cmd + '_handler' in CommandHandler.__dict__: CommandHandler.__dict__[cmd + '_handler'](self, args) else: raise InvalidArgument('Unknown command %s' % cmd)
Validate input parameters with given format. This function also checks for wildcards for recursive mode.
def validate(self, format, args): '''Validate input parameters with given format. This function also checks for wildcards for recursive mode. ''' fmtMap = { 'cmd': 'Command', 's3': 's3 path', 'local': 'local path' } fmts = format.split('|') if len(fmts) != len(args): raise InvalidArgument('Invalid number of parameters') for i, fmt in enumerate(fmts): valid = False for f in fmt.split(','): if f == 'cmd' and args[i] + '_handler' in CommandHandler.__dict__: valid = True if f == 's3' and S3URL.is_valid(args[i]): valid = True if f == 'local' and not S3URL.is_valid(args[i]): valid = True if not valid: raise InvalidArgument('Invalid parameter: %s, %s expected' % (args[i], fmtMap[fmt.split(',')[0]]))
Pretty print the result of s3walk. Here we calculate the maximum width of each column and align them.
def pretty_print(self, objlist): '''Pretty print the result of s3walk. Here we calculate the maximum width of each column and align them. ''' def normalize_time(timestamp): '''Normalize the timestamp format for pretty print.''' if timestamp is None: return ' ' * 16 return TIMESTAMP_FORMAT % (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute) cwidth = [0, 0, 0] format = '%%%ds %%%ds %%-%ds' # Calculate maximum width for each column. result = [] for obj in objlist: last_modified = normalize_time(obj['last_modified']) size = str(obj['size']) if not obj['is_dir'] else 'DIR' name = obj['name'] item = (last_modified, size, name) for i, value in enumerate(item): if cwidth[i] < len(value): cwidth[i] = len(value) result.append(item) # Format output. for item in result: text = (format % tuple(cwidth)) % item message('%s', text.rstrip())
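The column-width trick above (measure each column's maximum width, then build a format string) in a small standalone form with made-up rows:
rows = [('2024-01-02 10:30', '1048576', 's3://bucket/big.bin'),
        ('                ', 'DIR',     's3://bucket/logs/')]

# First pass: widest cell in each column.
widths = [max(len(row[i]) for row in rows) for i in range(3)]

# Second pass: right-align time and size, left-align the name.
fmt = '%%%ds %%%ds %%-%ds' % tuple(widths)
for row in rows:
    print((fmt % row).rstrip())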
Handler for ls command
def ls_handler(self, args): '''Handler for ls command''' if len(args) == 1: self.pretty_print(self.s3handler().list_buckets()) return self.validate('cmd|s3', args) self.pretty_print(self.s3handler().s3walk(args[1]))
Handler for mb command
def mb_handler(self, args): '''Handler for mb command''' if len(args) == 1: raise InvalidArgument('No s3 bucketname provided') self.validate('cmd|s3', args) self.s3handler().create_bucket(args[1])
Handler for put command
def put_handler(self, args): '''Handler for put command''' # Special check for shell expansion if len(args) < 3: raise InvalidArgument('Invalid number of parameters') self.validate('|'.join(['cmd'] + ['local'] * (len(args) - 2) + ['s3']), args) source = args[1:-1] # shell expansion target = args[-1] self.s3handler().put_files(source, target)
Handler for get command
def get_handler(self, args): '''Handler for get command''' # Special case when we don't have target directory. if len(args) == 2: args += ['.'] self.validate('cmd|s3|local', args) source = args[1] target = args[2] self.s3handler().get_files(source, target)
Handler for cat command
def cat_handler(self, args): '''Handler for cat command''' self.validate('cmd|s3', args) source = args[1] self.s3handler().print_files(source)
Handler for dsync command.
def dsync_handler(self, args): '''Handler for dsync command.''' self.opt.recursive = True self.opt.sync_check = True self.opt.force = True self.validate('cmd|s3,local|s3,local', args) source = args[1] target = args[2] self.s3handler().dsync_files(source, target)
Handler for sync command. XXX Here we emulate sync command with get/ put - r - f -- sync - check. So it doesn t provide delete operation.
def sync_handler(self, args): '''Handler for sync command. XXX Here we emulate sync command with get/put -r -f --sync-check. So it doesn't provide delete operation. ''' self.opt.recursive = True self.opt.sync_check = True self.opt.force = True self.validate('cmd|s3,local|s3,local', args) source = args[1] target = args[2] self.s3handler().sync_files(source, target)
Handler for cp command
def cp_handler(self, args): '''Handler for cp command''' self.validate('cmd|s3|s3', args) source = args[1] target = args[2] self.s3handler().cp_files(source, target)
Handler for mv command
def mv_handler(self, args): '''Handler for mv command''' self.validate('cmd|s3|s3', args) source = args[1] target = args[2] self.s3handler().cp_files(source, target, delete_source=True)
Handler for del command
def del_handler(self, args): '''Handler for del command''' self.validate('cmd|s3', args) source = args[1] self.s3handler().del_files(source)
Handler for size command
def du_handler(self, args): '''Handler for size command''' for src, size in self.s3handler().size(args[1:]): message('%s\t%s' % (size, src))
Handler of total_size command
def _totalsize_handler(self, args): '''Handler of total_size command''' total_size = 0 for src, size in self.s3handler().size(args[1:]): total_size += size message(str(total_size))
Search for date information in the string
def match_date(self, value): '''Search for date information in the string''' m = self.REGEX_DATE.search(value) date = datetime.datetime.utcnow().date() if m: date = datetime.date(int(m.group(1)), int(m.group(2)), int(m.group(3))) value = self.REGEX_DATE.sub('', value) return (date, value)
Search for time information in the string
def match_time(self, value): '''Search for time information in the string''' m = self.REGEX_TIME.search(value) time = datetime.datetime.utcnow().time() if m: time = datetime.time(int(m.group(1)), int(m.group(2))) value = self.REGEX_TIME.sub('', value) return (time, value)
Search for timedelta information in the string
def match_delta(self, value): '''Search for timedelta information in the string''' m = self.REGEX_DELTA.search(value) delta = datetime.timedelta(days=0) if m: d = int(m.group(1)) if m.group(3) == 'ago' or m.group(3) == 'before': d = -d if m.group(2) == 'minute': delta = datetime.timedelta(minutes=d) elif m.group(2) == 'hour': delta = datetime.timedelta(hours=d) elif m.group(2) == 'day': delta = datetime.timedelta(days=d) elif m.group(2) == 'week': delta = datetime.timedelta(weeks=d) value = self.REGEX_DELTA.sub('', value) return (delta, value)
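A hedged standalone sketch of the relative-time parsing above. The exact REGEX_DELTA pattern is not shown in the source, so the pattern used here is an assumption about its shape.
import datetime
import re

# Assumed shape of the pattern: "<number> <unit> [ago|before|later]".
REGEX_DELTA = re.compile(r'(\d+)\s+(minute|hour|day|week)s?\s*(ago|before|later)?')

UNITS = {'minute': 'minutes', 'hour': 'hours', 'day': 'days', 'week': 'weeks'}

def match_delta(value):
    '''Return (timedelta, remaining_string) parsed from value.'''
    m = REGEX_DELTA.search(value)
    delta = datetime.timedelta()
    if m:
        amount = int(m.group(1))
        if m.group(3) in ('ago', 'before'):
            amount = -amount
        delta = datetime.timedelta(**{UNITS[m.group(2)]: amount})
        value = REGEX_DELTA.sub('', value)
    return delta, value

print(match_delta('2 days ago'))  # (datetime.timedelta(days=-2), '')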
Take json as dictionary parameter
def check_dict(self, opt, value): '''Take json as dictionary parameter''' try: return json.loads(value) except: raise optparse.OptionValueError("Option %s: invalid dict value: %r" % (opt, value))
Discover gateways using multicast
def discover_gateways(self): """Discover gateways using multicast""" _socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) _socket.settimeout(5.0) if self._interface != 'any': _socket.bind((self._interface, 0)) for gateway in self._gateways_config: host = gateway.get('host') port = gateway.get('port') sid = gateway.get('sid') if not (host and port and sid): continue try: ip_address = socket.gethostbyname(host) if gateway.get('disable'): _LOGGER.info( 'Xiaomi Gateway %s is disabled by configuration', sid) self.disabled_gateways.append(ip_address) continue _LOGGER.info( 'Xiaomi Gateway %s configured at IP %s:%s', sid, ip_address, port) self.gateways[ip_address] = XiaomiGateway( ip_address, port, sid, gateway.get('key'), self._device_discovery_retries, self._interface, gateway.get('proto')) except OSError as error: _LOGGER.error( "Could not resolve %s: %s", host, error) try: _socket.sendto('{"cmd":"whois"}'.encode(), (self.MULTICAST_ADDRESS, self.GATEWAY_DISCOVERY_PORT)) while True: data, (ip_add, _) = _socket.recvfrom(1024) if len(data) is None or ip_add in self.gateways: continue if ip_add in self.gateways.keys() or ip_add in self.disabled_gateways: continue resp = json.loads(data.decode()) if resp["cmd"] != 'iam': _LOGGER.error("Response does not match return cmd") continue if resp["model"] not in GATEWAY_MODELS: _LOGGER.error("Response must be gateway model") continue disabled = False gateway_key = None for gateway in self._gateways_config: sid = gateway.get('sid') if sid is None or sid == resp["sid"]: gateway_key = gateway.get('key') if sid and sid == resp['sid'] and gateway.get('disable'): disabled = True sid = resp["sid"] if disabled: _LOGGER.info("Xiaomi Gateway %s is disabled by configuration", sid) self.disabled_gateways.append(ip_add) else: _LOGGER.info('Xiaomi Gateway %s found at IP %s', sid, ip_add) self.gateways[ip_add] = XiaomiGateway( ip_add, resp["port"], sid, gateway_key, self._device_discovery_retries, self._interface, resp["proto_version"] if "proto_version" in resp else None) except socket.timeout: _LOGGER.info("Gateway discovery finished in 5 seconds") _socket.close()
Start listening.
def listen(self): """Start listening.""" _LOGGER.info('Creating Multicast Socket') self._mcastsocket = self._create_mcast_socket() self._listening = True thread = Thread(target=self._listen_to_msg, args=()) self._threads.append(thread) thread.daemon = True thread.start()
Stop listening.
def stop_listen(self): """Stop listening.""" self._listening = False if self._mcastsocket is not None: _LOGGER.info('Closing multisocket') self._mcastsocket.close() self._mcastsocket = None for thread in self._threads: thread.join()
Send data to the gateway to turn a device on/off
def write_to_hub(self, sid, **kwargs): """Send data to gateway to turn on / off device""" if self.key is None: _LOGGER.error('Gateway Key is not provided. Can not send commands to the gateway.') return False data = {} for key in kwargs: data[key] = kwargs[key] if not self.token: _LOGGER.debug('Gateway Token was not obtained yet. Cannot send commands to the gateway.') return False cmd = dict() cmd['cmd'] = 'write' cmd['sid'] = sid if int(self.proto[0:1]) == 1: data['key'] = self._get_key() cmd['data'] = data else: cmd['key'] = self._get_key() cmd['params'] = [data] resp = self._send_cmd(json.dumps(cmd), "write_ack") if int(self.proto[0:1]) == 1 \ else self._send_cmd(json.dumps(cmd), "write_rsp") _LOGGER.debug("write_ack << %s", resp) if _validate_data(resp): return True if not _validate_keyerror(resp): return False # If 'invalid key' message we ask for a new token resp = self._send_cmd('{"cmd" : "get_id_list"}', "get_id_list_ack") if int(self.proto[0:1]) == 1 \ else self._send_cmd('{"cmd" : "discovery"}', "discovery_rsp") _LOGGER.debug("get_id_list << %s", resp) if resp is None or "token" not in resp: _LOGGER.error('No new token from gateway. Can not send commands to the gateway.') return False self.token = resp['token'] if int(self.proto[0:1]) == 1: data['key'] = self._get_key() cmd['data'] = data else: cmd['key'] = self._get_key() cmd['params'] = [data] resp = self._send_cmd(json.dumps(cmd), "write_ack") if int(self.proto[0:1]) == 1 \ else self._send_cmd(json.dumps(cmd), "write_rsp") _LOGGER.debug("write_ack << %s", resp) return _validate_data(resp)
Get data from gateway
def get_from_hub(self, sid): """Get data from gateway""" cmd = '{ "cmd":"read","sid":"' + sid + '"}' resp = self._send_cmd(cmd, "read_ack") if int(self.proto[0:1]) == 1 else self._send_cmd(cmd, "read_rsp") _LOGGER.debug("read_ack << %s", resp) return self.push_data(resp)
Push data broadcasted from gateway to device
def push_data(self, data): """Push data broadcasted from gateway to device""" if not _validate_data(data): return False jdata = json.loads(data['data']) if int(self.proto[0:1]) == 1 else _list2map(data['params']) if jdata is None: return False sid = data['sid'] for func in self.callbacks[sid]: func(jdata, data) return True
Get key using token from gateway
def _get_key(self): """Get key using token from gateway""" init_vector = bytes(bytearray.fromhex('17996d093d28ddb3ba695a2e6f58562e')) encryptor = Cipher(algorithms.AES(self.key.encode()), modes.CBC(init_vector), backend=default_backend()).encryptor() ciphertext = encryptor.update(self.token.encode()) + encryptor.finalize() if isinstance(ciphertext, str): # For Python 2 compatibility return ''.join('{:02x}'.format(ord(x)) for x in ciphertext) return ''.join('{:02x}'.format(x) for x in ciphertext)
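The gateway key above is derived by AES-128-CBC-encrypting the current token with a fixed IV. A hedged standalone sketch with the cryptography package; the key and token values below are made up, and both must be 16 bytes so no padding is needed.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

IV = bytes.fromhex('17996d093d28ddb3ba695a2e6f58562e')

def gateway_key(key, token):
    '''AES-128-CBC encrypt the 16-byte token with the 16-byte gateway key.'''
    encryptor = Cipher(algorithms.AES(key.encode()), modes.CBC(IV),
                       backend=default_backend()).encryptor()
    ciphertext = encryptor.update(token.encode()) + encryptor.finalize()
    return ciphertext.hex()

# Both values are hypothetical 16-character strings.
print(gateway_key('0123456789abcdef', 'fedcba9876543210'))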
Train your awesome model.
def train(hparams, *args): """Train your awesome model. :param hparams: The arguments to run the model with. """ # Initialize experiments and track all the hyperparameters exp = Experiment( name=hparams.test_tube_exp_name, # Location to save the metrics. save_dir=hparams.log_path, autosave=False, ) exp.argparse(hparams) # Pretend to train. x = torch.rand((1, hparams.x_val)) for train_step in range(0, 100): y = torch.rand((hparams.x_val, 1)) out = x.mm(y) exp.log({'fake_err': out.item()}) # Save exp when done. exp.save()
Train your awesome model.
def train(hparams, *args): """Train your awesome model. :param hparams: The arguments to run the model with. """ # Initialize experiments and track all the hyperparameters exp = Experiment( name=hparams.test_tube_exp_name, # Location to save the metrics. save_dir=hparams.log_path, # The experiment version is optional, but using the one # from SLURM means the exp will not collide with other # versions if SLURM runs multiple at once. version=hparams.hpc_exp_number, autosave=False, ) exp.argparse(hparams) # Pretend to train. x = hparams.x_val for train_step in range(0, 100): y = hparams.y_val out = x * y exp.log({'fake_err': out.item()}) # Log metrics. # Save exp when done. exp.save()
Called by RQ when there is a failure in a worker.
def exception_handler(job, *exc_info): """ Called by RQ when there is a failure in a worker. NOTE: Make sure that in your RQ worker process, rollbar.init() has been called with handler='blocking'. The default handler, 'thread', does not work from inside an RQ worker. """ # Report data about the job with the exception. job_info = job.to_dict() # job_info['data'] is the pickled representation of the job, and doesn't json-serialize well. # repr() works nicely. job_info['data'] = repr(job_info['data']) extra_data = {'job': job_info} payload_data = {'framework': 'rq'} rollbar.report_exc_info(exc_info, extra_data=extra_data, payload_data=payload_data) # continue to the next handler return True
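A hedged sketch of wiring the handler above into an RQ worker; the queue name is hypothetical, and per the note in the docstring, rollbar.init() inside the worker process should use handler='blocking'.
import rollbar
from redis import Redis
from rq import Queue, Worker

rollbar.init('YOUR_PROJECT_ACCESS_TOKEN', environment='production',
             handler='blocking')  # 'thread' does not work inside an RQ worker

redis_conn = Redis()
queue = Queue('default', connection=redis_conn)

# Register the handler so job failures are reported to Rollbar.
worker = Worker([queue], connection=redis_conn,
                exception_handlers=[exception_handler])
worker.work()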
Patches the pyramid_debugtoolbar (if installed) to display a link to the related rollbar item.
def patch_debugtoolbar(settings): """ Patches the pyramid_debugtoolbar (if installed) to display a link to the related rollbar item. """ try: from pyramid_debugtoolbar import tbtools except ImportError: return rollbar_web_base = settings.get('rollbar.web_base', DEFAULT_WEB_BASE) if rollbar_web_base.endswith('/'): rollbar_web_base = rollbar_web_base[:-1] def insert_rollbar_console(request, html): # insert after the closing </h1> item_uuid = request.environ.get('rollbar.uuid') if not item_uuid: return html url = '%s/item/uuid/?uuid=%s' % (rollbar_web_base, item_uuid) link = '<a style="color:white;" href="%s">View in Rollbar</a>' % url new_data = "<h2>Rollbar: %s</h2>" % link insertion_marker = "</h1>" replacement = insertion_marker + new_data return html.replace(insertion_marker, replacement, 1) # patch tbtools.Traceback.render_full old_render_full = tbtools.Traceback.render_full def new_render_full(self, request, *args, **kw): html = old_render_full(self, request, *args, **kw) return insert_rollbar_console(request, html) tbtools.Traceback.render_full = new_render_full
Pyramid entry point
def includeme(config): """ Pyramid entry point """ settings = config.registry.settings config.add_tween('rollbar.contrib.pyramid.rollbar_tween_factory', over=EXCVIEW) # run patch_debugtoolbar, unless they disabled it if asbool(settings.get('rollbar.patch_debugtoolbar', True)): patch_debugtoolbar(settings) def hook(request, data): data['framework'] = 'pyramid' if request: request.environ['rollbar.uuid'] = data['uuid'] if request.matched_route: data['context'] = request.matched_route.name rollbar.BASE_DATA_HOOK = hook kw = parse_settings(settings) access_token = kw.pop('access_token') environment = kw.pop('environment', 'production') if kw.get('scrub_fields'): kw['scrub_fields'] = set([str.strip(x) for x in kw.get('scrub_fields').split('\n') if x]) if kw.get('exception_level_filters'): r = DottedNameResolver() exception_level_filters = [] for line in kw.get('exception_level_filters').split('\n'): if line: dotted_path, level = line.split() try: cls = r.resolve(dotted_path) exception_level_filters.append((cls, level)) except ImportError: log.error('Could not import %r' % dotted_path) kw['exception_level_filters'] = exception_level_filters kw['enabled'] = asbool(kw.get('enabled', True)) rollbar.init(access_token, environment, **kw)
If there's no log configuration, set up a default handler.
def _ensure_log_handler(self): """ If there's no log configuration, set up a default handler. """ if log.handlers: return handler = logging.StreamHandler() formatter = logging.Formatter( '%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s') handler.setFormatter(formatter) log.addHandler(handler)
Get the current request object. Implementation varies on library support. Modified below when we know which framework is being used.
def get_request(): """ Get the current request object. Implementation varies on library support. Modified below when we know which framework is being used. """ # TODO(cory): add in a generic _get_locals_request() which # will iterate up through the call stack and look for a variable # that appears to be valid request object. for fn in (_get_bottle_request, _get_flask_request, _get_pyramid_request, _get_pylons_request): try: req = fn() if req is not None: return req except: pass return None
Saves configuration variables in this module's SETTINGS.
def init(access_token, environment='production', scrub_fields=None, url_fields=None, **kw): """ Saves configuration variables in this module's SETTINGS. access_token: project access token. Get this from the Rollbar UI: - click "Settings" in the top nav - click "Projects" in the left nav - copy-paste the appropriate token. environment: environment name. Can be any string; suggestions: 'production', 'development', 'staging', 'yourname' **kw: provided keyword arguments will override keys in SETTINGS. """ global SETTINGS, agent_log, _initialized, _transforms, _serialize_transform, _threads if scrub_fields is not None: SETTINGS['scrub_fields'] = list(scrub_fields) if url_fields is not None: SETTINGS['url_fields'] = list(url_fields) # Merge the extra config settings into SETTINGS SETTINGS = dict_merge(SETTINGS, kw) if _initialized: # NOTE: Temp solution to not being able to re-init. # New versions of pyrollbar will support re-initialization # via the (not-yet-implemented) configure() method. if not SETTINGS.get('suppress_reinit_warning'): log.warning('Rollbar already initialized. Ignoring re-init.') return SETTINGS['access_token'] = access_token SETTINGS['environment'] = environment if SETTINGS.get('allow_logging_basic_config'): logging.basicConfig() if SETTINGS.get('handler') == 'agent': agent_log = _create_agent_log() # We will perform these transforms in order: # 1. Serialize the payload to be all python built-in objects # 2. Scrub the payloads based on the key suffixes in SETTINGS['scrub_fields'] # 3. Scrub URLs in the payload for keys that end with 'url' # 4. Optional - If local variable gathering is enabled, transform the # trace frame values using the ShortReprTransform. _serialize_transform = SerializableTransform(safe_repr=SETTINGS['locals']['safe_repr'], whitelist_types=SETTINGS['locals']['whitelisted_types']) _transforms = [ ScrubRedactTransform(), _serialize_transform, ScrubTransform(suffixes=[(field,) for field in SETTINGS['scrub_fields']], redact_char='*'), ScrubUrlTransform(suffixes=[(field,) for field in SETTINGS['url_fields']], params_to_scrub=SETTINGS['scrub_fields']) ] # A list of key prefixes to apply our shortener transform to. The request # being included in the body key is old behavior and is being retained for # backwards compatibility. shortener_keys = [ ('request', 'POST'), ('request', 'json'), ('body', 'request', 'POST'), ('body', 'request', 'json'), ] if SETTINGS['locals']['enabled']: shortener_keys.append(('body', 'trace', 'frames', '*', 'code')) shortener_keys.append(('body', 'trace', 'frames', '*', 'args', '*')) shortener_keys.append(('body', 'trace', 'frames', '*', 'kwargs', '*')) shortener_keys.append(('body', 'trace', 'frames', '*', 'locals', '*')) shortener_keys.extend(SETTINGS['shortener_keys']) shortener = ShortenerTransform(safe_repr=SETTINGS['locals']['safe_repr'], keys=shortener_keys, **SETTINGS['locals']['sizes']) _transforms.append(shortener) _threads = queue.Queue() events.reset() filters.add_builtin_filters(SETTINGS) _initialized = True
Decorator for making error handling on AWS Lambda easier
def lambda_function(f): """ Decorator for making error handling on AWS Lambda easier """ @functools.wraps(f) def wrapper(event, context): global _CURRENT_LAMBDA_CONTEXT _CURRENT_LAMBDA_CONTEXT = context try: result = f(event, context) return wait(lambda: result) except: cls, exc, trace = sys.exc_info() report_exc_info((cls, exc, trace.tb_next)) wait() raise return wrapper
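A minimal usage sketch, assuming the decorator is exported at the pyrollbar module level as in the source above; the access token is a placeholder.
import rollbar

rollbar.init('YOUR_PROJECT_ACCESS_TOKEN', environment='production')

@rollbar.lambda_function
def handler(event, context):
    # Any uncaught exception raised here is reported to Rollbar before re-raising.
    return {'statusCode': 200, 'body': 'ok'}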
Reports an exception to Rollbar using exc_info (from calling sys.exc_info())
def report_exc_info(exc_info=None, request=None, extra_data=None, payload_data=None, level=None, **kw): """ Reports an exception to Rollbar, using exc_info (from calling sys.exc_info()) exc_info: optional, should be the result of calling sys.exc_info(). If omitted, sys.exc_info() will be called here. request: optional, a WebOb, Werkzeug-based or Sanic request object. extra_data: optional, will be included in the 'custom' section of the payload payload_data: optional, dict that will override values in the final payload (e.g. 'level' or 'fingerprint') kw: provided for legacy purposes; unused. Example usage: rollbar.init(access_token='YOUR_PROJECT_ACCESS_TOKEN') try: do_something() except: rollbar.report_exc_info(sys.exc_info(), request, {'foo': 'bar'}, {'level': 'warning'}) """ if exc_info is None: exc_info = sys.exc_info() try: return _report_exc_info(exc_info, request, extra_data, payload_data, level=level) except Exception as e: log.exception("Exception while reporting exc_info to Rollbar. %r", e)
Reports an arbitrary string message to Rollbar.
def report_message(message, level='error', request=None, extra_data=None, payload_data=None): """ Reports an arbitrary string message to Rollbar. message: the string body of the message level: level to report at. One of: 'critical', 'error', 'warning', 'info', 'debug' request: the request object for the context of the message extra_data: dictionary of params to include with the message. 'body' is reserved. payload_data: param names to pass in the 'data' level of the payload; overrides defaults. """ try: return _report_message(message, level, request, extra_data, payload_data) except Exception as e: log.exception("Exception while reporting message to Rollbar. %r", e)