def add_sibling(self, sibling):
    """
    Designate this a multi-feature representative and add a co-feature.

    Some features exist discontinuously on the sequence, and therefore cannot
    be declared with a single GFF3 entry (which can encode only a single
    interval). The canonical encoding for these types of features is called a
    multi-feature, in which a single feature is declared on multiple lines
    with multiple entries all sharing the same feature type and ID attribute.
    This is commonly done with coding sequence (CDS) features.

    In this package, each multi-feature has a single "representative" feature
    object, and all other objects/entries associated with that multi-feature
    are attached to it as "siblings".

    Invoking this method will designate the calling feature as the
    multi-feature representative and add the argument as a sibling.
    """
    assert self.is_pseudo is False
    if self.siblings is None:
        self.siblings = list()
        self.multi_rep = self
    sibling.multi_rep = self
    self.siblings.append(sibling)
def register(self, callback, name):
    'Register a callback on server and on connected clients.'
    server.CALLBACKS[name] = callback
    self.run("""
        window.skink.%s = function(args=[]) {
            window.skink.call("%s", args);
        }""" % (name, name))
def _create_subject_identifier(self, user_id, client_id, redirect_uri):
    # type: (str, str, str) -> str
    """
    Creates a subject identifier for the specified client and user, see
    <a href="http://openid.net/specs/openid-connect-core-1_0.html#Terminology">
    "OpenID Connect Core 1.0", Section 1.2</a>.

    :param user_id: local user identifier
    :param client_id: which client to generate a subject identifier for
    :param redirect_uri: the client's redirect_uri
    :return: a subject identifier for the user intended for the client who
        made the authentication request
    """
    supported_subject_types = self.configuration_information['subject_types_supported'][0]
    subject_type = self.clients[client_id].get('subject_type', supported_subject_types)
    sector_identifier = urlparse(redirect_uri).netloc
    return self.authz_state.get_subject_identifier(subject_type, user_id, sector_identifier)
def _redshift(distance, **kwargs):
    r"""Uses astropy to get redshift from the given luminosity distance.

    Parameters
    ----------
    distance : float
        The luminosity distance, in Mpc.
    \**kwargs :
        All other keyword args are passed to :py:func:`get_cosmology` to
        select a cosmology. If none provided, will use
        :py:attr:`DEFAULT_COSMOLOGY`.

    Returns
    -------
    float :
        The redshift corresponding to the given luminosity distance.
    """
    cosmology = get_cosmology(**kwargs)
    return z_at_value(cosmology.luminosity_distance, distance, units.Mpc)
def find_longest_match(self, alo, ahi, blo, bhi):
    """Find longest matching block in a[alo:ahi] and b[blo:bhi].

    Wrapper for the C implementation of this function.
    """
    besti, bestj, bestsize = _cdifflib.find_longest_match(self, alo, ahi, blo, bhi)
    return _Match(besti, bestj, bestsize)
def do_delete(endpoint, access_token):
    """Do an HTTP DELETE request and return JSON.

    Args:
        endpoint (str): Azure Resource Manager management endpoint.
        access_token (str): A valid Azure authentication token.

    Returns:
        HTTP response.
    """
    headers = {"Authorization": 'Bearer ' + access_token}
    headers['User-Agent'] = get_user_agent()
    return requests.delete(endpoint, headers=headers)
def update_lists(self):
    """Update packages list and ChangeLog.txt file after
    upgrade distribution
    """
    print("{0}Update the package lists ?{1}".format(
        self.meta.color["GREEN"], self.meta.color["ENDC"]))
    print("=" * 79)
    if self.msg.answer() in ["y", "Y"]:
        Update().repository(["slack"])
def movable_items(self):
    """Filter selection.

    Filter items of the selection that cannot be moved (i.e. are not
    instances of `Item`) and return the rest.
    """
    view = self.view
    if self._move_name_v:
        yield InMotion(self._item, view)
    else:
        selected_items = set(view.selected_items)
        for item in selected_items:
            if not isinstance(item, Item):
                continue
            yield InMotion(item, view)
def delete_maintenance_window(self, id, **kwargs):  # noqa: E501
    """Delete a specific maintenance window  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_maintenance_window(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerMaintenanceWindow
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_maintenance_window_with_http_info(id, **kwargs)  # noqa: E501
    else:
        (data) = self.delete_maintenance_window_with_http_info(id, **kwargs)  # noqa: E501
        return data
def create_ad(self, ad_content):
    """
    create_ad: create a new ad record; returns the new ad id on success,
    otherwise False.
    """
    if 'is_sandbox' not in ad_content:
        return False
    if ad_content['is_sandbox']:
        req = self.create_record(
            'sandboxad', ad_content, self.access_key, self.secret_key
        )
    else:
        req = self.create_record(
            'ad', ad_content, self.access_key, self.secret_key
        )
    if req.status_code == 201:
        return req.json()['ad_id']
    else:
        return False
def safe_import(self, name):
    """Helper utility for reimporting previously imported modules while inside the env"""
    module = None
    if name not in self._modules:
        self._modules[name] = importlib.import_module(name)
    module = self._modules[name]
    if not module:
        dist = next(iter(
            dist for dist in self.base_working_set
            if dist.project_name == name
        ), None)
        if dist:
            dist.activate()
        module = importlib.import_module(name)
    if name in sys.modules:
        try:
            six.moves.reload_module(module)
            six.moves.reload_module(sys.modules[name])
        except TypeError:
            del sys.modules[name]
            sys.modules[name] = self._modules[name]
    return module
def sign(s, passphrase, sig_format=SER_COMPACT, curve='secp160r1'):
    """ Signs `s' with passphrase `passphrase' """
    if isinstance(s, six.text_type):
        raise ValueError("Encode `s` to a bytestring yourself to" +
                         " prevent problems with different default encodings")
    curve = Curve.by_name(curve)
    privkey = curve.passphrase_to_privkey(passphrase)
    return privkey.sign(hashlib.sha512(s).digest(), sig_format)
def binary_shader_for_rules(self, output_jar, jar, rules, jvm_options=None):
    """Yields an `Executor.Runner` that will perform shading of the binary `jar` when `run()`.

    No default rules are applied; only the rules passed in as a parameter will be used.

    :param unicode output_jar: The path to dump the shaded jar to; will be over-written if
                               it exists.
    :param unicode jar: The path to the jar file to shade.
    :param list rules: The rules to apply for shading.
    :param list jvm_options: an optional sequence of options for the underlying jvm
    :returns: An `Executor.Runner` that can be `run()` to shade the given `jar`.
    :rtype: :class:`pants.java.executor.Executor.Runner`
    """
    with self.temporary_rules_file(rules) as rules_file:
        logger.debug('Running jarjar with rules:\n{}'.format(
            ' '.join(rule.render() for rule in rules)))
        yield self._executor.runner(classpath=self._jarjar_classpath,
                                    main='org.pantsbuild.jarjar.Main',
                                    jvm_options=jvm_options,
                                    args=['process', rules_file, jar, output_jar])
def apply(self, a, return_Ya=False):
    r"""Apply the projection to an array.

    The computation is carried out without explicitly forming the matrix
    corresponding to the projection (which would be an array with
    ``shape==(N,N)``).

    See also :py:meth:`_apply`.
    """
    # is projection the zero operator?
    if self.V.shape[1] == 0:
        Pa = numpy.zeros(a.shape)
        if return_Ya:
            return Pa, numpy.zeros((0, a.shape[1]))
        return Pa
    if return_Ya:
        x, Ya = self._apply(a, return_Ya=return_Ya)
    else:
        x = self._apply(a)
    for i in range(self.iterations - 1):
        z = a - x
        w = self._apply(z)
        x = x + w
    if return_Ya:
        return x, Ya
    return x
def order(self, order):
    """Returns name order key.

    Returns tuple with two strings that can be compared to other such tuple
    obtained from different name. Note that if you want locale-dependent
    ordering then you need to compare strings using locale-aware method
    (e.g. ``locale.strxfrm``).

    :param order: One of the ORDER_* constants.
    :returns: tuple of two strings
    """
    given = self.given
    surname = self.surname
    if order in (ORDER_MAIDEN_GIVEN, ORDER_GIVEN_MAIDEN):
        surname = self.maiden or self.surname

    # We are collating empty names to come after non-empty,
    # so instead of empty we return "2" and add "1" as prefix to others
    given = ("1" + given) if given else "2"
    surname = ("1" + surname) if surname else "2"

    if order in (ORDER_SURNAME_GIVEN, ORDER_MAIDEN_GIVEN):
        return (surname, given)
    elif order in (ORDER_GIVEN_SURNAME, ORDER_GIVEN_MAIDEN):
        return (given, surname)
    else:
        raise ValueError("unexpected order: {}".format(order))
def lastAnchor(self, block, column):
    """Find the last open bracket before the current line.
    Return (block, column, char) or (None, None, None)
    """
    currentPos = -1
    currentBlock = None
    currentColumn = None
    currentChar = None
    for char in '({[':
        try:
            foundBlock, foundColumn = self.findBracketBackward(block, column, char)
        except ValueError:
            continue
        else:
            pos = foundBlock.position() + foundColumn
            if pos > currentPos:
                currentBlock = foundBlock
                currentColumn = foundColumn
                currentChar = char
                currentPos = pos
    return currentBlock, currentColumn, currentChar
def set_volume(self, pct, channel=None):
    """
    Sets the sound volume to the given percentage [0-100] by calling
    ``amixer -q set <channel> <pct>%``.
    If the channel is not specified, it tries to determine the default one
    by running ``amixer scontrols``. If that fails as well, it uses the
    ``Playback`` channel, as that is the only channel on the EV3.
    """
    if channel is None:
        channel = self._get_channel()
    cmd_line = '/usr/bin/amixer -q set {0} {1:d}%'.format(channel, pct)
    Popen(shlex.split(cmd_line)).wait()
def get_what_txt(self):
    """
    Overrides the base behaviour defined in ValidationError in order to add
    details about the class field.
    :return:
    """
    return 'field [{field}] for class [{clazz}]'.format(
        field=self.get_variable_str(),
        clazz=self.validator.get_validated_class_display_name())
def remove_hairs_from_tags(dom):
    """
    Use :func:`.remove_hairs` on some of the tags:

        - mods:title
        - mods:placeTerm
    """
    transform_content(
        dom.match("mods:mods", "mods:titleInfo", "mods:title"),
        lambda x: remove_hairs(x.getContent())
    )
    transform_content(
        dom.match(
            "mods:originInfo",
            "mods:place",
            ["mods:placeTerm", {"type": "text"}]
        ),
        lambda x: remove_hairs(x.getContent())
    )
def __postCallAction_codebp(self, event):
    """
    Handles code breakpoint events on return from the function.

    @type  event: L{ExceptionEvent}
    @param event: Breakpoint hit event.
    """
    # If the breakpoint was accidentally hit by another thread,
    # pass it to the debugger instead of calling the "post" callback.
    #
    # XXX FIXME:
    # I suppose this check will fail under some weird conditions...
    #
    tid = event.get_tid()
    if tid not in self.__paramStack:
        return True

    # Remove the code breakpoint at the return address.
    pid = event.get_pid()
    address = event.breakpoint.get_address()
    event.debug.dont_break_at(pid, address)

    # Call the "post" callback.
    try:
        self.__postCallAction(event)

    # Forget the parameters.
    finally:
        self.__pop_params(tid)
def setcompress(self, comp_type, value=0, v2=0):
    """Compresses the dataset using a specified compression method.

    Args::

      comp_type    compression type, identified by one of the
                   SDC.COMP_xxx constants
      value, v2    auxiliary value(s) needed by some compression types
                     SDC.COMP_SKPHUFF  Skipping-Huffman; compression
                                       value=data size in bytes, v2 is ignored
                     SDC.COMP_DEFLATE  Gzip compression; value=deflate level
                                       (1 to 9), v2 is ignored
                     SDC.COMP_SZIP     Szip compression; value=encoding scheme
                                       (SDC.COMP_SZIP_EC or SDC.COMP_SZIP_NN),
                                       v2=pixels per block (2 to 32)

    Returns::

      None

    .. note::
      Starting with v0.8, an exception is always raised if pyhdf was
      installed with the NOCOMPRESS macro set.

    SDC.COMP_DEFLATE applies the GZIP compression to the dataset, and the
    value varies from 1 to 9, according to the level of compression desired.

    SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the
    HDF User's Guide for details about the encoding scheme and the number of
    pixels per block. SZIP is new with HDF 4.2.

    'setcompress' must be called before writing to the dataset. The dataset
    must be written all at once, unless it is appendable (has an unlimited
    dimension). Updating the dataset is not allowed. Refer to the HDF user's
    guide for more details on how to use data compression.

    C library equivalent: SDsetcompress
    """
    status = _C._SDsetcompress(self._id, comp_type, value, v2)
    _checkErr('setcompress', status, 'cannot execute')
def module_degree_zscore(W, ci, flag=0):
    """
    The within-module degree z-score is a within-module version of degree
    centrality.

    Parameters
    ----------
    W : NxN np.ndarray
        binary/weighted directed/undirected connection matrix
    ci : Nx1 np.array_like
        community affiliation vector
    flag : int
        Graph type. 0: undirected graph (default)
                    1: directed graph in degree
                    2: directed graph out degree
                    3: directed graph in and out degree

    Returns
    -------
    Z : Nx1 np.ndarray
        within-module degree Z-score
    """
    _, ci = np.unique(ci, return_inverse=True)
    ci += 1

    if flag == 2:
        W = W.copy()
        W = W.T
    elif flag == 3:
        W = W.copy()
        W = W + W.T

    n = len(W)
    Z = np.zeros((n,))  # number of vertices
    for i in range(1, int(np.max(ci) + 1)):
        Koi = np.sum(W[np.ix_(ci == i, ci == i)], axis=1)
        Z[np.where(ci == i)] = (Koi - np.mean(Koi)) / np.std(Koi)

    Z[np.where(np.isnan(Z))] = 0
    return Z
def display(self, typ, data):
    """ display section of typ with data """
    if hasattr(self, 'print_' + typ):
        getattr(self, 'print_' + typ)(data)
    elif not data:
        self._print("%s: %s" % (typ, data))
    elif isinstance(data, collections.Mapping):
        self._print("\n", typ)
        for k, v in data.items():
            self.print(k, v)
    elif isinstance(data, (list, tuple)):
        # tabular data layout for lists of dicts
        if isinstance(data[0], collections.Mapping):
            self.display_set(typ, data, self._get_columns(data[0]))
        else:
            for each in data:
                self.print(typ, each)
    else:
        self._print("%s: %s" % (typ, data))
    self.fobj.flush()
def _init(self):
    """ Prepare to run (if not ready) """
    num = 0
    for field in self._fields:
        field._initialize()
        self._need_second_pass |= field._need_second_pass
    for field in self._fields:
        num += field.num_mutations()
    self._calculate_mutations(num)
    self._initialize_default_buffer()
def get_image_list(self, page=1, per_page=20):
    """Return a list of user's saved images

    :param page: (optional) Page number (default: 1)
    :param per_page: (optional) Number of images per page
        (default: 20, min: 1, max: 100)
    """
    url = self.api_url + '/api/images'
    params = {
        'page': page,
        'per_page': per_page
    }
    response = self._request_url(
        url, 'get', params=params, with_access_token=True)
    headers, result = self._parse_and_check(response)
    images = ImageList.from_list(result)
    images.set_attributes_from_headers(headers)
    return images
def bases_walker(cls):
    """
    Loop through all bases of cls

        >>> str = u'hai'
        >>> for base in bases_walker(unicode):
        ...     isinstance(str, base)
        True
        True

    :param cls: The class in which we want to loop through the base classes.
    """
    for base in cls.__bases__:
        yield base
        for more in bases_walker(base):
            yield more
def get_edges(ont):
    """
    Fetches all basic edges from a remote ontology
    """
    logging.info("QUERYING:" + ont)
    edges = [(c, SUBCLASS_OF, d) for (c, d) in fetchall_isa(ont)]
    edges += fetchall_svf(ont)
    edges += [(c, SUBPROPERTY_OF, d) for (c, d) in fetchall_subPropertyOf(ont)]
    if len(edges) == 0:
        logging.warn("No edges for {}".format(ont))
    return edges
def insertion_rate(Nref, Ninsertions, eps=numpy.spacing(1)):
    """Insertion rate

    Parameters
    ----------
    Nref : int >=0
        Number of entries in the reference.
    Ninsertions : int >=0
        Number of insertions.
    eps : float
        eps. Default value numpy.spacing(1)

    Returns
    -------
    insertion_rate : float
        Insertion rate
    """
    return float(Ninsertions / (Nref + eps))
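A quick usage sketch, assuming insertion_rate above is in scope; the values are made up and eps only guards against division by zero:

rate = insertion_rate(Nref=10, Ninsertions=2)
print(round(rate, 3))  # -> 0.2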
def request(self, host, handler, request_body, verbose):
    """
    Make an xmlrpc request.
    """
    headers = {'User-Agent': self.user_agent,
               #'Proxy-Connection': 'Keep-Alive',
               #'Content-Range': 'bytes oxy1.0/-1',
               'Accept': 'text/xml',
               'Content-Type': 'text/xml'}
    url = self._build_url(host, handler)
    try:
        resp = requests.post(url, data=request_body, headers=headers)
    except ValueError:
        raise
    except Exception:
        raise  # something went wrong
    else:
        try:
            resp.raise_for_status()
        except requests.RequestException as e:
            raise xmlrpc.ProtocolError(url, resp.status_code, str(e), resp.headers)
        else:
            return self.parse_response(resp)
def server_list(endpoint_id):
    """
    Executor for `globus endpoint server list`
    """
    # raises usage error on shares for us
    endpoint, server_list = get_endpoint_w_server_list(endpoint_id)

    if server_list == "S3":  # not GCS -- this is an S3 endpoint
        server_list = {"s3_url": endpoint["s3_url"]}
        fields = [("S3 URL", "s3_url")]
        text_format = FORMAT_TEXT_RECORD
    else:  # regular GCS host endpoint
        fields = (
            ("ID", "id"),
            ("URI", lambda s: (s["uri"] or "none (Globus Connect Personal)")),
        )
        text_format = FORMAT_TEXT_TABLE

    formatted_print(server_list, text_format=text_format, fields=fields)
def parse(tokens):
    """Parse the provided string to produce *args and **kwargs"""
    args = []
    kwargs = {}
    last = None
    for token in tokens:
        if token.startswith('--'):
            # If this is a keyword flag, but we've already got one that we've
            # parsed, then we're going to interpret it as a bool
            if last:
                kwargs[last] = True
            # See if it is the --foo=5 style
            last, _, value = token.strip('-').partition('=')
            if value:
                kwargs[last] = value
                last = None
        elif last != None:
            kwargs[last] = token
            last = None
        else:
            args.append(token)

    # If there's a dangling last, set that bool
    if last:
        kwargs[last] = True
    return args, kwargs
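A short usage sketch (assuming parse above is in scope; the tokens are illustrative) showing how flag-style tokens become kwargs while bare tokens become positional args:

args, kwargs = parse(['--foo=5', '--verbose', 'input.txt', 'extra'])
# '--foo=5'                         -> kwargs['foo'] = '5'
# '--verbose' followed by a value   -> kwargs['verbose'] = 'input.txt'
# 'extra'                           -> positional argument
print(args)    # ['extra']
print(kwargs)  # {'foo': '5', 'verbose': 'input.txt'}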
def spline_matrix2d(x, y, px, py, mask=None):
    """For boundary constraints, the first two and last two spline pieces
    are constrained to be part of the same cubic curve."""
    V = np.kron(spline_matrix(x, px), spline_matrix(y, py))
    lenV = len(V)
    if mask is not None:
        indices = np.nonzero(mask.T.flatten())
        if len(indices) > 1:
            indices = np.nonzero(mask.T.flatten())[1][0]
        newV = V.T[indices]
        V = newV.T
        V = V.reshape((V.shape[0], V.shape[1]))
    return V
def isin(self, values):
    """
    Compute boolean array of whether each index value is found in the
    passed set of values.

    Parameters
    ----------
    values : set or sequence of values

    Returns
    -------
    is_contained : ndarray (boolean dtype)
    """
    if not isinstance(values, type(self)):
        try:
            values = type(self)(values)
        except ValueError:
            return self.astype(object).isin(values)
    return algorithms.isin(self.asi8, values.asi8)
def metas(self, prefix=None, limit=None, delimiter=None):
    """
    RETURN THE METADATA DESCRIPTORS FOR EACH KEY
    """
    limit = coalesce(limit, TOO_MANY_KEYS)
    keys = self.bucket.list(prefix=prefix, delimiter=delimiter)
    prefix_len = len(prefix)
    output = []
    for i, k in enumerate(
        k for k in keys
        if len(k.key) == prefix_len or k.key[prefix_len] in [".", ":"]
    ):
        output.append({
            "key": strip_extension(k.key),
            "etag": convert.quote2string(k.etag),
            "expiry_date": Date(k.expiry_date),
            "last_modified": Date(k.last_modified)
        })
        if i >= limit:
            break
    return wrap(output)
def create_program_action(parent, text, name, icon=None, nt_name=None):
    """Create action to run a program"""
    if is_text_string(icon):
        icon = get_icon(icon)
    if os.name == 'nt' and nt_name is not None:
        name = nt_name
    path = programs.find_program(name)
    if path is not None:
        return create_action(parent, text, icon=icon,
                             triggered=lambda: programs.run_program(name))
def thumbnail_url(source, alias):
    """
    Return the thumbnail url for a source file using an aliased set of
    thumbnail options.

    If no matching alias is found, returns an empty string.

    Example usage::

        <img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
    """
    try:
        thumb = get_thumbnailer(source)[alias]
    except Exception:
        return ''
    return thumb.url
def record_ce_entries(self):
    # type: () -> bytes
    """
    Return a string representing the Rock Ridge entries in the Continuation Entry.

    Parameters:
     None.
    Returns:
     A string representing the Rock Ridge entry.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')

    return self._record(self.ce_entries)
def frigg(branch: str):
    """
    Performs necessary checks to ensure that the frigg build is one that
    should create releases.

    :param branch: The branch the environment should be running against.
    """
    assert os.environ.get('FRIGG_BUILD_BRANCH') == branch
    assert not os.environ.get('FRIGG_PULL_REQUEST')
def GET_AUTH(self):  # pylint: disable=arguments-differ
    """ GET request """
    auth_methods = self.user_manager.get_auth_methods()
    user_data = self.database.users.find_one(
        {"username": self.user_manager.session_username()})
    bindings = user_data.get("bindings", {})
    return self.template_helper.get_renderer().preferences.bindings(
        bindings, auth_methods, "", False)
def load_config_file(config_file_path, config_file):
    """
    Loads a config file, whether it is a yaml file or a .INI file.

    :param config_file_path: Path of the configuration file, used to infer the file format.
    :returns: Dictionary representation of the configuration file.
    """
    if config_file_path.lower().endswith(".yaml"):
        return yaml.load(config_file)
    if any(config_file_path.lower().endswith(extension) for extension in INI_FILE_EXTENSIONS):
        return load_config_from_ini_file(config_file)

    # At this point we have to guess the format of the configuration file.
    try:
        return yaml.load(config_file)
    except yaml.YAMLError:
        pass
    try:
        return load_config_from_ini_file(config_file)
    except:
        pass
    raise Exception("Could not load configuration file!")
def git_pretty():
    """returns a pretty summary of the commit or unknown if not in git repo"""
    if git_repo() is None:
        return "unknown"
    pretty = subprocess.check_output(
        ["git", "log", "--pretty=format:%h %s", "-n", "1"])
    pretty = pretty.decode("utf-8")
    pretty = pretty.strip()
    return pretty
def _helper_wrefs(self, targets, recurse=True):
    """Internal helper function"""
    for c in self:
        if isinstance(c, Word) or isinstance(c, Morpheme) or isinstance(c, Phoneme):
            targets.append(c)
        elif isinstance(c, WordReference):
            try:
                targets.append(self.doc[c.id])  # try to resolve
            except KeyError:
                targets.append(c)  # add unresolved
        elif isinstance(c, AbstractSpanAnnotation) and recurse:
            # recursion
            c._helper_wrefs(targets)  # pylint: disable=protected-access
        elif isinstance(c, Correction) and c.auth:
            # recurse into corrections
            for e in c:
                if isinstance(e, AbstractCorrectionChild) and e.auth:
                    for e2 in e:
                        if isinstance(e2, AbstractSpanAnnotation):
                            # recursion
                            e2._helper_wrefs(targets)
def where(self, where_string, **kwargs):
    """
    Select from a given Table or Column with the specified WHERE clause string.

    Additional keywords are passed to ExploreSqlDB.query(). For convenience,
    if there is no '=', '>', '<', 'like', or 'LIKE' clause in the WHERE
    statement, .where() tries to match the input string against the primary
    key column of the Table.

    Args:
        where_string (str): Where clause for the query against the Table or Column

    Kwargs:
        **kwargs: Optional **kwargs passed to the QueryDb.query() call

    Returns:
        result (pandas.DataFrame or sqlalchemy ResultProxy): Query result as
            a DataFrame (default) or sqlalchemy result.
    """
    col, id_col = self._query_helper(by=None)
    where_string = str(where_string)  # Coerce here, for .__contains__
    where_operators = ["=", ">", "<", "LIKE", "like"]
    if np.any([where_string.__contains__(w) for w in where_operators]):
        select = ("SELECT %s FROM %s WHERE %s" %
                  (col, self.table.name, where_string))
    else:
        select = ("SELECT %s FROM %s WHERE %s = %s" %
                  (col, self.table.name, id_col, where_string))
    return self._db.query(select, **kwargs)
def _setLearningMode(self):
    """
    Sets the learning mode.
    """
    for region in self.L4Regions:
        region.setParameter("learn", True)
    for region in self.L2Regions:
        region.setParameter("learningMode", True)
def to_json(self) -> dict:
    """export the Deck object to json-ready format"""
    d = self.__dict__
    d['p2th_wif'] = self.p2th_wif
    return d
def asset_add_asset(self, *args, **kwargs):
    """Add more assets to the asset.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_asset:
        return
    dialog = AssetAdderDialog(asset=self.cur_asset)
    dialog.exec_()
    assets = dialog.assets
    atypes = {}
    for c in self.asset_asset_model.root.childItems:
        atypes[c.internal_data()] = c
    for asset in assets:
        atypeitem = atypes.get(asset.atype)
        if not atypeitem:
            atypedata = djitemdata.AtypeItemData(asset.atype)
            atypeitem = treemodel.TreeItem(atypedata, self.asset_asset_model.root)
            atypes[asset.atype] = atypeitem
        assetdata = djitemdata.AssetItemData(asset)
        treemodel.TreeItem(assetdata, atypeitem)
    self.cur_asset.save()
def get_profile(self, img_type, coordinate, num_points):
    """ Extract a profile from (lat1, lon1) to (lat2, lon2)

    Args:
        img_type (str): Either lola or wac.
        coordinate (float, float, float, float): A tuple
            ``(lon0, lon1, lat0, lat1)`` with:

            - lon0: First point longitude
            - lat0: First point latitude
            - lon1: Second point longitude
            - lat1: Second point latitude
        num_points (int): Number of points to use in the interpolation
            process.

    Note:
        Be careful, longitude has to be in between 0-360 !
    """
    lon0, lon1, lat0, lat1 = coordinate
    X, Y, Z = self.get_arrays(img_type)
    y0, x0 = np.argmin(np.abs(X[0, :] - lon0)), np.argmin(np.abs(Y[:, 0] - lat0))
    y1, x1 = np.argmin(np.abs(X[0, :] - lon1)), np.argmin(np.abs(Y[:, 0] - lat1))
    x, y = np.linspace(x0, x1, num_points), np.linspace(y0, y1, num_points)
    zi = scipy.ndimage.map_coordinates(Z, np.vstack((x, y)))
    return zi
def _send_request(self):
    """
    Sends the request to the backend.
    """
    if isinstance(self._worker, str):
        classname = self._worker
    else:
        classname = '%s.%s' % (self._worker.__module__, self._worker.__name__)
    self.request_id = str(uuid.uuid4())
    self.send({'request_id': self.request_id,
               'worker': classname,
               'data': self._args})
def md_to_obj(cls, file_path=None, text='', columns=None, key_on=None,
              ignore_code_blocks=True, eval_cells=True):
    """
    This will convert a mark down file to a seaborn table

    :param file_path: str of the path to the file
    :param text: str of the mark down text
    :param columns: list of str of columns to use
    :param key_on: list of str of columns to key on
    :param ignore_code_blocks: bool if true will filter out any lines
        between ```
    :param eval_cells: bool if True will try to evaluate numbers
    :return: SeabornTable
    """
    return cls.mark_down_to_obj(file_path=file_path, text=text,
                                columns=columns, key_on=key_on,
                                ignore_code_blocks=ignore_code_blocks,
                                eval_cells=eval_cells)
def unload(self):
    """unload module"""
    self.settings.set('speech', 0)
    if self.mpstate.functions.say == self.mpstate.functions.say:
        self.mpstate.functions.say = self.old_mpstate_say_function
    self.kill_speech_dispatcher()
def write_result(self, url_data):
    """Write url_data.result."""
    self.write(self.part("result") + self.spaces("result"))
    if url_data.valid:
        color = self.colorvalid
        self.write(_("Valid"), color=color)
    else:
        color = self.colorinvalid
        self.write(_("Error"), color=color)
    if url_data.result:
        self.write(u": " + url_data.result, color=color)
    self.writeln()
def download(self, obj, directory, structure=True):
    """
    Fetches the object from storage, and writes it to the specified
    directory. The directory must exist before calling this method.

    If the object name represents a nested folder structure, such as
    "foo/bar/baz.txt", that folder structure will be created in the target
    directory by default. If you do not want the nested folders to be
    created, pass `structure=False` in the parameters.
    """
    if not os.path.isdir(directory):
        raise exc.FolderNotFound("The directory '%s' does not exist." % directory)
    obj_name = utils.get_name(obj)
    path, fname = os.path.split(obj_name)
    if structure:
        fullpath = os.path.join(directory, path)
        if not os.path.exists(fullpath):
            os.makedirs(fullpath)
        target = os.path.join(fullpath, fname)
    else:
        target = os.path.join(directory, fname)
    with open(target, "wb") as dl:
        content = self.fetch(obj)
        try:
            dl.write(content)
        except UnicodeEncodeError:
            encoding = pyrax.get_encoding()
            dl.write(content.encode(encoding))
def _load(self):
    """
    Load a configuration file. This method will be called when the Config
    class is instantiated. The configuration file can be json or yaml.
    """
    if os.path.isdir(self._path):
        for file_ext in ('yml', 'yaml', 'json'):
            test_path = os.path.join(self._path, 'freight-forwarder.{0}'.format(file_ext))
            if os.path.isfile(test_path):
                self._path = test_path
                break

    if os.path.isfile(self._path):
        file_name, file_extension = os.path.splitext(self._path)
        with open(self._path, 'r') as config_file:
            if file_extension in ('.yaml', '.yml'):
                self._load_yml_config(config_file.read())
            elif file_extension == '.json':
                try:
                    config_data = json.loads(config_file.read())
                    self._data = normalize_keys(config_data)
                except Exception:
                    raise SyntaxError("There is a syntax error in your freight-forwarder config.")
            else:
                raise TypeError("Configuration file must be yaml or json.")
    else:
        raise LookupError("Was unable to find a freight-forwarder configuration file.")
def add_to_package_numpy(self, root, ndarray, node_path, target, source_path,
                         transform, custom_meta):
    """
    Save a Numpy array to the store.
    """
    filehash = self.save_numpy(ndarray)
    metahash = self.save_metadata(custom_meta)
    self._add_to_package_contents(root, node_path, [filehash], target,
                                  source_path, transform, metahash)
def dispatch(self, request, **kwargs):
    """
    Entry point for this class, here we decide basic stuff
    """
    # Delete method must happen with POST not with GET
    if request.method == 'POST':
        # Check if this is a webservice request
        self.__authtoken = (bool(getattr(self.request, "authtoken", False)))
        self.json_worker = self.__authtoken or (self.json is True)
        # Call the base implementation
        return super(GenDelete, self).dispatch(request, **kwargs)
    else:
        json_answer = json.dumps({
            'error': True,
            'errortxt': _('Method not allowed, use POST to delete or DELETE on the detail url'),
        })
        return HttpResponse(json_answer, content_type='application/json')
def is_array(self, key):
    """Return True if variable is a numpy array"""
    data = self.model.get_data()
    return isinstance(data[key], (ndarray, MaskedArray))
def copy(self):
    """
    Copy the text in the Entry() and place it on the clipboard.
    """
    try:
        pygame.scrap.put(SCRAP_TEXT, self.get())
        return True
    except:
        # pygame.scrap is experimental, allow for changes
        return False
def distance_matrix(a, b, periodic):
    """Calculate a distance matrix between coordinate sets a and b"""
    a = a
    b = b[:, np.newaxis]
    return periodic_distance(a, b, periodic)
def pysal_Geary(self, **kwargs):
    """
    Compute Geary's C for GeoRaster

    Usage:
        geo.pysal_C(permutations=1000, rook=True)

    arguments passed to raster_weights() and pysal.Geary
    See help(gr.raster_weights), help(pysal.Geary) for options
    """
    if self.weights is None:
        self.raster_weights(**kwargs)
    rasterf = self.raster.flatten()
    rasterf = rasterf[rasterf.mask == False]
    self.Geary = pysal.Geary(rasterf, self.weights, **kwargs)
def list_database(db):
    """Print credentials as a table"""
    credentials = db.credentials()
    if credentials:
        table = Table(
            db.config['headers'],
            table_format=db.config['table_format'],
            colors=db.config['colors'],
            hidden=db.config['hidden'],
            hidden_string=db.config['hidden_string'],
        )
        click.echo(table.render(credentials))
def strip_hidden(key_tuples, visibilities):
    """Filter each tuple according to visibility.

    Args:
        key_tuples: A sequence of tuples of equal length (i.e. rectangular)
        visibilities: A sequence of booleans equal in length to the tuples
            contained in key_tuples.

    Returns:
        A sequence equal in length to key_tuples where the items are tuples
        with a length corresponding to the number of items in visibility
        which are True.
    """
    result = []
    for key_tuple in key_tuples:
        if len(key_tuple) != len(visibilities):
            raise ValueError(
                "length of key tuple {} is not equal to length of visibilities {}".format(
                    key_tuple, visibilities
                )
            )
        filtered_tuple = tuple(item for item, visible in zip(key_tuple, visibilities) if visible)
        result.append(filtered_tuple)
    return result
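A brief usage sketch (assuming strip_hidden above is in scope; the data is made up); each tuple keeps only the positions whose visibility flag is True:

rows = [('id-1', 'secret', 'alice'), ('id-2', 'hidden', 'bob')]
visible = [True, False, True]
print(strip_hidden(rows, visible))
# -> [('id-1', 'alice'), ('id-2', 'bob')]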
def take_steps(self, number_of_steps: int = 1, step_size: Timedelta = None,
               with_logging: bool = True):
    """Run the simulation for the given number of steps.

    Parameters
    ----------
    number_of_steps
        The number of steps to take.
    step_size
        An optional size of step to take. Must be the same type as the
        simulation clock's step size (usually a pandas.Timedelta).
    with_logging
        Whether or not to log the simulation steps. Only works in an ipython
        environment.
    """
    if not isinstance(number_of_steps, int):
        raise ValueError('Number of steps must be an integer.')

    if run_from_ipython() and with_logging:
        for _ in log_progress(range(number_of_steps), name='Step'):
            self.step(step_size)
    else:
        for _ in range(number_of_steps):
            self.step(step_size)
def get_obsmeta(self, lcid):
    """Get the observation metadata for the given id.
    This is table 3 of Sesar 2010
    """
    if self._obsdata is None:
        self._obsdata = fetch_rrlyrae_fitdata()
    i = np.where(self._obsdata['id'] == lcid)[0]
    if len(i) == 0:
        raise ValueError("invalid lcid: {0}".format(lcid))
    return self._obsdata[i[0]]
def pad_block(block, block_size):
    """Pad a block to block_size with its most frequent value"""
    unique_vals, unique_counts = np.unique(block, return_counts=True)
    most_frequent_value = unique_vals[np.argmax(unique_counts)]
    return np.pad(block,
                  tuple((0, desired_size - actual_size)
                        for desired_size, actual_size in zip(block_size, block.shape)),
                  mode="constant",
                  constant_values=most_frequent_value)
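A quick usage sketch (assuming pad_block above and numpy imported as np are in scope; the array is made up); a 2x3 block is padded out to the requested 4x4 shape with its most frequent value:

block = np.array([[1, 1, 2],
                  [1, 3, 1]])
padded = pad_block(block, block_size=(4, 4))
print(padded.shape)  # (4, 4)
print(padded[3, 3])  # 1, the most frequent value in the block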
def linterp(self, setx, sety, x):
    """
    Linear interp of model data values between time steps
    """
    if math.isnan(sety[0]) or math.isnan(setx[0]):
        return np.nan
    #if math.isnan(sety[0]):
    #    sety[0] = 0.
    #if math.isnan(sety[1]):
    #    sety[1] = 0.
    return sety[0] + (x - setx[0]) * ((sety[1] - sety[0]) / (setx[1] - setx[0]))
def register_measurements(self, end, rows, between, refresh_presision):
    """Register the measurements if there are any and close the
    configuration; if there are no measurements, clean the temporal file
    on disk.

    Keyword arguments:
    end -- datetime of the moment when the configuration goes inactive
    rows -- measurement rows to register
    between -- time between integral_measurements in seconds
    refresh_presision -- time between sensor values that compose the
                         integral_measurements
    """
    if not self.end and len(rows) > 0:
        self.append_rows(rows, between, refresh_presision)
        self.go_inactive(end)
        self.save()
def _merge_dicts(first, second):
    """Merge the 'second' multiple-dictionary into the 'first' one."""
    new = deepcopy(first)
    for k, v in second.items():
        if isinstance(v, dict) and v:
            ret = _merge_dicts(new.get(k, dict()), v)
            new[k] = ret
        else:
            new[k] = second[k]
    return new
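A small usage sketch (assuming _merge_dicts above and `from copy import deepcopy` are in scope; the dicts are made up); nested dicts are merged recursively, while other values are simply overwritten:

defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
overrides = {'db': {'port': 5433}, 'debug': True}
print(_merge_dicts(defaults, overrides))
# -> {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}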
def _html_image(page):
    """
    returns HTML img tag
    """
    source = _image(page)
    if not source:
        return
    alt = page.data.get('label') or page.data.get('title')
    img = "<img src=\"%s\"" % source
    img += " alt=\"%s\" title=\"%s\" " % (alt, alt)
    img += "align=\"right\" width=\"240\">"
    return img
def coarsegrain(F, sets):
    r"""Coarse-grains the flux to the given sets

    $fc_{i,j} = \sum_{i \in I, j \in J} f_{i,j}$

    Note that if you coarse-grain a net flux, it does not necessarily have a
    net flux property anymore. If you want to make sure you get a netflux,
    use to_netflux(coarsegrain(F, sets)).

    Parameters
    ----------
    F : (n, n) ndarray
        Matrix of flux values between pairs of states.
    sets : list of array-like of ints
        The sets of states onto which the flux is coarse-grained.
    """
    nnew = len(sets)
    Fin = F.tocsr()
    Fc = csr_matrix((nnew, nnew))
    for i in range(0, nnew - 1):
        for j in range(i, nnew):
            I = list(sets[i])
            J = list(sets[j])
            Fc[i, j] = (Fin[I, :][:, J]).sum()
            Fc[j, i] = (Fin[J, :][:, I]).sum()
    return Fc
def _fix_mappings(self):
    """ Add computed stuff to mappings. """
    self._mapping.update(
        (key + '=', val + '=')
        for key, val in self._mapping.items()
        if not key.endswith('=')
    )
    if config.debug:
        self.LOG.debug("CMD MAPPINGS ARE: %r" % (self._mapping,))
def _worker_thread_transfer(self):
    # type: (Uploader) -> None
    """Worker thread transfer
    :param Uploader self: this
    """
    while not self.termination_check:
        try:
            ud, ase, offsets, data = self._transfer_queue.get(
                block=False, timeout=0.1)
        except queue.Empty:
            continue
        try:
            self._process_transfer(ud, ase, offsets, data)
        except Exception as e:
            with self._upload_lock:
                self._exceptions.append(e)
def _detail_participant(
        self,
        channel_identifier: ChannelID,
        participant: Address,
        partner: Address,
        block_identifier: BlockSpecification,
) -> ParticipantDetails:
    """ Returns a dictionary with the channel participant information. """
    data = self._call_and_check_result(
        block_identifier,
        'getChannelParticipantInfo',
        channel_identifier=channel_identifier,
        participant=to_checksum_address(participant),
        partner=to_checksum_address(partner),
    )
    return ParticipantDetails(
        address=participant,
        deposit=data[ParticipantInfoIndex.DEPOSIT],
        withdrawn=data[ParticipantInfoIndex.WITHDRAWN],
        is_closer=data[ParticipantInfoIndex.IS_CLOSER],
        balance_hash=data[ParticipantInfoIndex.BALANCE_HASH],
        nonce=data[ParticipantInfoIndex.NONCE],
        locksroot=data[ParticipantInfoIndex.LOCKSROOT],
        locked_amount=data[ParticipantInfoIndex.LOCKED_AMOUNT],
    )
def params_as_tensors_for(*objs, convert=True):
    """
    Context manager which changes the representation of parameters and data
    holders for the specific parameterized object(s).

    This can also be used to turn off tensor conversion functions wrapped
    with `params_as_tensors`:

    ```
    @gpflow.params_as_tensors
    def compute_something(self):  # self is parameterized object.
        s = tf.reduce_sum(self.a)  # self.a is a parameter.
        with params_as_tensors_for(self, convert=False):
            b = self.c.constrained_tensor
        return s + b
    ```

    :param objs: one or more instances of classes deriving from Parameterized
    :param convert: Flag which is used for turning the tensor conversion
        feature on, `True`, or turning it off, `False`.
    """
    objs = set(objs)  # remove duplicate objects so the tensor mode won't be changed before saving
    prev_values = [_params_as_tensors_enter(o, convert) for o in objs]
    try:
        yield
    finally:
        for o, pv in reversed(list(zip(objs, prev_values))):
            _params_as_tensors_exit(o, pv)
def fit(self, x, y, dcoef='none'):
    """
    performs the fit

    x, y : list
        Matching data arrays that define a numerical function y(x), this is
        the data to be fitted.
    dcoef : list or string
        You can provide a different guess for the coefficients, or provide
        the string 'none' to use the initial guess. The default is 'none'.

    Returns
    -------
    ierr
        Values between 1 and 4 signal success.

    Notes
    -----
    self.fcoef contains the fitted coefficients.
    """
    self.x = x
    self.y = y
    if dcoef != 'none':
        coef = dcoef
    else:
        coef = self.coef
    fcoef = optimize.leastsq(self.residual, coef, args=(y, self.func, x))
    self.fcoef = fcoef[0].tolist()
    return fcoef[1]
def parse_charset(charset):
    """
    Finds out whether there are intervals to expand and creates the charset
    """
    import re
    regex = r'(\w-\w)'
    pat = re.compile(regex)
    found = pat.findall(charset)
    result = ''
    if found:
        for element in found:
            for char in char_range(element[0], element[-1]):
                result += char
        return result
    return charset
def reset(self, indices=None):
    """Resets environments at given indices.

    Does any preprocessing and adds rollouts to history.

    Args:
        indices: Indices of environments to reset.

    Returns:
        Batch of initial observations of reset environments.

    Raises:
        ValueError: when there's no current epoch.
    """
    if self._store_rollouts and self.current_epoch is None:
        raise ValueError(
            "No current epoch. start_new_epoch() should first be called."
        )

    if indices is None:
        indices = np.arange(self.batch_size)
    new_obs = self._reset(indices)
    if self._should_preprocess_on_reset:
        new_obs = self._preprocess_observations(new_obs)
    if self._store_rollouts:
        encoded_obs = self._encode_observations(new_obs)
        for (index, ob) in zip(indices, encoded_obs):
            frame = self._current_batch_frames[index]
            if frame is not None:
                rollout = self._current_batch_rollouts[index]
                rollout.append(frame._replace(action=0))
                self._current_epoch_rollouts.append(rollout)
                self._current_batch_rollouts[index] = []
            self._current_batch_frames[index] = Frame(
                observation=ob, reward=0, unclipped_reward=0, done=False,
                action=None
            )
    return new_obs
def mark_process_dead(pid, path=None):
    """Do bookkeeping for when one process dies in a multi-process setup."""
    if path is None:
        path = os.environ.get('prometheus_multiproc_dir')
    for f in glob.glob(os.path.join(path, 'gauge_livesum_{0}.db'.format(pid))):
        os.remove(f)
    for f in glob.glob(os.path.join(path, 'gauge_liveall_{0}.db'.format(pid))):
        os.remove(f)
def add(self, labels, value):
    """Add adds a single observation to the summary."""
    if type(value) not in (float, int):
        raise TypeError("Summary only works with digits (int, float)")

    # We have already a lock for data but not for the estimator
    with mutex:
        try:
            e = self.get_value(labels)
        except KeyError:
            # Initialize quantile estimator
            e = quantile.Estimator(*self.__class__.DEFAULT_INVARIANTS)
            self.set_value(labels, e)
        e.observe(float(value))
def headerData(self, index, orientation, role):
    """ QHeaderView respects the following item data roles:
    TextAlignmentRole, DisplayRole, FontRole, DecorationRole,
    ForegroundRole, BackgroundRole.
    """
    d = self.declaration
    if orientation == Qt.Horizontal and role == Qt.DisplayRole:
        try:
            return d.horizontal_headers[index] \
                if d.horizontal_headers else index
        except IndexError:
            return index
    elif orientation == Qt.Vertical and role == Qt.DisplayRole:
        try:
            return d.vertical_headers[index] \
                if d.vertical_headers else index
        except IndexError:
            return index
    return None
def time_to_first_byte(self):
    """
    Time to first byte of the page request in ms
    """
    # The unknown page is just a placeholder for entries with no page ID.
    # As such, it would not have a TTFB
    if self.page_id == 'unknown':
        return None
    ttfb = 0
    for entry in self.entries:
        if entry['response']['status'] == 200:
            for k, v in iteritems(entry['timings']):
                if k != 'receive':
                    if v > 0:
                        ttfb += v
            break
        else:
            ttfb += entry['time']
    return ttfb
def _on_item_clicked(self, item):
    """
    Go to the item position in the editor.
    """
    if item:
        name = item.data(0, QtCore.Qt.UserRole)
        if name:
            go = name.block.blockNumber()
            helper = TextHelper(self._editor)
            if helper.current_line_nbr() != go:
                helper.goto_line(go, column=name.column)
            self._editor.setFocus()
def analyzers_mapping(cls):
    """
    Return instance of itself where all used properties are set to
    :class:`FuncInfo`.

    This method is used by the database, which maps all the properties
    defined here to itself, runs the functions as new processes and stores
    the result in itself.

    Because it knows how many properties are there, it may also track the
    progress, which is then transported to the frontend and displayed in
    form of progress bar.

    Returns:
        obj: :class:`Model` instance.
    """
    import analyzers

    return cls(
        title_tags=_compose_func(analyzers.get_title_tags),
        place_tags=_compose_func(
            analyzers.get_place_tags,
            lambda req_info: (req_info.index, req_info.domain)
        ),
        lang_tags=_compose_func(analyzers.get_lang_tags),
        keyword_tags=_compose_func(analyzers.get_keyword_tags),

        # yep, authors of webpage are actually publishers
        publisher_tags=_compose_func(analyzers.get_author_tags),

        annotation_tags=_compose_func(analyzers.get_annotation_tags),
        creation_dates=_compose_func(
            analyzers.get_creation_date_tags,
            lambda req_info: (req_info.url, req_info.domain)
        ),
    )
def print_msg(msg, ofd, hdr):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L929.

    Positional arguments:
    msg -- Netlink message (nl_msg class instance).
    ofd -- function to call with arguments similar to `logging.debug`.
    hdr -- Netlink message header (nlmsghdr class instance).
    """
    payloadlen = c_int(nlmsg_len(hdr))
    attrlen = 0
    data = nlmsg_data(hdr)
    ops = nl_cache_ops_associate_safe(msg.nm_protocol, hdr.nlmsg_type)
    if ops:
        attrlen = nlmsg_attrlen(hdr, ops.co_hdrsize)
        payloadlen.value -= attrlen
    if msg.nm_protocol == libnl.linux_private.netlink.NETLINK_GENERIC:
        data = print_genl_msg(msg, ofd, hdr, ops, payloadlen)
    if payloadlen.value:
        ofd(' [PAYLOAD] %d octets', payloadlen.value)
        dump_hex(ofd, data, payloadlen.value, 0)
    if attrlen:
        attrs = nlmsg_attrdata(hdr, ops.co_hdrsize)
        attrlen = nlmsg_attrlen(hdr, ops.co_hdrsize)
        dump_attrs(ofd, attrs, attrlen, 0)
def _declareTimeoutExceeded(self, ev_data: UpgradeLogData):
    """
    This function is called when time for upgrade is up
    """
    logger.info("Timeout exceeded for {}:{}"
                .format(ev_data.when, ev_data.version))

    last = self._actionLog.last_event
    # TODO test this
    if (last and last.ev_type == UpgradeLog.Events.failed
            and last.data == ev_data):
        return None

    self._action_failed(ev_data, reason="exceeded upgrade timeout")

    self._unscheduleAction()
    self._actionFailedCallback()
def send_webhook(config, payload):
    """Sends a HTTP request to the configured server.

    All exceptions are suppressed but emit a warning message in the log.
    """
    try:
        response = requests.post(
            config['webhook_url'],
            data=json.dumps(payload, cls=ModelJSONEncoder),
            headers={config['api_key_header_name']: config['api_key']},
        )
    except Exception as e:
        logger.warning('Unable to send webhook: ({0}) {1}'.format(
            e.__class__.__name__,
            e.message,
        ))
    else:
        logger.debug('Webhook response: ({0}) {1}'.format(
            response.status_code,
            response.text,
        ))
def private_key_to_address(self, pk):
    """
    Convert a private key (in hex format) into an address.
    """
    pub = privtopub(pk)
    pub_byte, priv_byte = get_magic_bytes(self.crypto)

    if priv_byte >= 128:
        priv_byte -= 128  # pybitcointools bug

    return pubtoaddr(pub, pub_byte)
def resizeEvent(self, event):
    """
    Reimplements the :meth:`Basic_QPlainTextEdit.resizeEvent` method.

    :param event: Event.
    :type event: QEvent
    """
    Basic_QPlainTextEdit.resizeEvent(self, event)
    self.__margin_area_LinesNumbers_widget.update_geometry()
def is_ready(self):
    """
    Check if pod is in READY condition

    :return: bool
    """
    if PodCondition.READY in self.get_conditions():
        logger.info("Pod: %s in namespace: %s is ready!", self.name, self.namespace)
        return True
    return False
def _len_lcs(x, y):
    """Returns the length of the Longest Common Subsequence between two seqs.

    Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence

    Args:
        x: sequence of words
        y: sequence of words

    Returns
        integer: Length of LCS between x and y
    """
    table = _lcs(x, y)
    n, m = len(x), len(y)
    return table[n, m]
def delete_component(self, id):
    """Delete component by id.

    :param id: ID of the component to use
    :type id: str
    :rtype: Response
    """
    url = self._get_url('component/' + str(id))
    return self._session.delete(url)
def find_cached_dm(self):
    """
    Find filename where cached data model json is stored.

    Returns
    ---------
    model_file : str
        data model json file location
    """
    pmag_dir = find_pmag_dir.get_pmag_dir()
    if pmag_dir is None:
        pmag_dir = '.'
    model_file = os.path.join(pmag_dir, 'pmagpy', 'data_model', 'data_model.json')
    # for py2app:
    if not os.path.isfile(model_file):
        model_file = os.path.join(pmag_dir, 'data_model', 'data_model.json')
    if not os.path.isfile(model_file):
        model_file = os.path.join(os.path.split(os.path.dirname(__file__))[0],
                                  'pmagpy', 'data_model', 'data_model.json')
    if not os.path.isfile(model_file):
        model_file = os.path.join(os.path.split(os.path.dirname(__file__))[0],
                                  'data_model', 'data_model.json')
    return model_file
def _index_param_value(num_samples, v, indices):
    """Private helper function for parameter value indexing.

    This determines whether a fit parameter `v` to a SearchCV.fit
    should be indexed along with `X` and `y`. Note that this differs
    from the scikit-learn version. They pass `X` and compute num_samples.
    We pass `num_samples` instead.
    """
    if not _is_arraylike(v) or _num_samples(v) != num_samples:
        # pass through: skip indexing
        return v
    if sp.issparse(v):
        v = v.tocsr()
    return safe_indexing(v, indices)
def matches(self, msg_seq: int, msg: MessageInterface) -> bool:
    """The message matches if all the defined search key criteria match.

    Args:
        msg_seq: The message sequence ID.
        msg: The message object.
    """
    return all(crit.matches(msg_seq, msg) for crit in self.all_criteria)
def summary(self):
    """
    Creates a text string representing the current query and its children
    for this item.

    :return     <str>
    """
    child_text = []
    for c in range(self.childCount()):
        child = self.child(c)
        text = [child.text(0), child.text(1), child.text(2), child.text(3)]
        text = map(str, text)
        while '' in text:
            text.remove('')
        child_text.append(' '.join(text))
    return ' '.join(child_text)
def set_check(self, name, state):
    """set a status value"""
    if self.child.is_alive():
        self.parent_pipe.send(CheckItem(name, state))
def _split_token_to_subtokens(token, subtoken_dict, max_subtoken_length):
    """Splits a token into subtokens defined in the subtoken dict."""
    ret = []
    start = 0
    token_len = len(token)
    while start < token_len:
        # Find the longest subtoken, so iterate backwards.
        for end in xrange(min(token_len, start + max_subtoken_length), start, -1):
            subtoken = token[start:end]
            if subtoken in subtoken_dict:
                ret.append(subtoken)
                start = end
                break
        else:  # Did not break
            # If there is no possible encoding of the escaped token then one of the
            # characters in the token is not in the alphabet. This should be
            # impossible and would be indicative of a bug.
            raise ValueError("Was unable to split token \"%s\" into subtokens." %
                             token)
    return ret
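An illustrative call (assuming the function above is in scope and `xrange` is available, e.g. via six.moves on Python 3; the vocabulary is made up); the split is greedy from the left, preferring the longest subtoken at each position:

subtoken_dict = {'un': 0, 'related': 1, 'rel': 2, 'ated': 3, 'u': 4, 'n': 5}
print(_split_token_to_subtokens('unrelated', subtoken_dict, max_subtoken_length=7))
# -> ['un', 'related']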
def clean(self):
    """Remove all of the tables and data from the warehouse"""
    connection = self._backend._get_connection()
    self._backend.clean(connection)
def _parse_path(self, path):
    """Return (hosts, path) tuple"""
    # Support specifying another host via hdfs://host:port/path syntax
    # We ignore the scheme and piece together the query and fragment
    # Note that HDFS URIs are not URL encoded, so a '?' or a '#' in the URI
    # is part of the path
    parts = urlsplit(path, allow_fragments=False)
    if not parts.path.startswith('/'):
        raise ValueError("Path must be absolute, was given {}".format(path))
    if parts.scheme not in ('', 'hdfs', 'hftp', 'webhdfs'):
        warnings.warn("Unexpected scheme {}".format(parts.scheme))
    assert not parts.fragment
    path = parts.path
    if parts.query:
        path += '?' + parts.query
    if parts.netloc:
        hosts = self._parse_hosts(parts.netloc)
    else:
        hosts = self.hosts
    return hosts, path
def _read_stc(stc_file):
    """Read Segment Table of Contents file.

    Returns
    -------
    hdr : dict
        - next_segment : Sample frequency in Hertz
        - final : Number of channels stored
        - padding : Padding
    stamps : ndarray of dtype
        - segment_name : Name of ERD / ETC file segment
        - start_stamp : First sample stamp that is found in the ERD / ETC pair
        - end_stamp : Last sample stamp that is still found in the ERD / ETC
          pair
        - sample_num : Number of samples actually being recorded (gaps in the
          data are not included in this number)
        - sample_span : Number of samples in that .erd file

    Notes
    -----
    The Segment Table of Contents file is an index into pairs of
    (raw data file / table of contents file). It is used for mapping samples
    file segments. EEG raw data is split into segments in order to break a
    single file size limit (used to be 2GB) while still allowing quick
    searches. This file ends in the extension '.stc'. Default segment size
    (size of ERD file after which it is closed and new [ERD / ETC] pair is
    opened) is 50MB. The file starts with a generic EEG file header, and is
    followed by a series of fixed length records called the STC entries.

    ERD segments are named according to the following schema:
      - <FIRST_NAME>, <LAST_NAME>_<GUID>.ERD (first)
      - <FIRST_NAME>, <LAST_NAME>_<GUID>.ETC (first)
      - <FIRST_NAME>, <LAST_NAME>_<GUID>_<INDEX>.ERD (second and subsequent)
      - <FIRST_NAME>, <LAST_NAME>_<GUID>_<INDEX>.ETC (second and subsequent)

    <INDEX> is formatted with "%03d" format specifier and starts at 1
    (initial value being 0 and omitted for compatibility with the previous
    versions).
    """
    hdr = _read_hdr_file(stc_file)  # read header the normal way
    stc_dtype = dtype([('segment_name', 'a256'),
                       ('start_stamp', '<i'),
                       ('end_stamp', '<i'),
                       ('sample_num', '<i'),
                       ('sample_span', '<i')])
    with stc_file.open('rb') as f:
        f.seek(352)  # end of header
        hdr['next_segment'] = unpack('<i', f.read(4))[0]
        hdr['final'] = unpack('<i', f.read(4))[0]
        hdr['padding'] = unpack('<' + 'i' * 12, f.read(48))
        stamps = fromfile(f, dtype=stc_dtype)
    return hdr, stamps
def checksum_status(self, filename):
    """
    Retrieve checksum status and values for a file

    :param str filename: The name of the file within the Upload Area
    :return: a dict with checksum information
    :rtype: dict
    :raises UploadApiException: if information could not be obtained
    """
    return self.upload_service.api_client.checksum_status(area_uuid=self.uuid,
                                                          filename=filename)