def index(in_cram, config):
    """Ensure CRAM file has a .crai index file."""
    out_file = in_cram + ".crai"
    if not utils.file_uptodate(out_file, in_cram):
        with file_transaction(config, in_cram + ".crai") as tx_out_file:
            tx_in_file = os.path.splitext(tx_out_file)[0]
            utils.symlink_plus(in_cram, tx_in_file)
            cmd = "samtools index {tx_in_file}"
            do.run(cmd.format(**locals()), "Index CRAM file")
    return out_file
def variable(self):
    """Get a new variable."""
    # Use a character invalid in python identifiers to avoid clashing.
    name = "@py_assert" + str(next(self.variable_counter))
    self.variables.append(name)
    return name
def distance_inches_continuous(self):
    """
    Measurement of the distance detected by the sensor, in inches.

    The sensor will continue to take measurements so they are available
    for future reads.

    Prefer using the equivalent :meth:`UltrasonicSensor.distance_inches` property.
    """
    self._ensure_mode(self.MODE_US_DIST_IN)
    return self.value(0) * self._scale('US_DIST_IN')
def reindex(report):
    """Reindex report so that 'TOTAL' is the last row"""
    index = list(report.index)
    i = index.index('TOTAL')
    return report.reindex(index[:i] + index[i+1:] + ['TOTAL'])
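A quick usage sketch (assuming pandas is available; the data values are made up):

import pandas as pd

report = pd.DataFrame({'count': [3, 10, 7]}, index=['a', 'TOTAL', 'b'])
print(reindex(report).index.tolist())   # ['a', 'b', 'TOTAL']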
def visit_extslice(self, node):
    """return an astroid.ExtSlice node as string"""
    return ", ".join(dim.accept(self) for dim in node.dims)
def BaseShapeFactory(shape_elm, parent):
    """
    Return an instance of the appropriate shape proxy class for *shape_elm*.
    """
    tag = shape_elm.tag
    if tag == qn('p:pic'):
        videoFiles = shape_elm.xpath('./p:nvPicPr/p:nvPr/a:videoFile')
        if videoFiles:
            return Movie(shape_elm, parent)
        return Picture(shape_elm, parent)
    shape_cls = {
        qn('p:cxnSp'): Connector,
        qn('p:grpSp'): GroupShape,
        qn('p:sp'): Shape,
        qn('p:graphicFrame'): GraphicFrame,
    }.get(tag, BaseShape)
    return shape_cls(shape_elm, parent)
def related_records2marc(self, key, value):
    """Populate the ``78708`` MARC field.

    Also populates the ``78002``, ``78502`` MARC fields through side effects.
    """
    if value.get('relation_freetext'):
        return {
            'i': value.get('relation_freetext'),
            'w': get_recid_from_ref(value.get('record')),
        }
    elif value.get('relation') == 'successor':
        self.setdefault('78502', []).append({
            'i': 'superseded by',
            'w': get_recid_from_ref(value.get('record')),
        })
    elif value.get('relation') == 'predecessor':
        self.setdefault('78002', []).append({
            'i': 'supersedes',
            'w': get_recid_from_ref(value.get('record')),
        })
    else:
        raise NotImplementedError(u"Unhandled relation in related_records: {}".format(value.get('relation')))
def fwhm(x, y, k=10):
    # http://stackoverflow.com/questions/10582795/finding-the-full-width-half-maximum-of-a-peak
    """
    Determine the full-width-half-maximum of a peaked set of points, x and y.

    Assumes that there is only one peak present in the dataset.
    The function uses a spline interpolation of order k.
    """
    class MultiplePeaks(Exception):
        pass

    class NoPeaksFound(Exception):
        pass

    half_max = np.amax(y) / 2.0
    s = splrep(x, y - half_max)
    roots = sproot(s)

    if len(roots) > 2:
        raise MultiplePeaks("The dataset appears to have multiple peaks, and "
                            "thus the FWHM can't be determined.")
    elif len(roots) < 2:
        raise NoPeaksFound("No proper peaks were found in the data set; likely "
                           "the dataset is flat (e.g. all zeros).")
    else:
        return roots[0], roots[1]
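A minimal usage sketch, assuming numpy and scipy.interpolate's splrep/sproot are imported as the function expects; the Gaussian here is illustrative:

import numpy as np

x = np.linspace(-5, 5, 200)
y = np.exp(-x**2 / 2.0)          # Gaussian peak with sigma = 1
left, right = fwhm(x, y)
print(right - left)              # ~2.355, i.e. 2*sqrt(2*ln 2)*sigma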
def from_dict(cls, d):
    """
    Returns a SlabEntry by reading in a dictionary
    """
    structure = SlabEntry.from_dict(d["structure"])
    energy = SlabEntry.from_dict(d["energy"])
    miller_index = d["miller_index"]
    label = d["label"]
    coverage = d["coverage"]
    adsorbates = d["adsorbates"]
    clean_entry = d["clean_entry"]
    return SlabEntry(structure, energy, miller_index, label=label,
                     coverage=coverage, adsorbates=adsorbates,
                     clean_entry=clean_entry)
def update_channels(self):
    """Update the GUI to reflect channels and image listing."""
    if not self.gui_up:
        return
    self.logger.debug("channel configuration has changed--updating gui")
    try:
        channel = self.fv.get_channel(self.chname)
    except KeyError:
        channel = self.fv.get_channel_info()

    if channel is None:
        raise ValueError('No channel available')

    self.chname = channel.name

    w = self.w.channel_name
    w.clear()

    self.chnames = list(self.fv.get_channel_names())
    #self.chnames.sort()
    for chname in self.chnames:
        w.append_text(chname)

    # select the channel that is the current one
    try:
        i = self.chnames.index(channel.name)
    except ValueError:
        i = 0
    self.w.channel_name.set_index(i)

    # update the image listing
    self.redo()
def compose_functions(*func_list):
    """
    References:
        https://mathieularose.com/function-composition-in-python/
    """
    def apply_composition(f, g):
        def compose(x):
            return f(g(x))
        return compose
    composed_func = functools.reduce(apply_composition, func_list)
    return composed_func
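For illustration, a hypothetical composition (this assumes functools is imported, as the body requires; the lambdas are made up):

add_one = lambda x: x + 1
double = lambda x: x * 2

f = compose_functions(add_one, double)   # f(x) == add_one(double(x))
print(f(3))                              # 7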
def patronymic(self, gender: Gender = None) -> str:
    """Generate random patronymic name.

    :param gender: Gender of person.
    :return: Patronymic name.

    :Example:
        Алексеевна.
    """
    gender = self._validate_enum(gender, Gender)
    patronymics = self._data['patronymic'][gender]
    return self.random.choice(patronymics)
def last_first_initial(self):
    """Return a name in the format of:
        Lastname, F [(Nickname)]
    """
    return ("{}{} ".format(self.last_name,
                           ", " + self.first_name[:1] + "." if self.first_name else "") +
            ("({}) ".format(self.nickname) if self.nickname else ""))
def match_filtered_identities(self, fa, fb):
    """Determine if two filtered identities are the same.

    The method compares the email addresses of each filtered identity
    to check if they are the same. When the given filtered identities
    are the same object or share the same UUID, this will also
    produce a positive match.

    Identities whose email addresses are in the blacklist will be
    ignored and the result of the comparison will be false.

    :param fa: filtered identity to match
    :param fb: filtered identity to match

    :returns: True when both filtered identities are likely to be the same.
        Otherwise, returns False.

    :raises ValueError: when any of the given filtered identities is not
        an instance of EmailIdentity class.
    """
    if not isinstance(fa, EmailIdentity):
        raise ValueError("<fa> is not an instance of EmailIdentity")
    if not isinstance(fb, EmailIdentity):
        raise ValueError("<fb> is not an instance of EmailIdentity")

    if fa.uuid and fb.uuid and fa.uuid == fb.uuid:
        return True

    if fa.email in self.blacklist:
        return False

    # Compare email addresses first
    if fa.email and fa.email == fb.email:
        return True

    return False
def _replace_service_arg(self, name, index, args):
    """ Replace index in list with service """
    args[index] = self.get_instantiated_service(name)
def download_file(local_filename, url, clobber=False):
    """Download the given file. Clobber overwrites file if exists."""
    dir_name = os.path.dirname(local_filename)
    mkdirs(dir_name)
    if clobber or not os.path.exists(local_filename):
        i = requests.get(url)

        # if not exists
        if i.status_code == 404:
            print('Failed to download file:', local_filename, url)
            return False

        # write out in 1MB chunks
        chunk_size_in_bytes = 1024 * 1024  # 1MB
        with open(local_filename, 'wb') as local_file:
            for chunk in i.iter_content(chunk_size=chunk_size_in_bytes):
                local_file.write(chunk)

    return True
def query_parent_objects(self, context, query=None):
    """Return the objects of the same type from the parent object

    :param query: Catalog query to narrow down the objects
    :type query: dict
    :returns: Content objects of the same portal type in the parent
    """
    # return the object values if we have no catalog query
    if query is None:
        return self.get_parent_objects(context)

    # avoid undefined reference of catalog in except...
    catalog = None

    # try to fetch the results via the catalog
    try:
        catalogs = api.get_catalogs_for(context)
        catalog = catalogs[0]
        return map(api.get_object, catalog(query))
    except (IndexError, UnicodeDecodeError, ParseError, APIError) as e:
        # fall back to the object values of the parent
        logger.warn("UniqueFieldValidator: Catalog query {} failed "
                    "for catalog {} ({}) -> returning object values of {}"
                    .format(query, repr(catalog), str(e),
                            repr(api.get_parent(context))))
        return self.get_parent_objects(context)
def cli(env, volume_id, reason, immediate):
    """Cancel existing snapshot space for a given volume."""
    file_storage_manager = SoftLayer.FileStorageManager(env.client)

    if not (env.skip_confirmations or formatting.no_going_back(volume_id)):
        raise exceptions.CLIAbort('Aborted')

    cancelled = file_storage_manager.cancel_snapshot_space(
        volume_id, reason, immediate)

    if cancelled:
        if immediate:
            click.echo('File volume with id %s has been marked'
                       ' for immediate snapshot cancellation' % volume_id)
        else:
            click.echo('File volume with id %s has been marked'
                       ' for snapshot cancellation' % volume_id)
    else:
        click.echo('Unable to cancel snapshot space for file volume %s' % volume_id)
def get_detail(self, course_id):
    """
    Fetches course details.

    Args:
        course_id (str): An edx course id.

    Returns:
        CourseDetail
    """
    # the request is done on behalf of the current logged in user
    resp = self._requester.get(
        urljoin(
            self._base_url,
            '/api/courses/v1/courses/{course_key}/'.format(course_key=course_id)
        )
    )
    resp.raise_for_status()
    return CourseDetail(resp.json())
def create(self, no_data=False):
    """Declare materialized view."""
    if self.query:
        ddl_statement = self.compile_create_as()
    else:
        ddl_statement = self.compile_create()
    if no_data:
        ddl_statement += '\nWITH NO DATA'
    return ddl_statement, self.query_values
def _check_ssh(self, *args):
    """Check if SSH connection can be made to IP with username."""
    ssh = subprocess.Popen(
        args,
        stdin=subprocess.DEVNULL,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL)
    ssh.wait()
    return ssh.returncode == 0
def _reset_em(self):
    """Resets self.em and the shared instances."""
    self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=False)
    self.em.start()
    self._set_shared_instances()
def decode_embedded_strs(src):
    """
    Convert embedded bytes to strings if possible. This is necessary because
    Python 3 makes a distinction between these types.

    This wouldn't be needed if we used "use_bin_type=True" when encoding and
    "encoding='utf-8'" when decoding. Unfortunately, this would break
    backwards compatibility due to a change in wire protocol, so this less
    than ideal solution is used instead.
    """
    if not six.PY3:
        return src

    if isinstance(src, dict):
        return _decode_embedded_dict(src)
    elif isinstance(src, list):
        return _decode_embedded_list(src)
    elif isinstance(src, bytes):
        try:
            return src.decode()  # pylint: disable=redefined-variable-type
        except UnicodeError:
            return src
    else:
        return src
def getmin(self):
    """Get minimiser after optimisation."""
    if self.opt['ReturnVar'] == 'X':
        return self.var_x()
    elif self.opt['ReturnVar'] == 'Y0':
        return self.var_y0()
    elif self.opt['ReturnVar'] == 'Y1':
        return self.var_y1()
    else:
        raise ValueError(self.opt['ReturnVar'] + ' is not a valid value '
                         'for option ReturnVar')
def _key_info(conn, args):
    """
    Dummy reply (mainly for 'gpg --edit' to succeed).

    For details, see GnuPG agent KEYINFO command help.
    https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=agent/command.c;h=c8b34e9882076b1b724346787781f657cac75499;hb=refs/heads/master#l1082
    """
    fmt = 'S KEYINFO {0} X - - - - - - -'
    keygrip, = args
    keyring.sendline(conn, fmt.format(keygrip).encode('ascii'))
def set_return_val(self, state, val, is_fp=None, size=None, stack_base=None):
    """
    Set the return value into the given state
    """
    ty = self.func_ty.returnty if self.func_ty is not None else None
    try:
        betterval = self._standardize_value(val, ty, state, None)
    except AttributeError:
        raise ValueError("Can't fit value %s into a return value" % repr(val))

    if self.ret_val is not None:
        loc = self.ret_val
    elif is_fp is not None:
        loc = self.FP_RETURN_VAL if is_fp else self.RETURN_VAL
    elif ty is not None:
        loc = self.FP_RETURN_VAL if isinstance(ty, SimTypeFloat) else self.RETURN_VAL
    else:
        loc = self.FP_RETURN_VAL if self.is_fp_value(val) else self.RETURN_VAL

    if loc is None:
        raise NotImplementedError("This SimCC doesn't know how to store this value - should be implemented")

    loc.set_value(state, betterval, endness='Iend_BE', stack_base=stack_base)
def all(self):
    """
    Synchronize all registered plugins and plugin points to database.
    """
    # Django >= 1.9 changed something with the migration logic causing
    # plugins to be executed before the corresponding database tables
    # exist. This method will only return something if the database
    # tables have already been created.
    # XXX: I don't fully understand the issue and there should be
    # another way but this appears to work fine.
    if django_version >= (1, 9) and (
            not db_table_exists(Plugin._meta.db_table) or
            not db_table_exists(PluginPoint._meta.db_table)):
        return

    self.points()
def copy_default_config_to_user_directory(
        basename,
        clobber=False,
        dst_dir='~/.config/scriptabit'):
    """ Copies the default configuration file into the user config directory.

    Args:
        basename (str): The base filename.
        clobber (bool): If True, the default will be written even if a user
            config already exists.
        dst_dir (str): The destination directory.
    """
    dst_dir = os.path.expanduser(dst_dir)
    dst = os.path.join(dst_dir, basename)
    src = resource_filename(
        Requirement.parse("scriptabit"),
        os.path.join('scriptabit', basename))

    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)

    if clobber or not os.path.isfile(dst):
        shutil.copy(src, dst)
def cns_vwl_str_len_wb(self):
    """
    Return a new IPAString, containing only:

    1. the consonants,
    2. the vowels,
    3. the stress diacritics,
    4. the length diacritics, and
    5. the word breaks

    in the current string.

    :rtype: IPAString
    """
    return IPAString(ipa_chars=[c for c in self.ipa_chars
                                if (c.is_letter) or
                                (c.is_suprasegmental and
                                 (c.is_stress or c.is_length or c.is_word_break))])
def restore(self, image):
    """
    Restore the droplet to the specified backup image

    A Droplet restoration will rebuild an image using a backup image. The
    image ID that is passed in must be a backup of the current Droplet
    instance. The operation will leave any embedded SSH keys intact.
    [APIDocs]_

    :param image: an image ID, an image slug, or an `Image` object
        representing a backup image of the droplet
    :type image: integer, string, or `Image`
    :return: an `Action` representing the in-progress operation on the
        droplet
    :rtype: Action
    :raises DOAPIError: if the API endpoint replies with an error
    """
    if isinstance(image, Image):
        image = image.id
    return self.act(type='restore', image=image)
def one_of(choices, first_is_default=False, as_rules=False):
    """
    A wrapper for :class:`Any`.

    :param as_rules:
        `bool`. If `False` (by default), each element of `choices` is wrapped
        in the :class:`Equals` validator so they are interpreted as literals.

    .. deprecated:: 0.13
       Use :class:`Any` instead.
    """
    assert choices
    if as_rules:
        None    # for coverage
    else:
        choices = [Equals(x) for x in choices]
    return Any(choices, first_is_default=first_is_default)
def get_core(self):
    """
    Get an unsatisfiable core if the formula was previously unsatisfied.
    """
    if self.maplesat and self.status == False:
        return pysolvers.maplecm_core(self.maplesat)
def addProxyObject(self, obj, proxied):
    """
    Stores a reference to the unproxied and proxied versions of C{obj} for
    later retrieval.

    @since: 0.6
    """
    self.proxied_objects[id(obj)] = proxied
    self.proxied_objects[id(proxied)] = obj
def popitem(self, last=True):
    """
    Remove and return a ``(key, value)`` pair from the dictionary.

    If last=True (default) then remove the *greatest* `key` from the
    dictionary. Else, remove the *least* key from the dictionary.

    If the dictionary is empty, calling `popitem` raises a `KeyError`.
    """
    if not len(self):
        raise KeyError('popitem(): dictionary is empty')

    key = self._list_pop(-1 if last else 0)
    value = self._pop(key)

    return (key, value)
def markInputline(self, markerString=">!<"):
    """Extracts the exception line from the input string, and marks
       the location of the exception with a special symbol.
    """
    line_str = self.line
    line_column = self.column - 1
    if markerString:
        line_str = "".join((line_str[:line_column],
                            markerString, line_str[line_column:]))
    return line_str.strip()
def check_version_is_equal_or_higher(cls, version, cmp_major, cmp_minor):
    """
    Checks if the version is equal or higher than a specified value.

    :param int version: Version number to be checked.
    :param int cmp_major: Major version to be compared with.
    :param int cmp_minor: Minor version to be compared with.

    :return: True if equal or higher, otherwise False.
    :rtype: bool
    """
    return (cls.convert_to_major_ver(version) > cmp_major) or \
           (cls.convert_to_major_ver(version) == cmp_major and
            cls.convert_to_minor_ver(version) >= cmp_minor)
def script_repr(self, imports=[], prefix=" "):
    """
    Same as Parameterized.script_repr, except that X.classname(Y
    is replaced with X.classname.instance(Y
    """
    return self.pprint(imports, prefix, unknown_value='', qualify=True,
                       separator="\n")
def process_data(self, stream, metadata):
    """
    Extract the tabulated data from the input file.

    Parameters
    ----------
    stream : Streamlike object
        A Streamlike object (nominally StringIO) containing the table to be
        extracted
    metadata : dict
        Metadata read in from the header and the namelist

    Returns
    -------
    (pandas.DataFrame, dict)
        The first element contains the data, processed to the standard
        MAGICCData format. The second element is the updated metadata based
        on the processing performed.
    """
    ch, metadata = self._get_column_headers_and_update_metadata(stream, metadata)
    df = self._convert_data_block_and_headers_to_df(stream)
    return df, metadata, ch
def process_messages_loop(self):
    """
    Processes incoming WorkRequest messages one at a time via functions
    specified by add_command.
    """
    self.receiving_messages = True
    try:
        self.process_messages_loop_internal()
    except pika.exceptions.ConnectionClosed as ex:
        logging.error("Connection closed {}.".format(ex))
        raise
def get_item_es(self, **kwargs):
    """ Get ES collection item taking into account generated queryset
    of parent view.

    This method allows working with nested resources properly. Thus an item
    returned by this method will belong to its parent view's queryset, thus
    filtering out objects that don't belong to the parent object.

    Returns an object retrieved from the applicable ACL. If an ACL wasn't
    applied, it is applied explicitly.
    """
    item_id = self._get_context_key(**kwargs)
    objects_ids = self._parent_queryset_es()
    if objects_ids is not None:
        objects_ids = self.get_es_object_ids(objects_ids)

    if six.callable(self.context):
        self.reload_context(es_based=True, **kwargs)

    if (objects_ids is not None) and (item_id not in objects_ids):
        raise JHTTPNotFound('{}(id={}) resource not found'.format(
            self.Model.__name__, item_id))

    return self.context
def register(self, password):
    """Registers the current user with the given password."""
    if len(password) < 8:
        raise ValueError("Password must be at least 8 characters.")

    params = {"name": self.nick, "password": password}
    resp = self.conn.make_api_call("register", params)
    if "error" in resp:
        raise RuntimeError(f"{resp['error'].get('message') or resp['error']}")
    self.conn.make_call("useSession", resp["session"])
    self.conn.cookies.update({"session": resp["session"]})
    self.logged_in = True
def _find_package(c):
    """
    Try to find 'the' One True Package for this project.

    Mostly for obtaining the ``_version`` file within it.

    Uses the ``packaging.package`` config setting if defined. If not defined,
    fallback is to look for a single top-level Python package (directory
    containing ``__init__.py``). (This search ignores a small blacklist of
    directories like ``tests/``, ``vendor/`` etc.)
    """
    # TODO: is there a way to get this from the same place setup.py does w/o
    # setup.py barfing (since setup() runs at import time and assumes CLI use)?
    configured_value = c.get("packaging", {}).get("package", None)
    if configured_value:
        return configured_value
    # TODO: tests covering this stuff here (most logic tests simply supply
    # config above)
    packages = [
        path
        for path in os.listdir(".")
        if (
            os.path.isdir(path)
            and os.path.exists(os.path.join(path, "__init__.py"))
            and path not in ("tests", "integration", "sites", "vendor")
        )
    ]
    if not packages:
        sys.exit("Unable to find a local Python package!")
    if len(packages) > 1:
        sys.exit("Found multiple Python packages: {0!r}".format(packages))
    return packages[0]
def _comp_task(inbox, args, kwargs):
    """
    (internal) Composes a sequence of functions in the global variable TASK.
    The resulting composition is given the input "inbox" and arguments
    "args", "kwargs".
    """
    # Note. this function uses a global variable which must be defined on the
    # remote host.
    for func, args, kwargs in itertools.izip(TASK, args, kwargs):
        inbox = (func(inbox, *args, **kwargs),)
    return inbox[0]
def segment_snrs(filters, stilde, psd, low_frequency_cutoff):
    """ This function calculates the snr of each bank veto template against
    the segment

    Parameters
    ----------
    filters: list of FrequencySeries
        The list of bank veto templates filters.
    stilde: FrequencySeries
        The current segment of data.
    psd: FrequencySeries
    low_frequency_cutoff: float

    Returns
    -------
    snr (list): List of snr time series.
    norm (list): List of normalizations factors for the snr time series.
    """
    snrs = []
    norms = []

    for bank_template in filters:
        # For every template compute the snr against the stilde segment
        snr, _, norm = matched_filter_core(
            bank_template, stilde, h_norm=bank_template.sigmasq(psd),
            psd=None, low_frequency_cutoff=low_frequency_cutoff)
        # SNR time series stored here
        snrs.append(snr)
        # Template normalization factor stored here
        norms.append(norm)

    return snrs, norms
def _on_cluster_data_moved(self, response, command, future):
    """Process the ``MOVED`` response from a Redis cluster node.

    :param bytes response: The response from the Redis server
    :param command: The command that was being executed
    :type command: tredis.client.Command
    :param future: The execution future
    :type future: tornado.concurrent.Future
    """
    LOGGER.debug('on_cluster_data_moved(%r, %r, %r)', response, command, future)
    parts = response.split(' ')
    name = '{}:{}'.format(*common.split_connection_host_port(parts[2]))
    LOGGER.debug('Moved to %r', name)
    if name not in self._cluster:
        raise exceptions.ConnectionError(
            '{} is not connected'.format(name))
    self._cluster[name].execute(
        command._replace(connection=self._cluster[name]), future)
def append(self, station):
    """ Append station to database.

    Returns the index of the appended station.
    """
    rec = station._pack(self)
    with self:
        _libtcd.add_tide_record(rec, self._header)
    return self._header.number_of_records - 1
def connect(self, callback, ref=False, position='first', before=None, after=None):
    """ Connect the callback to the event group. The callback will receive
    events from *all* of the emitters in the group.

    See :func:`EventEmitter.connect() <vispy.event.EventEmitter.connect>` for
    arguments.
    """
    self._connect_emitters(True)
    return EventEmitter.connect(self, callback, ref, position, before, after)
def _update_config(self, directory, filename):
    """Manages FLICKR config files"""
    basefilename = os.path.splitext(filename)[0]
    ext = os.path.splitext(filename)[1].lower()

    if filename == LOCATION_FILE:
        print("%s - Updating geotag information" % (LOCATION_FILE))
        return self._update_config_location(directory)
    elif filename == TAG_FILE:
        print("%s - Updating tags" % (TAG_FILE))
        return self._update_config_tags(directory)
    elif filename == SET_FILE:
        print("%s - Updating sets" % (SET_FILE))
        return self._update_config_sets(directory)
    elif filename == MEGAPIXEL_FILE:
        print("%s - Updating photo size" % (MEGAPIXEL_FILE))
        return self._upload_media(directory, resize_request=True)
    elif ext in self.FLICKR_META_EXTENSIONS:
        return self._update_meta(directory, basefilename)
    return False
def dispatch(self, event):
    """Given an event, send it to all the subscribers.

    Args:
        event (:class:`~bigchaindb.events.EventTypes`): the event to
            dispatch to all the subscribers.
    """
    for event_types, queues in self.queues.items():
        if event.type & event_types:
            for queue in queues:
                queue.put(event)
def regular_file() -> Callable:
    """
    Returns a method that can be used in argument parsing to check the argument
    is a regular file or a symbolic link, but not, e.g., a process substitution.

    :return: A method that can be used as a type in argparse.
    """
    def check_regular_file(value_to_check):
        value_to_check = str(value_to_check)
        if not os.path.isfile(value_to_check):
            raise argparse.ArgumentTypeError("must exist and be a regular file.")
        return value_to_check
    return check_regular_file
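A hedged usage sketch with argparse; the option name and file path below are illustrative:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--input", type=regular_file(),
                    help="path to an existing regular file")
# argparse reports an error and exits if data.txt is not a regular file
args = parser.parse_args(["--input", "data.txt"])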
def submitQuest(self):
    """ Submits the active quest, returns result

    Returns
       bool - True if successful, otherwise False
    """
    form = pg.form(action="kitchen2.phtml")
    pg = form.submit()

    if "Woohoo" in pg.content:
        try:
            self.prize = pg.find(text="The Chef waves his hands, and you may collect your prize...").parent.parent.find_all("b")[-1].text
        except Exception:
            logging.getLogger("neolib.quest").exception("Failed to parse kitchen quest prize", {'pg': pg})
            raise parseException
        return True
    else:
        logging.getLogger("neolib.quest").info("Failed to complete kitchen quest", {'pg': pg})
        return False
def convert(value, from_unit, to_unit):
    """
    Converts a value from `from_unit` units to `to_unit` units

    :param value: value to convert
    :type value: int or str or decimal.Decimal

    :param from_unit: unit to convert from
    :type from_unit: str

    :param to_unit: unit to convert to
    :type to_unit: str

    >>> convert(value='1.5', from_unit='xrb', to_unit='krai')
    Decimal('0.0015')
    """
    if isinstance(value, float):
        raise ValueError(
            "float values can lead to unexpected precision loss, please use a"
            " Decimal or string eg."
            " convert('%s', %r, %r)" % (value, from_unit, to_unit)
        )

    if from_unit not in UNITS_TO_RAW:
        raise ValueError('unknown unit: %r' % from_unit)

    if to_unit not in UNITS_TO_RAW:
        raise ValueError('unknown unit: %r' % to_unit)

    try:
        value = Decimal(value)
    except Exception:
        raise ValueError('not a number: %r' % value)

    from_value_in_base = UNITS_TO_RAW[from_unit]
    to_value_in_base = UNITS_TO_RAW[to_unit]

    result = value * (from_value_in_base / to_value_in_base)

    return result.normalize()
def ocsp_responder_certificate_path():
    """Get ocsp responder certificate path

    Test: TEST_of_SK_OCSP_RESPONDER_2011.pem
    Live: sk-ocsp-responder-certificates.pem

    Note: These files are distributed under esteid/certs

    :return:
    """
    certificate_path = getattr(settings, 'ESTEID_OCSP_RESPONDER_CERTIFICATE_PATH',
                               'TEST_of_SK_OCSP_RESPONDER_2011.pem')

    if certificate_path in ['TEST_of_SK_OCSP_RESPONDER_2011.pem',
                            'sk-ocsp-responder-certificates.pem']:
        return os.path.join(os.path.dirname(__file__), 'certs', certificate_path)

    return certificate_path
def _iname2qname(self, iname: InstanceName) -> QualName:
    """Translate instance name to qualified name in the receiver's context."""
    p, s, loc = iname.partition(":")
    return (loc, p) if s else (p, self.ns)
def archive(cwd, output, rev='tip', fmt=None, prefix=None, user=None):
    """
    Export a tarball from the repository

    cwd
        The path to the Mercurial repository

    output
        The path to the archive tarball

    rev: tip
        The revision to create an archive from

    fmt: None
        Format of the resulting archive. Mercurial supports: tar,
        tbz2, tgz, zip, uzip, and files formats.

    prefix : None
        Prepend <prefix>/ to every filename in the archive

    user : None
        Run hg as a user other than what the minion runs as

    If ``prefix`` is not specified it defaults to the basename of the repo
    directory.

    CLI Example:

    .. code-block:: bash

        salt '*' hg.archive /path/to/repo output=/tmp/archive.tgz fmt=tgz
    """
    cmd = [
        'hg',
        'archive',
        '{0}'.format(output),
        '--rev',
        '{0}'.format(rev),
    ]
    if fmt:
        cmd.append('--type')
        cmd.append('{0}'.format(fmt))
    if prefix:
        cmd.append('--prefix')
        cmd.append('"{0}"'.format(prefix))
    return __salt__['cmd.run'](cmd, cwd=cwd, runas=user, python_shell=False)
def _write_method(schema):
    """Add a write method for named schema to a class."""
    def method(
            self,
            filename=None,
            schema=schema,
            id_col='uid',
            sequence_col='sequence',
            extra_data=None,
            alphabet=None,
            **kwargs):
        # Use generic write class to write data.
        return _write(
            self._data,
            filename=filename,
            schema=schema,
            id_col=id_col,
            sequence_col=sequence_col,
            extra_data=extra_data,
            alphabet=alphabet,
            **kwargs
        )
    # Update docs
    method.__doc__ = _write_doc_template(schema)
    return method
def get_symbol_list(rank, dim=6):
    """
    Returns a symbolic representation of the voigt-notation
    tensor that places identical symbols for entries related
    by index transposition, i. e. C_1121 = C_1211 etc.

    Args:
        dim (int): dimension of matrix/tensor, e. g. 6 for
            voigt notation and 3 for standard
        rank (int): rank of tensor, e. g. 3 for third-order ECs

    Returns:
        c_vec (array): array representing distinct indices
        c_arr (array): array representing tensor with equivalent
            indices assigned as above
    """
    indices = list(
        itertools.combinations_with_replacement(range(dim), r=rank))
    c_vec = np.zeros(len(indices), dtype=object)
    c_arr = np.zeros([dim] * rank, dtype=object)
    for n, idx in enumerate(indices):
        c_vec[n] = sp.Symbol('c_' + ''.join([str(i) for i in idx]))
        for perm in itertools.permutations(idx):
            c_arr[perm] = c_vec[n]
    return c_vec, c_arr
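A small usage sketch, assuming itertools, numpy (as np), and sympy (as sp) are imported as the body requires; the rank/dim values are just an example:

c_vec, c_arr = get_symbol_list(rank=2, dim=3)
print(len(c_vec))                  # 6 distinct symbols for a symmetric 3x3 tensor
print(c_arr[0, 1] == c_arr[1, 0])  # True -- transposed entries share one symbol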
def serialize(self, value, primitive=False):
    """Serialize this field."""
    return [self.field.serialize(v, primitive=primitive) for v in value]
def update(self, environments):
    """
    Method to update environments

    :param environments: List containing environments desired to be updated
    :return: None
    """
    data = {'environments': environments}
    environments_ids = [str(env.get('id')) for env in environments]

    return super(ApiEnvironment, self).put(
        'api/v3/environment/%s/' % ';'.join(environments_ids), data)
def as_list(self, key):
    """
    A convenience method which fetches the specified value, guaranteeing
    that it is a list.

    >>> a = ConfigObj()
    >>> a['a'] = 1
    >>> a.as_list('a')
    [1]
    >>> a['a'] = (1,)
    >>> a.as_list('a')
    [1]
    >>> a['a'] = [1]
    >>> a.as_list('a')
    [1]
    """
    result = self[key]
    if isinstance(result, (tuple, list)):
        return list(result)
    return [result]
def alwaysCalledWithMatch(cls, spy, *args, **kwargs):  # pylint: disable=invalid-name
    """
    Checking the inspector is always called with partial SinonMatcher(args/kwargs)
    Args: SinonSpy, args/kwargs
    """
    cls.__is_spy(spy)
    if not (spy.alwaysCalledWithMatch(*args, **kwargs)):
        raise cls.failException(cls.message)
def known_pipettes() -> Sequence[str]:
    """ List pipette IDs for which we have known overrides """
    return [fi.stem
            for fi in CONFIG['pipette_config_overrides_dir'].iterdir()
            if fi.is_file() and '.json' in fi.suffixes]
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(CephCollector, self).get_default_config()
    config.update({
        'socket_path': '/var/run/ceph',
        'socket_prefix': 'ceph-',
        'socket_ext': 'asok',
        'ceph_binary': '/usr/bin/ceph',
    })
    return config
def winapi_result(result):
    """Validate WINAPI BOOL result, raise exception if failed"""
    if not result:
        raise WinApiException("%d (%x): %s" % (ctypes.GetLastError(),
                                               ctypes.GetLastError(),
                                               ctypes.FormatError()))
    return result
def set_server(self, pos, key, value):
    """Set the key to the value for the pos (position in the list)."""
    self._web_list[pos][key] = value
def state_size(self):
    """Tuple of `tf.TensorShape`s indicating the size of state tensors."""
    if self._max_unique_stats == 1:
        return (tf.TensorShape([self._hidden_size]),
                tf.TensorShape([self._hidden_size]))
    else:
        return (tf.TensorShape([self._hidden_size]),
                tf.TensorShape([self._hidden_size]),
                tf.TensorShape(1))
def _has_perm(self, user, permission):
    """
    Check whether the user has the given permission

    @return True if user is granted with access, False if not.
    """
    if user.is_superuser:
        return True
    if user.is_active:
        perms = [perm.split('.')[1] for perm in user.get_all_permissions()]
        return permission in perms
    return False
def start_external_service(self, service_name, conf=None):
    """
    Start external service service_name with configuration conf.

    :param service_name: Name of service to start
    :param conf:
    :return: nothing
    """
    if service_name in self._external_services:
        ser = self._external_services[service_name]
        service = ser(service_name, conf=conf, bench=self.bench)
        try:
            service.start()
        except PluginException:
            self.logger.exception("Starting service %s caused an exception!", service_name)
            raise PluginException("Failed to start external service {}".format(service_name))
        self._started_services.append(service)
        setattr(self.bench, service_name, service)
    else:
        self.logger.warning("Service %s not found. Check your plugins.", service_name)
def get_pool(parallel, kwargs):
    """
    Yields:
        a ThreadPoolExecutor if parallel is True and `concurrent.futures`
        exists. `None` otherwise.
    """
    if parallel:
        try:
            from concurrent.futures import ThreadPoolExecutor
            with ThreadPoolExecutor(thread_name_prefix="insights-collector-pool", **kwargs) as pool:
                yield pool
        except ImportError:
            yield None
    else:
        yield None
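A usage sketch; it assumes get_pool is meant to be used as a context manager (the generator yields exactly once, so wrapping it with contextlib.contextmanager here is only for illustration), and the worker count is arbitrary:

from contextlib import contextmanager

with contextmanager(get_pool)(parallel=True, kwargs={"max_workers": 4}) as pool:
    if pool is not None:
        futures = [pool.submit(len, s) for s in ("a", "bb", "ccc")]
        print([f.result() for f in futures])   # [1, 2, 3]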
def poweroff(self):
    """
    shutdown the machine
    """
    command = const.CMD_POWEROFF
    command_string = b''
    response_size = 1032
    cmd_response = self.__send_command(command, command_string, response_size)
    if cmd_response.get('status'):
        self.is_connect = False
        self.next_uid = 1
        return True
    else:
        raise ZKErrorResponse("can't poweroff")
def _add_comments(self, comments, original_string=""):
    """
    Returns a string with comments added
    """
    return (comments and
            "{0} # {1}".format(self._strip_comments(original_string)[0],
                               "; ".join(comments))
            or original_string)
def commands(config, names):
    """Return the list of commands to run."""
    commands = {cmd: Command(**dict((minus_to_underscore(k), v)
                                    for k, v in config.items(cmd)))
                for cmd in config.sections()
                if cmd != 'packages'}
    try:
        return tuple(commands[x] for x in names)
    except KeyError as e:
        raise RuntimeError(
            'Section [commands] in the config file does not contain the '
            'key {.args[0]!r} you requested to execute.'.format(e))
def get_app_modules(self, apps):
    """return array of imported leonardo modules for apps
    """
    modules = getattr(self, "_modules", [])

    if not modules:
        from django.utils.module_loading import module_has_submodule

        # Try importing a modules from the module package
        package_string = '.'.join(['leonardo', 'module'])

        for app in apps:
            exc = '...'
            try:
                # check if is not full app
                _app = import_module(app)
            except Exception as e:
                _app = False
                exc = e

            if module_has_submodule(
                    import_module(package_string), app) or _app:
                if _app:
                    mod = _app
                else:
                    mod = import_module('.{0}'.format(app), package_string)
                if mod:
                    modules.append(mod)
                    continue

            warnings.warn('%s was skipped because %s ' % (app, exc))

        self._modules = modules
    return self._modules
def csvtolist(inputstr):
    """ converts a csv string into a list """
    reader = csv.reader([inputstr], skipinitialspace=True)
    output = []
    for r in reader:
        output += r
    return output
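A quick example (assuming the csv module is imported, as the body requires):

print(csvtolist("a, b, c"))   # ['a', 'b', 'c']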
def read(self):
    try:
        bytes = self.sock.recv(self.max_size)
    except:
        self.torrent.kill_peer(self)
        return
    """
    Chain of events:
     - process_input
        - check save_state and read length, id, and message accordingly
        - if we have a piece (really a block), we piece.save it out inside call to ppiece
            - If we've completed a piece we:
                - Tell the switchboard to write it out
                - init a new piece
    """
    if len(bytes) == 0:
        print 'Got 0 bytes from fileno {}.'.format(self.fileno())
        self.torrent.kill_peer(self)
    self.process_input(bytes)
def remove_field(self, field):
    """
    Removes a field from this table

    :param field: This can be a string of a field name, a dict of
        {'alias': field}, or a ``Field`` instance
    :type field: str or dict or :class:`Field <querybuilder.fields.Field>`
    """
    new_field = FieldFactory(
        field,
    )
    new_field.set_table(self)
    new_field_identifier = new_field.get_identifier()
    for field in self.fields:
        if field.get_identifier() == new_field_identifier:
            self.fields.remove(field)
            return field
    return None
def iter(self, count=0, func=sum):
    """Iterator of infinite dice rolls.

    :param count: [0] Return list of ``count`` sums
    :param func: [sum] Apply func to list of individual die rolls
        func([])
    """
    while True:
        yield self.roll(count, func)
def candidates(text):
    """
    Given a `text` string, get candidates and context for feature
    extraction and classification
    """
    for Pmatch in finditer(TARGET, text):
        # the punctuation mark itself
        P = Pmatch.group(1)

        # is it a boundary?
        B = bool(match(NEWLINE, Pmatch.group(5)))

        # L & R
        start = Pmatch.start()
        end = Pmatch.end()
        Lmatch = search(LTOKEN, text[max(0, start - BUFSIZE):start])
        if not Lmatch:  # this happens when a line begins with '.'
            continue
        L = word_tokenize(" " + Lmatch.group(1))[-1]
        Rmatch = search(RTOKEN, text[end:end + BUFSIZE])
        if not Rmatch:  # this happens at the end of the file, usually
            continue
        R = word_tokenize(Rmatch.group(1) + " ")[0]

        # complete observation
        yield Observation(L, P, R, B, end)
def _pars_total_indexes(names, dims, fnames, pars):
    """Obtain all the indexes for parameters `pars` in the sequence of names.

    `names` references variables that are in column-major order

    Parameters
    ----------
    names : sequence of str
        All the parameter names.
    dims : sequence of list of int
        Dimensions, in same order as `names`.
    fnames : sequence of str
        All the scalar parameter names
    pars : sequence of str
        The parameters of interest. It is assumed all elements in `pars`
        are in `names`.

    Returns
    -------
    indexes : OrderedDict of list of int
        Dictionary uses parameter names as keys. Indexes are column-major
        order. For each parameter there is also a key `par`+'_rowmajor'
        that stores the row-major indexing.

    Note
    ----
    Inside each parameter (vector or array), the sequence uses column-major
    ordering. For example, if we have parameters alpha and beta, having
    dimensions [2, 2] and [2, 3] respectively, the whole parameter sequence
    is alpha[0,0], alpha[1,0], alpha[0, 1], alpha[1, 1], beta[0, 0],
    beta[1, 0], beta[0, 1], beta[1, 1], beta[0, 2], beta[1, 2]. In short,
    like R matrix(..., bycol=TRUE).

    Example
    -------
    >>> pars_oi = ['mu', 'tau', 'eta', 'theta', 'lp__']
    >>> dims_oi = [[], [], [8], [8], []]
    >>> fnames_oi = ['mu', 'tau', 'eta[1]', 'eta[2]', 'eta[3]', 'eta[4]',
    ...              'eta[5]', 'eta[6]', 'eta[7]', 'eta[8]', 'theta[1]',
    ...              'theta[2]', 'theta[3]', 'theta[4]', 'theta[5]',
    ...              'theta[6]', 'theta[7]', 'theta[8]', 'lp__']
    >>> pars = ['mu', 'tau', 'eta', 'theta', 'lp__']
    >>> _pars_total_indexes(pars_oi, dims_oi, fnames_oi, pars)
    ... # doctest: +ELLIPSIS
    OrderedDict([('mu', (0,)), ('tau', (1,)), ('eta', (2, 3, ...

    """
    starts = _calc_starts(dims)

    def par_total_indexes(par):
        # if `par` is a scalar, it will match one of `fnames`
        if par in fnames:
            p = fnames.index(par)
            idx = tuple([p])
            return OrderedDict([(par, idx), (par+'_rowmajor', idx)])
        else:
            p = names.index(par)
            idx = starts[p] + np.arange(np.prod(dims[p]))
            idx_rowmajor = starts[p] + _idx_col2rowm(dims[p])
            return OrderedDict([(par, tuple(idx)),
                                (par+'_rowmajor', tuple(idx_rowmajor))])

    indexes = OrderedDict()
    for par in pars:
        indexes.update(par_total_indexes(par))
    return indexes
def sha1(self):
    """Return a sha1 hash of the model items.

    :rtype: str
    """
    sha1 = hashlib.sha1(''.join(['%s:%s' % (k, v) for k, v in self.items()]))
    return str(sha1.hexdigest())
def mouseMoveEvent(self, event):
    """Override Qt method.

    Show code analysis, if left button pressed select lines.
    """
    line_number = self.editor.get_linenumber_from_mouse_event(event)
    block = self.editor.document().findBlockByNumber(line_number - 1)
    data = block.userData()

    # this disables pyflakes messages if there is an active drag/selection
    # operation
    check = self._released == -1

    if data and data.code_analysis and check:
        self.editor.show_code_analysis_results(line_number, data)
    else:
        self.editor.hide_tooltip()

    if event.buttons() == Qt.LeftButton:
        self._released = line_number
        self.editor.select_lines(self._pressed, self._released)
def to_utf8(obj):
    """Walks a simple data structure, converting unicode to byte string.

    Supports lists, tuples, and dictionaries.
    """
    if isinstance(obj, unicode_type):
        return _utf8(obj)
    elif isinstance(obj, dict):
        return dict((to_utf8(k), to_utf8(v)) for (k, v) in obj.items())
    elif isinstance(obj, list):
        return list(to_utf8(i) for i in obj)
    elif isinstance(obj, tuple):
        return tuple(to_utf8(i) for i in obj)
    return obj
def lpc(blk, order=None):
    """
    Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object,
    the analysis whitening filter. This implementation uses the
    autocorrelation method, using the Levinson-Durbin algorithm or Numpy
    pseudo-inverse for linear system solving, when needed.

    Parameters
    ----------
    blk :
        An iterable with well-defined length. Don't use this function with
        Stream objects!
    order :
        The order of the resulting ZFilter object. Defaults to ``len(blk) - 1``.

    Returns
    -------
    A FIR filter, as a ZFilter object. The mean squared error over the given
    block is in its "error" attribute.

    Hint
    ----
    See ``lpc.kautocor`` example, which should apply equally for this strategy.

    See Also
    --------
    levinson_durbin :
        Levinson-Durbin algorithm for solving Yule-Walker equations (Toeplitz
        matrix linear system).
    lpc.nautocor :
        LPC coefficients from linear system solved with Numpy pseudo-inverse.
    lpc.kautocor :
        LPC coefficients obtained with Levinson-Durbin algorithm.
    """
    if order < 100:
        return lpc.nautocor(blk, order)
    try:
        return lpc.kautocor(blk, order)
    except ParCorError:
        return lpc.nautocor(blk, order)
def to_dict(self):
    """
    Returns a dictionary that represents this object, to be used for JSONification.

    :return: the object dictionary
    :rtype: dict
    """
    result = super(OptionHandler, self).to_dict()
    result["type"] = "OptionHandler"
    result["options"] = join_options(self.options)
    return result
def status_delete(self, id):
    """
    Delete a status
    """
    id = self.__unpack_id(id)
    url = '/api/v1/statuses/{0}'.format(str(id))
    self.__api_request('DELETE', url)
def _new_token(self, chars=None, line_no=None):
    """
    Appends new token to token stream.

    `chars`
        List of token characters. Defaults to current token list.
    `line_no`
        Line number for token. Defaults to current line number.
    """
    if not line_no:
        line_no = self._line_no
    if not chars:
        chars = self._token_chars
    if chars:
        # add new token
        self._tokens.append((line_no, ''.join(chars)))
    self._token_chars = []
def getLocalIPaddress():
    """visible to other machines on LAN"""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('google.com', 0))
        my_local_ip = s.getsockname()[0]  # takes ~0.005s

        #from netifaces import interfaces, ifaddresses, AF_INET
        #full solution in the event of multiple NICs (network interface cards) on the PC
        #def ip4_addresses():
        #    ip_list = []
        #    for interface in interfaces():
        #        # If IPv6 addresses are needed instead, use AF_INET6 instead of AF_INET
        #        for link in ifaddresses(interface)[AF_INET]:
        #            ip_list.append(link['addr'])
        #    return ip_list
    except Exception:
        my_local_ip = None
    return my_local_ip
def in_order(self) -> Iterator["BSP"]:
    """Iterate over this BSP's hierarchy in order.

    .. versionadded:: 8.3
    """
    if self.children:
        yield from self.children[0].in_order()
        yield self
        yield from self.children[1].in_order()
    else:
        yield self
def _one_to_many_query(cls, query_obj, search4, model_attrib):
    """extends and returns a SQLAlchemy query object to allow one-to-many queries

    :param query_obj: SQL Alchemy query object
    :param str search4: search string
    :param model_attrib: attribute in model
    """
    model = model_attrib.parent.class_

    already_joined_tables = [mapper.class_ for mapper in query_obj._join_entities]

    if isinstance(search4, (str, int, Iterable)) and model not in already_joined_tables:
        query_obj = query_obj.join(model)

    if isinstance(search4, str):
        query_obj = query_obj.filter(model_attrib.like(search4))
    elif isinstance(search4, int):
        query_obj = query_obj.filter(model_attrib == search4)
    elif isinstance(search4, Iterable):
        query_obj = query_obj.filter(model_attrib.in_(search4))

    return query_obj
def _findUniqueMappingValues(mapping):
    """Find mapping entries that are unique for one key (value length of 1).

    .. Note: This function can be used to find unique proteins by providing
        a peptide to protein mapping.

    :param mapping: dict, for each key contains a set of entries
    :returns: a set of unique mapping values
    """
    uniqueMappingValues = set()
    for entries in viewvalues(mapping):
        if len(entries) == 1:
            uniqueMappingValues.update(entries)
    return uniqueMappingValues
def add_comment(self, post=None, name=None, email=None, pub_date=None,
                website=None, body=None):
    """
    Adds a comment to the post provided.
    """
    if post is None:
        if not self.posts:
            raise CommandError("Cannot add comments without posts")
        post = self.posts[-1]
    post["comments"].append({
        "user_name": name,
        "user_email": email,
        "submit_date": pub_date,
        "user_url": website,
        "comment": body,
    })
def perform(self, node, inputs, output_storage):
    """Evaluate this node's computation.

    Parameters
    ----------
    node : `theano.gof.graph.Apply`
        The node of this Op in the computation graph.
    inputs : 1-element list of arrays
        Contains an array (usually `numpy.ndarray`) of concrete values
        supplied for the symbolic input variable ``x``.
    output_storage : 1-element list of 1-element lists
        The single 1-element list contained in ``output_storage``
        by default contains only ``None``. This value must be replaced
        by the result of the application of `odl_op`.

    Examples
    --------
    Perform a matrix multiplication:

    >>> space = odl.rn(3)
    >>> matrix = np.array([[1, 0, 1],
    ...                    [0, 1, 1]], dtype=float)
    >>> op = odl.MatrixOperator(matrix, domain=space)
    >>> matrix_op = TheanoOperator(op)
    >>> x = theano.tensor.dvector()
    >>> op_x = matrix_op(x)
    >>> op_func = theano.function([x], op_x)
    >>> op_func([1, 2, 3])
    array([ 4., 5.])

    Evaluate a functional, i.e., an operator with scalar output:

    >>> space = odl.rn(3)
    >>> functional = odl.solvers.L2NormSquared(space)
    >>> func_op = TheanoOperator(functional)
    >>> x = theano.tensor.dvector()
    >>> op_x = func_op(x)
    >>> op_func = theano.function([x], op_x)
    >>> op_func([1, 2, 3])
    array(14.0)
    """
    x = inputs[0]
    z = output_storage[0]
    z[0] = np.asarray(self.operator(x))
def forum_topic_update(self, topic_id, title=None, category=None):
    """Update a specific topic (Requires login) (UNTESTED).

    Parameters:
        topic_id (int): Where topic_id is the topic id.
        title (str): Topic title.
        category (str): Can be: 0, 1, 2 (General, Tags, Bugs & Features
                        respectively).
    """
    params = {
        'forum_topic[title]': title,
        'forum_topic[category_id]': category
    }
    return self._get('forum_topics/{0}.json'.format(topic_id), params,
                     method='PUT', auth=True)
def get_directory(self, identifier):
    """Implements the policy for naming directories for image objects.
    Image object directories are named by their identifier. In addition,
    these directories are grouped in parent directories named by the first
    two characters of the identifier. The aim is to avoid having too many
    sub-folders in a single directory.

    Parameters
    ----------
    identifier : string
        Unique object identifier

    Returns
    -------
    string
        Path to image objects data directory
    """
    return os.path.join(
        os.path.join(self.directory, identifier[:2]),
        identifier
    )
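The naming policy can be illustrated directly; the base directory and identifier below are hypothetical:

import os

identifier = "abc123"
base = "/data/images"
path = os.path.join(os.path.join(base, identifier[:2]), identifier)
print(path)   # /data/images/ab/abc123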
def show_conf(conf_file=default_conf, name=None):
    """
    Show configuration

    conf_file : string
        path to logadm.conf, defaults to /etc/logadm.conf
    name : string
        optional show only a single entry

    CLI Example:

    .. code-block:: bash

        salt '*' logadm.show_conf
        salt '*' logadm.show_conf name=/var/log/syslog
    """
    cfg = _parse_conf(conf_file)

    # filter
    if name and name in cfg:
        return {name: cfg[name]}
    elif name:
        return {name: 'not found in {}'.format(conf_file)}
    else:
        return cfg
def delete_pipeline_stage(self, pipeline_key, stage_key, sort_by=None):
    """Deletes a stage in the pipeline by stage key and pipeline key

    Args:
        pipeline_key    key for pipeline
        stage_key       key for stage
        sort_by         in desc order by 'creationTimestamp' or
                        'lastUpdatedTimestamp'
    returns (status code for the DELETE request, dict of op report)
    """
    if not (pipeline_key and stage_key):
        return requests.codes.bad_request, None
    uri = '/'.join([
        self.api_uri,
        self.pipelines_suffix,
        pipeline_key,
        self.stages_suffix,
        stage_key
    ])
    code, data = self._req('delete', uri)
    return code, data
def asynchronous(self, fun, low, user='UNKNOWN', pub=None):
    """
    Execute the function in a multiprocess and return the event tag to use
    to watch for the return
    """
    async_pub = pub if pub is not None else self._gen_async_pub()

    proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
        target=self._proc_function,
        args=(fun, low, user, async_pub['tag'], async_pub['jid']))
    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
        # Reset current signals before starting the process in
        # order not to inherit the current signal handlers
        proc.start()
    proc.join()  # MUST join, otherwise we leave zombies all over
    return async_pub
def download():
    """
    Download all files from an FTP share
    """
    ftp = ftplib.FTP(SITE)
    ftp.set_debuglevel(DEBUG)
    ftp.login(USER, PASSWD)
    ftp.cwd(DIR)

    filelist = ftp.nlst()
    filecounter = MANAGER.counter(total=len(filelist), desc='Downloading', unit='files')

    for filename in filelist:
        with Writer(filename, ftp.size(filename), DEST) as writer:
            ftp.retrbinary('RETR %s' % filename, writer.write)
        print(filename)
        filecounter.update()

    ftp.close()
def share(self, data, mime=None, time=None):
    """Share some data from this Feed

    Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
    containing the error if the infrastructure detects a problem

    Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
    if there is a communications problem between you and the infrastructure

    `data` (mandatory) (as applicable) The data you want to share

    `mime` (optional) (string) The mime type of the data you're sharing. There are
    some Iotic Labs-defined default values:

    `"idx/1"` - Corresponds to "application/ubjson" - the recommended way to send
    mixed data. Share a python dictionary as the data and the agent will do the
    encoding and decoding for you.

        #!python
        data = {}
        data["temperature"] = self._convert_to_celsius(ADC.read(1))
        # ...etc...
        my_feed.share(data)

    `"idx/2"` - Corresponds to "text/plain" - the recommended way to send textual
    data. Share a utf8 string as data and the agent will pass it on, unchanged.

        #!python
        my_feed.share(u"string data")

    `"text/xml"` or any other valid mime type. To show the recipients that you're
    sending something more than just bytes

        #!python
        my_feed.share("<xml>...</xml>".encode('utf8'), mime="text/xml")

    `time` (optional) (datetime) UTC time for this share. If not specified, the
    container's time will be used. Thus it makes almost no sense to specify
    `datetime.utcnow()` here. This parameter can be used to indicate that the
    share time does not correspond to the time to which the data applies, e.g.
    to populate recent storage with historical data.
    """
    evt = self.share_async(data, mime=mime, time=time)
    self._client._wait_and_except_if_failed(evt)
def save_all(self):
    """Save all opened files.

    Iterate through self.data and call save() on any modified files.
    """
    for index in range(self.get_stack_count()):
        if self.data[index].editor.document().isModified():
            self.save(index)