def query_disc(nside, vec, radius, inclusive=False, fact=4, nest=False):
    """Wrapper around healpy.query_disc to deal with old healpy implementation.

    nside : int
        The nside of the Healpix map.
    vec : float, sequence of 3 elements
        The coordinates of unit vector defining the disk center.
    radius : float
        The radius (in degrees) of the disc.
    inclusive : bool, optional
        If False, return the exact set of pixels whose pixel centers lie within
        the disk; if True, return all pixels that overlap with the disk, and
        maybe a few more. Default: False
    fact : int, optional
        Only used when inclusive=True. The overlapping test will be done at the
        resolution fact*nside. For NESTED ordering, fact must be a power of 2,
        else it can be any positive integer. Default: 4.
    nest : bool, optional
        If True, assume NESTED pixel ordering, otherwise RING pixel ordering.
    """
    try:
        # New-style call (healpy 1.6.3)
        return hp.query_disc(nside, vec, np.radians(radius), inclusive, fact, nest)
    except Exception as e:
        print(e)
        # Old-style call (healpy 0.10.2)
        return hp.query_disc(nside, vec, np.radians(radius), nest, deg=False)
def get_project_urls(project):
    """Get the URLs for all runs from a given project. TODO: docstring"""
    urls = []
    with ftputil.FTPHost(sra_host, sra_user, sra_password) as ftp_host:
        download_paths = []
        exp_dir = '/sra/sra-instant/reads/ByStudy/sra/SRP/%s/%s/' \
                  % (project[:6], project)
        ftp_host.chdir(exp_dir)
        run_folders = ftp_host.listdir(ftp_host.curdir)
        # compile a list of all files
        for folder in run_folders:
            files = ftp_host.listdir(folder)
            assert len(files) == 1
            for f in files:
                path = exp_dir + folder + '/' + f
                urls.append(path)
    return urls
def list_queries(self, **kwargs): """List queries in device query service. :param int limit: The number of devices to retrieve. :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get devices after/starting at given `device_id` :param filters: Dictionary of filters to apply. :returns: a list of :py:class:`Query` objects. :rtype: PaginatedResponse """
kwargs = self._verify_sort_options(kwargs) kwargs = self._verify_filters(kwargs, Query, True) api = self._get_api(device_directory.DefaultApi) return PaginatedResponse(api.device_query_list, lwrap_type=Query, **kwargs)
def get_calculated_aes(aesthetics):
    """Return a list of the aesthetics that are calculated"""
    calculated_aesthetics = []
    for name, value in aesthetics.items():
        if is_calculated_aes(value):
            calculated_aesthetics.append(name)
    return calculated_aesthetics
def project(self, geometries, inSR, outSR, transformation="", transformFoward=False): """ The project operation is performed on a geometry service resource. This operation projects an array of input geometries from the input spatial reference to the output spatial reference. Inputs: geometries - array of geometries to be projected (structured as JSON geometry objects returned by the ArcGIS REST API). inSR - spatial reference of the input geometries WKID. outSR - spatial reference or WKID for the returned geometries. transformation - The WKID or a JSON object specifying the geographic transformation (also known as datum transformation) to be applied to the projected geometries. transformForward - indicating whether or not to transform forward. """
url = self._url + "/project" params = { "f" : "json", "inSR" : inSR, "geometries": self.__geometryListToGeomTemplate(geometries=geometries), "outSR" : outSR, "transformation" : transformation, "transformFoward": transformFoward } return self._get(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
def create_record_task(self, frame_parameters: dict=None, channels_enabled: typing.List[bool]=None) -> RecordTask: """Create a record task for this hardware source. .. versionadded:: 1.0 :param frame_parameters: The frame parameters for the record. Pass None for defaults. :type frame_parameters: :py:class:`FrameParameters` :param channels_enabled: The enabled channels for the record. Pass None for defaults. :type channels_enabled: List of booleans. :return: The :py:class:`RecordTask` object. :rtype: :py:class:`RecordTask` Callers should call close on the returned task when finished. See :py:class:`RecordTask` for examples of how to use. """
return RecordTask(self.__hardware_source, frame_parameters, channels_enabled)
def _aggregation_op(cls, op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor], x: 'TensorFluent', vars_list: List[str]) -> 'TensorFluent': """Returns a TensorFluent for the aggregation `op` applied to fluent `x`. Args: op: The aggregation operation. x: The input fluent. vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the aggregation operator's output. """
axis = cls._varslist2axis(x, vars_list) t = op(x.tensor, axis) scope = [] for var in x.scope.as_list(): if var not in vars_list: scope.append(var) batch = x.batch return TensorFluent(t, scope, batch=batch)
def smart_content_encoding(self):
    """Smart content encoding."""
    encoding = self.content_encoding
    if not encoding:
        base_list = self.basename.split('.')
        while (not encoding) and len(base_list) > 1:
            _, encoding = mimetypes.guess_type('.'.join(base_list))
            base_list.pop()
    return encoding
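A minimal sketch of the standard-library fallback the loop above relies on; the 'archive.tar.gz' filename is a hypothetical example, not taken from the source:

import mimetypes

# guess_type returns (type, encoding); for gzip-compressed names the encoding
# slot is 'gzip', which is what smart_content_encoding falls back to.
print(mimetypes.guess_type('archive.tar.gz'))   # ('application/x-tar', 'gzip')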
def exception_handler(exctype, value, traceback):
    """
    This exception handler catches KeyboardInterrupt to cancel the Runner and
    also stops the Runner in case of an error.
    """
    if exctype == KeyboardInterrupt:
        pypro.console.out('')  # Adds a new line after Ctrl+C character
        pypro.console.err('Canceled')
    elif exctype == PyproException:
        pypro.console.err('[Error] ', value.message)
        exit()
    else:
        sys.__excepthook__(exctype, value, traceback)
def get_item_bank_assignment_session(self): """Gets the ``OsidSession`` associated with the item bank assignment service. return: (osid.assessment.ItemBankAssignmentSession) - an ``ItemBankAssignmentSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_item_bank_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_item_bank_assignment()`` is ``true``.* """
if not self.supports_item_bank_assignment(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ItemBankAssignmentSession(runtime=self._runtime)
def apparent_dip_correction(axes): """ Produces a two-dimensional rotation matrix that rotates a projected dataset to correct for apparent dip """
a1 = axes[0].copy() a1[-1] = 0 cosa = angle(axes[0],a1,cos=True) _ = 1-cosa**2 if _ > 1e-12: sina = N.sqrt(_) if cosa < 0: sina *= -1 # Construct rotation matrix R= N.array([[cosa,sina],[-sina,cosa]]) else: # Small angle, don't bother # (small angles can lead to spurious results) R = N.identity(2) #if axes[0,0] < 0: # return R.T #else: return R
def urlvoid_check(name, api_key):
    """Checks URLVoid.com for info on a domain"""
    if not is_fqdn(name):
        return None
    url = 'http://api.urlvoid.com/api1000/{key}/host/{name}'.format(key=api_key, name=name)
    response = requests.get(url)
    tree = ET.fromstring(response.text)
    if tree.find('./detections/engines'):
        return [e.text for e in tree.find('./detections/engines')]
    else:
        return None
def deploy_deb(self, file_name, distribution, component, architecture, parameters={}): """ Convenience method to deploy .deb packages Keyword arguments: file_name -- full path to local file that will be deployed distribution -- debian distribution (e.g. 'wheezy') component -- repository component (e.g. 'main') architecture -- package architecture (e.g. 'i386') parameters -- attach any additional metadata """
params = { 'deb.distribution': distribution, 'deb.component': component, 'deb.architecture': architecture } params.update(parameters) self.deploy_file(file_name, parameters=params)
def members_from_rank_range_in( self, leaderboard_name, starting_rank, ending_rank, **options): """ Retrieve members from the named leaderboard within a given rank range. @param leaderboard_name [String] Name of the leaderboard. @param starting_rank [int] Starting rank (inclusive). @param ending_rank [int] Ending rank (inclusive). @param options [Hash] Options to be used when retrieving the data from the leaderboard. @return members from the leaderboard that fall within the given rank range. """
starting_rank -= 1 if starting_rank < 0: starting_rank = 0 ending_rank -= 1 if ending_rank > self.total_members_in(leaderboard_name): ending_rank = self.total_members_in(leaderboard_name) - 1 raw_leader_data = [] if self.order == self.DESC: raw_leader_data = self.redis_connection.zrevrange( leaderboard_name, starting_rank, ending_rank, withscores=False) else: raw_leader_data = self.redis_connection.zrange( leaderboard_name, starting_rank, ending_rank, withscores=False) return self._parse_raw_members( leaderboard_name, raw_leader_data, **options)
def field(self, name):
    """
    Returns the field on this struct with the given name. Will try to find
    this name on all ancestors if this struct extends another.

    If found, returns a dict with keys: 'name', 'comment', 'type', 'is_array'
    If not found, returns None

    :Parameters:
      name
        string name of field to lookup
    """
    if name in self.fields:
        return self.fields[name]
    elif self.extends:
        if not self.parent:
            self.parent = self.contract.struct(self.extends)
        return self.parent.field(name)
    else:
        return None
def list(self, path): """GET /<path>?list=true :param path: :type path: :return: :rtype: """
try: payload = { 'list': True } return self._adapter.get('/v1/{0}'.format(path), params=payload).json() except exceptions.InvalidPath: return None
def from_arrays(event, time, name_event=None, name_time=None):
    """Create structured array.

    Parameters
    ----------
    event : array-like
        Event indicator. A boolean array or array with values 0/1.
    time : array-like
        Observed time.
    name_event : str|None
        Name of event, optional, default: 'event'
    name_time : str|None
        Name of observed time, optional, default: 'time'

    Returns
    -------
    y : np.array
        Structured array with two fields.
    """
    name_event = name_event or 'event'
    name_time = name_time or 'time'
    if name_time == name_event:
        raise ValueError('name_time must be different from name_event')

    time = numpy.asanyarray(time, dtype=numpy.float_)
    y = numpy.empty(time.shape[0],
                    dtype=[(name_event, numpy.bool_), (name_time, numpy.float_)])
    y[name_time] = time

    event = numpy.asanyarray(event)
    check_consistent_length(time, event)

    if numpy.issubdtype(event.dtype, numpy.bool_):
        y[name_event] = event
    else:
        events = numpy.unique(event)
        events.sort()
        if len(events) != 2:
            raise ValueError('event indicator must be binary')
        if numpy.all(events == numpy.array([0, 1], dtype=events.dtype)):
            y[name_event] = event.astype(numpy.bool_)
        else:
            raise ValueError('non-boolean event indicator must contain 0 and 1 only')

    return y
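A minimal usage sketch with made-up survival data, showing how the 0/1 indicator and the times end up in the two-field structured array:

y = from_arrays(event=[1, 0, 1], time=[10.0, 6.5, 3.2])
print(y.dtype.names)   # ('event', 'time')
print(y['event'])      # [ True False  True]
print(y['time'])       # [10.   6.5  3.2]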
def are_equal(self, sp1, sp2):
    """
    True if there is some overlap in composition between the species

    Args:
        sp1: First species. A dict of {specie/element: amt} as per the
            definition in Site and PeriodicSite.
        sp2: Second species. A dict of {specie/element: amt} as per the
            definition in Site and PeriodicSite.

    Returns:
        True always
    """
    set1 = set(sp1.elements)
    set2 = set(sp2.elements)
    return set1.issubset(set2) or set2.issubset(set1)
def maybe_from_tuple(tup_or_range): """Convert a tuple into a range but pass ranges through silently. This is useful to ensure that input is a range so that attributes may be accessed with `.start`, `.stop` or so that containment checks are constant time. Parameters ---------- tup_or_range : tuple or range A tuple to pass to from_tuple or a range to return. Returns ------- range : range The input to convert to a range. Raises ------ ValueError Raised when the input is not a tuple or a range. ValueError is also raised if the input is a tuple whose length is not 2 or 3. """
if isinstance(tup_or_range, tuple): return from_tuple(tup_or_range) elif isinstance(tup_or_range, range): return tup_or_range raise ValueError( 'maybe_from_tuple expects a tuple or range, got %r: %r' % ( type(tup_or_range).__name__, tup_or_range, ), )
def _get_caller_globals_and_locals():
    """
    Returns the globals and locals of the calling frame.

    Is there an alternative to frame hacking here?
    """
    caller_frame = inspect.stack()[2]
    myglobals = caller_frame[0].f_globals
    mylocals = caller_frame[0].f_locals
    return myglobals, mylocals
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]:
    """Tease out the content-type and character encoding.

    A default character encoding of UTF-8 is used, so the content-type
    must be used to determine if any decoding is necessary to begin with.
    """
    if not content_type:
        return None, "utf-8"
    else:
        type_, parameters = cgi.parse_header(content_type)
        encoding = parameters.get("charset", "utf-8")
        return type_, encoding
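A small sketch of the standard-library call the function wraps, showing why the charset parameter is easy to pull out:

from cgi import parse_header

# parse_header splits the media type from its parameters; _parse_content_type
# then applies the UTF-8 default when no charset parameter is present.
print(parse_header("text/html; charset=ISO-8859-1"))
# ('text/html', {'charset': 'ISO-8859-1'})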
def reversals(self, transfer_id, data={}, **kwargs):
    """
    Get all Reversal Transfers for the given id

    Args:
        transfer_id : Id for which reversal transfer object has to be fetched

    Returns:
        Transfer Dict
    """
    url = "{}/{}/reversals".format(self.base_url, transfer_id)
    return self.get_url(url, data, **kwargs)
def start(self):
    """Starts the server."""
    self._app.run(host=self._host, port=self._port)
def Max(a, axis, keep_dims):
    """
    Max reduction op.
    """
    return np.amax(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
                   keepdims=keep_dims),
def get_config_window_bounds(self): """Reads bounds from config and, if monitor is specified, modify the values to match with the specified monitor :return: coords X and Y where set the browser window. """
bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0) bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0) monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1) if monitor_index > -1: try: monitor = screeninfo.get_monitors()[monitor_index] bounds_x += monitor.x bounds_y += monitor.y except NotImplementedError: self.logger.warn('Current environment doesn\'t support get_monitors') return bounds_x, bounds_y
def upload(self, file_descriptor, settings):
    """
    Uploads a file to the cloud
    :param file_descriptor: an open file descriptor
    :param settings: upload settings
    :rtype: requests.Response
    """
    multipart_form_data = {
        'file': file_descriptor
    }
    params = {"settings": json.dumps(settings)}
    dr = self.__app.native_api_call('media', 'upload', params, self.__options, True,
                                    multipart_form_data, False,
                                    http_path="/api/meta/v1/", http_method='POST',
                                    connect_timeout_sec=60 * 10)
    return json.loads(dr.text)
def demix1(servo1, servo2, gain=0.5):
    """de-mix a mixed servo output"""
    s1 = servo1 - 1500
    s2 = servo2 - 1500
    out1 = (s1 + s2) * gain
    out2 = (s1 - s2) * gain
    return out1 + 1500
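A quick worked example of the arithmetic, assuming the 1500 µs servo centre used in the body:

# demix1(1600, 1600): s1 = s2 = 100, out1 = (100 + 100) * 0.5 = 100 -> 1600
print(demix1(1600, 1600))   # 1600.0
# centred inputs stay centred
print(demix1(1500, 1500))   # 1500.0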
def from_miss(self, **kwargs):
    """Called to initialize an instance when it is not found in the cache.

    For example, if your CacheModel should pull data from the database to
    populate the cache, ...

        def from_miss(self, username):
            user = User.objects.get(username=username)
            self.email = user.email
            self.full_name = user.get_full_name()
    """
    raise type(self).Missing(type(self)(**kwargs).key())
def sample(self, size=(), rule="R", antithetic=None, verbose=False, **kws):
    """
    Overwrite sample() function, because the constructed Dist that is based
    on the KDE is only working with the random sampling that is given by
    the KDE itself.
    """
    size_ = numpy.prod(size, dtype=int)
    dim = len(self)
    if dim > 1:
        if isinstance(size, (tuple, list, numpy.ndarray)):
            shape = (dim,) + tuple(size)
        else:
            shape = (dim, size)
    else:
        shape = size

    out = self.kernel.resample(size_)[0]
    try:
        out = out.reshape(shape)
    except ValueError:
        if len(self) == 1:
            out = out.flatten()
        else:
            out = out.reshape(dim, out.size // dim)
    return out
def clear_duration(self): """Clears the duration. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """
# Implemented from template for osid.assessment.AssessmentOfferedForm.clear_duration_template if (self.get_duration_metadata().is_read_only() or self.get_duration_metadata().is_required()): raise errors.NoAccess() self._my_map['duration'] = self._duration_default
def reports(self): """returns a list of reports on the server"""
if self._metrics is None: self.__init() self._reports = [] for r in self._metrics: url = self._url + "/%s" % six.moves.urllib.parse.quote_plus(r['reportname']) self._reports.append(UsageReport(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initialize=True)) del url return self._reports
def _get_agg_axis(self, axis_num):
    """Let's be explicit about this."""
    if axis_num == 0:
        return self.columns
    elif axis_num == 1:
        return self.index
    else:
        raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def match_value_to_text(self, text): """ this is going to be the tricky bit - probably not possible to get the 'exact' rating for a value. Will need to do sentiment analysis of the text to see how it matches the rating. Even that sounds like it wont work - maybe a ML algorithm would do it, but that requires a large body of text already matched to values - and values aren't even defined as far as I have found. UPDATE - this could work if we assume values can be single words, eg tax=0.3, freedom=0.7, healthcare=0.3, welfare=0.3 etc """
if self.nme in text: res = 0.8 else: res = 0.2 return self.nme + ' = ' + str(res) + ' match against ' + text
def _create_api_call(self, method, _url, kwargs): """ This will create an APICall object and return it :param method: str of the html method ['GET','POST','PUT','DELETE'] :param _url: str of the sub url of the api call (ex. g/device/list) :param kwargs: dict of additional arguments :return: ApiCall """
api_call = self.ApiCall(name='%s.%s' % (_url, method), label='ID_%s' % self._count, base_uri=self.base_uri, timeout=self.timeout, headers=self.headers, cookies=self.cookies, proxies=self.proxies, accepted_return=self.accepted_return or 'json') if self.max_history: self._count += 1 # count of _calls if len(self) > self.max_history: self._calls.pop(0) self._calls['ID_%s' % self._count] = api_call return api_call
def read(self, lpBaseAddress, nSize): """ Reads from the memory of the process. @see: L{peek} @type lpBaseAddress: int @param lpBaseAddress: Memory address to begin reading. @type nSize: int @param nSize: Number of bytes to read. @rtype: str @return: Bytes read from the process memory. @raise WindowsError: On error an exception is raised. """
hProcess = self.get_handle( win32.PROCESS_VM_READ | win32.PROCESS_QUERY_INFORMATION ) if not self.is_buffer(lpBaseAddress, nSize): raise ctypes.WinError(win32.ERROR_INVALID_ADDRESS) data = win32.ReadProcessMemory(hProcess, lpBaseAddress, nSize) if len(data) != nSize: raise ctypes.WinError() return data
def get_firmware_image(self, image_id): """Get a firmware image with provided image_id. :param str image_id: The firmware ID for the image to retrieve (Required) :return: FirmwareImage """
api = self._get_api(update_service.DefaultApi) return FirmwareImage(api.firmware_image_retrieve(image_id))
def log(duration, message=None, use_last_commit_message=False): """ Log time against the current active issue """
branch = git.branch issue = jira.get_issue(branch) # Create the comment comment = "Working on issue %s" % branch if message: comment = message elif use_last_commit_message: comment = git.get_last_commit_message() if issue: # If the duration is provided use it, otherwise use the elapsed time since the last mark duration = jira.get_elapsed_time(issue) if duration == '.' else duration if duration: # Add the worklog jira.add_worklog(issue, timeSpent=duration, adjustEstimate=None, newEstimate=None, reduceBy=None, comment=comment) print "Logged %s against issue %s (%s)" % (duration, branch, comment) else: print "No time logged, less than 0m elapsed."
def connectionJustEstablished(self): """ We sent out SYN, they acknowledged it. Congratulations, you have a new baby connection. """
assert not self.disconnecting assert not self.disconnected try: p = self.factory.buildProtocol(PTCPAddress( self.peerAddressTuple, self.pseudoPortPair)) p.makeConnection(self) except: log.msg("Exception during PTCP connection setup.") log.err() self.loseConnection() else: self.protocol = p
def _init_hex(self, hexval: str) -> None: """ Initialize from a hex value string. """
self.hexval = hex2termhex(fix_hex(hexval)) self.code = hex2term(self.hexval) self.rgb = hex2rgb(self.hexval)
def print_packet_range():
    """Print the range of archived packets."""
    first_packet = next(iter(archive.list_packets()))
    last_packet = next(iter(archive.list_packets(descending=True)))
    print('First packet:', first_packet)
    print('Last packet:', last_packet)
    td = last_packet.generation_time - first_packet.generation_time
    print('Timespan:', td)
def print_statistics(self):
    """Prints out the Q1, Q2, and cR statistics for the variogram fit.

    NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as
    small as possible.
    """
    print("Q1 =", self.Q1)
    print("Q2 =", self.Q2)
    print("cR =", self.cR)
def get_signed_query_params_v2(credentials, expiration, string_to_sign): """Gets query parameters for creating a signed URL. :type credentials: :class:`google.auth.credentials.Signing` :param credentials: The credentials used to create a private key for signing text. :type expiration: int or long :param expiration: When the signed URL should expire. :type string_to_sign: str :param string_to_sign: The string to be signed by the credentials. :raises: :exc:`AttributeError` if credentials is not an instance of :class:`google.auth.credentials.Signing`. :rtype: dict :returns: Query parameters matching the signing credentials with a signed payload. """
ensure_signed_credentials(credentials) signature_bytes = credentials.sign_bytes(string_to_sign) signature = base64.b64encode(signature_bytes) service_account_name = credentials.signer_email return { "GoogleAccessId": service_account_name, "Expires": str(expiration), "Signature": signature, }
def get_assessment_results_session_for_bank(self, bank_id, proxy): """Gets an ``AssessmentResultsSession`` to retrieve assessment results for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the assessment taken arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.AssessmentResultsSession) - an assessment results session for this service raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` or ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_results()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_results()`` is ``true``.* """
if not self.supports_assessment_results(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.AssessmentResultsSession(bank_id, proxy, self._runtime)
def id_fix(value): """ fix @prefix values for ttl """
if value.startswith('KSC_M'): pass else: value = value.replace(':','_') if value.startswith('ERO') or value.startswith('OBI') or value.startswith('GO') or value.startswith('UBERON') or value.startswith('IAO'): value = 'obo:' + value elif value.startswith('birnlex') or value.startswith('nlx'): value = 'NIFSTD:' + value elif value.startswith('MESH'): value = ':'.join(value.split('_')) else: value = ':' + value return OntId(value).URIRef
def _is_robot():
    """
    Return `True` if the current visitor is a robot or spider, or `False`
    otherwise.

    This function works by comparing the request's user agent with a regular
    expression. The regular expression can be configured with the
    ``SPLIT_ROBOT_REGEX`` setting.
    """
    robot_regex = current_app.config['SPLIT_ROBOT_REGEX']
    user_agent = request.headers.get('User-Agent', '')
    return re.search(robot_regex, user_agent, flags=re.VERBOSE)
def get_single_upstream_artifact_full_path(context, task_id, path):
    """Return the full path where an upstream artifact should be located.

    Artifact may not exist. If you want to be sure it does, use
    ``get_and_check_single_upstream_artifact_full_path()`` instead.

    This function is mainly used to move artifacts to the expected location.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        task_id (str): the task id of the task that published the artifact
        path (str): the relative path of the artifact

    Returns:
        str: absolute path where the artifact should be.
    """
    return os.path.abspath(os.path.join(context.config['work_dir'], 'cot', task_id, path))
def _set_updater(self, updater): """Sets a push updater into the store. This function only changes the local store. When running on multiple machines one must use `set_optimizer`. Parameters ---------- updater : function The updater function. Examples -------- >>> def update(key, input, stored): ... print "update on key: %d" % key ... stored += input * 2 >>> kv._set_updater(update) >>> kv.pull('3', out=a) >>> print a.asnumpy() [[ 4. 4. 4.] [ 4. 4. 4.]] >>> kv.push('3', mx.nd.ones(shape)) update on key: 3 >>> kv.pull('3', out=a) >>> print a.asnumpy() [[ 6. 6. 6.] [ 6. 6. 6.]] """
self._updater = updater # set updater with int keys _updater_proto = ctypes.CFUNCTYPE( None, ctypes.c_int, NDArrayHandle, NDArrayHandle, ctypes.c_void_p) self._updater_func = _updater_proto(_updater_wrapper(updater)) # set updater with str keys _str_updater_proto = ctypes.CFUNCTYPE( None, ctypes.c_char_p, NDArrayHandle, NDArrayHandle, ctypes.c_void_p) self._str_updater_func = _str_updater_proto(_updater_wrapper(updater)) check_call(_LIB.MXKVStoreSetUpdaterEx(self.handle, self._updater_func, self._str_updater_func, None))
def MatchQuality(cls, record_data, record_count=1): """Check how well this record matches the given binary data. This function will only be called if the record matches the type code given by calling MatchType() and this functon should check how well this record matches and return a quality score between 0 and 100, with higher quality matches having higher scores. The default value should be MatchQuality.GenericMatch which is 50. If this record does not match at all, it should return MatchQuality.NoMatch. Many times, only a single record type will match a given binary record but there are times when multiple different logical records produce the same type of record in a script, such as set_version and set_userkey both producing a call_rpc record with different RPC values. The MatchQuality method is used to allow for rich decoding of such scripts back to the best possible record that created them. Args: record_data (bytearay): The raw record that we should check for a match. record_count (int): The number of binary records that are included in record_data. Returns: int: The match quality between 0 and 100. You should use the constants defined in MatchQuality as much as possible. """
if record_count > 1: return MatchQuality.NoMatch cmd, _address, _resp_length, _payload = cls._parse_rpc_info(record_data) if cmd == PersistGraphRecord.RPC_ID: return MatchQuality.PerfectMatch return MatchQuality.NoMatch
def mean(series):
    """
    Returns the mean of a series.

    Args:
        series (pandas.Series): column to summarize.
    """
    if np.issubdtype(series.dtype, np.number):
        return series.mean()
    else:
        return np.nan
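A minimal sketch of the two branches, assuming numpy and pandas are imported as np and pd:

import numpy as np
import pandas as pd

print(mean(pd.Series([1.0, 2.0, 3.0])))   # 2.0  (numeric dtype -> real mean)
print(mean(pd.Series(['a', 'b', 'c'])))   # nan  (non-numeric dtype)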
def createPolyline(self, points, strokewidth=1, stroke='black'): """ Creates a Polyline @type points: string in the form "x1,y1 x2,y2 x3,y3" @param points: all points relevant to the polygon @type strokewidth: string or int @param strokewidth: width of the pen used to draw @type stroke: string (either css constants like "black" or numerical values like "#FFFFFF") @param stroke: color with which to draw the outer limits @return: a polyline object """
style_dict = {'fill':'none', 'stroke-width':strokewidth, 'stroke':stroke} myStyle = StyleBuilder(style_dict) p = Polyline(points=points) p.set_style(myStyle.getStyle()) return p
def make_post_request(self, url, auth, json_payload):
    """This function executes the request with the provided json payload
    and returns the json response"""
    response = requests.post(url, auth=auth, json=json_payload)
    return response.json()
def reduce_to_parent_states(models): """Remove all models of states that have a state model with parent relation in the list The function filters the list of models, so that for no model in the list, one of it (grand-)parents is also in the list. E.g. if the input models consists of a hierarchy state with two of its child states, the resulting list only contains the hierarchy state. :param set models: The set of selected models :return: The reduced set of selected models :rtype: set """
models = set(models) # Ensure that models is a set and that we do not operate on the parameter itself models_to_remove = set() # check all models for model in models: parent_m = model.parent # check if any (grand-)parent is already in the selection, if so, remove the child while parent_m is not None: if parent_m in models: models_to_remove.add(model) break parent_m = parent_m.parent for model in models_to_remove: models.remove(model) if models_to_remove: logger.debug("The selection has been reduced, as it may not contain elements whose children are also selected") return models
def attach_related_file(self, path, mimetype=None): """Attaches a file from the filesystem."""
filename = os.path.basename(path) content = open(path, 'rb').read() self.attach_related(filename, content, mimetype)
def load_settings(self, path): """ Load settings dict :param path: Path to settings file :type path: str | unicode :return: Loaded settings :rtype: dict :raises IOError: If file not found or error accessing file :raises TypeError: Settings file does not contain dict """
res = self.load_file(path) if not isinstance(res, dict): raise TypeError("Expected settings to be dict") return res
def get_data(self, workbook, row, col, formula_values={}): """ :return: 2d numpy array for this table with any formulas resolved to the final excel formula. :param xltable.Workbook workbook: Workbook the table has been added to. :param int row: Row where the table will start in the sheet (used for resolving formulas). :param int col: Column where the table will start in the sheet (used for resolving formulas). :param formula_values: dict to add pre-calculated formula values to (keyed by row, col). """
if workbook: prev_table = workbook.active_table workbook.active_table = self try: return self._get_data_impl(workbook, row, col, formula_values) finally: if workbook: workbook.active_table = prev_table
def msg(self, message, *args, **kwargs): """Shortcut to send a message through the connection. This function sends the input message through the connection. A target can be defined, else it will send it to the channel or user from the input Line, effectively responding on whatever triggered the command which calls this function to be called. If raw has not been set to True, formatting will be applied using the standard Python Formatting Mini-Language, using the additional given args and kwargs, along with some additional kwargs, such as the match object to easily access Regex matches, color codes and other things. http://docs.python.org/3.3/library/string.html#format-string-syntax """
target = kwargs.pop('target', None) raw = kwargs.pop('raw', False) if not target: target = self.line.sender.nick if self.line.pm else \ self.line.target if not raw: kw = { 'm': self, 'b': chr(2), 'c': chr(3), 'u': chr(31), } kw.update(kwargs) try: message = message.format(*args, **kw) except IndexError: if len(args) == 1 and isinstance(args[0], list): # Message might be: msg, [arg1, arg2], kwargs message = message.format(*args[0], **kw) else: raise self.connection.msg(target, message)
def clean(self, string, n_cols=None): """ Required reading! http://nedbatchelder.com/text/unipain.html Python 2 input string will be a unicode type (unicode code points). Curses will accept unicode if all of the points are in the ascii range. However, if any of the code points are not valid ascii curses will throw a UnicodeEncodeError: 'ascii' codec can't encode character, ordinal not in range(128). If we encode the unicode to a utf-8 byte string and pass that to curses, it will render correctly. Python 3 input string will be a string type (unicode code points). Curses will accept that in all cases. However, the n character count in addnstr will not be correct. If code points are passed to addnstr, curses will treat each code point as one character and will not account for wide characters. If utf-8 is passed in, addnstr will treat each 'byte' as a single character. Reddit's api sometimes chokes and double-encodes some html characters Praw handles the initial decoding, but we need to do a second pass just to make sure. See https://github.com/michael-lazar/rtv/issues/96 Example: &amp;amp; -> returned directly from reddit's api &amp; -> returned after PRAW decodes the html characters & -> returned after our second pass, this is the true value """
if n_cols is not None and n_cols <= 0: return '' if isinstance(string, six.text_type): string = unescape(string) if self.config['ascii']: if isinstance(string, six.binary_type): string = string.decode('utf-8') string = string.encode('ascii', 'replace') return string[:n_cols] if n_cols else string else: if n_cols: string = textual_width_chop(string, n_cols) if isinstance(string, six.text_type): string = string.encode('utf-8') return string
def check_loops_in_grpah(self, current=None, visited=[]): """ :param current: current node to check if visited :param visited: list of visited fields :raise: KittyException if loop found """
if current in visited: path = ' -> '.join(v.get_name() for v in (visited + [current])) raise KittyException('loop detected in model: %s' % path) current = current if current else self._root for conn in self._graph[current.hash()]: self.check_loops_in_grpah(conn.dst, visited + [conn.src])
def log_request(self, code='-', size='-'):
    """Selectively log an accepted request."""
    if self.server.logRequests:
        BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
def exception(self, e):
    """Log an error message.

    :param e: Exception to log.
    """
    self.logged_exception(e)
    self.logger.exception(e)
def postprocess_WC(self, entry):
    """
    Parse WC keywords.

    Subject keywords are usually semicolon-delimited.
    """
    if type(entry.WC) not in [str, unicode]:
        WC = u' '.join([unicode(k) for k in entry.WC])
    else:
        WC = entry.WC
    entry.WC = [k.strip().upper() for k in WC.split(';')]
def _time_variable_part(epoch, ref_epoch, trnd, periodic): """ Return sum of the time-variable part of the coefficients The formula is: G(t) = G(t0) + trnd*(t-t0) + asin1*sin(2pi/p1 * (t-t0)) + acos1*cos(2pi/p1 * (t-t0)) + asin2*sin(2pi/p2 * (t-t0)) + acos2*cos(2pi/p2 * (t-t0)) This function computes all terms after G(t0). """
delta_t = epoch - ref_epoch trend = trnd * delta_t periodic_sum = _np.zeros_like(trnd) for period in periodic: for trifunc in periodic[period]: coeffs = periodic[period][trifunc] if trifunc == 'acos': periodic_sum += coeffs * _np.cos(2 * _np.pi / period * delta_t) elif trifunc == 'asin': periodic_sum += coeffs * _np.sin(2 * _np.pi / period * delta_t) return trend + periodic_sum
def cache_intermediate_result(cls, result, readable_name=None): """Add result to the cached data. Parameters ---------- result : LazyResult Data to cache. readable_name : str Will be used when generating a name for this intermediate result. Returns ------- str A generated placeholder name uniquely identifying this intermediate result. """
from .lazy_result import LazyResult assert isinstance(result, LazyResult) dependency_name = Cache._generate_placeholder(readable_name) Cache._intermediate_results[dependency_name] = result return dependency_name
def set(self, x, y): """Set a pixel of the :class:`Canvas` object. :param x: x coordinate of the pixel :param y: y coordinate of the pixel """
x = normalize(x) y = normalize(y) col, row = get_pos(x, y) if type(self.chars[row][col]) != int: return self.chars[row][col] |= pixel_map[y % 4][x % 2]
def eigb(A, y0, eps, rmax=150, nswp=20, max_full_size=1000, verb=1): """ Approximate computation of minimal eigenvalues in tensor train format This function uses alternating least-squares algorithm for the computation of several minimal eigenvalues. If you want maximal eigenvalues, just send -A to the function. :Reference: S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov. Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm., 185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought :type y0: tensor :param eps: Accuracy required :type eps: float :param rmax: Maximal rank :type rmax: int :param kickrank: Addition rank, the larger the more robus the method, :type kickrank: int :rtype: A tuple (ev, tensor), where ev is a list of eigenvalues, tensor is an approximation to eigenvectors. :Example: >>> import tt >>> import tt.eigb >>> d = 8; f = 3 >>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1 >>> x = tt.rand(n, d * f, r) >>> a = tt.qlaplace_dd([8, 8, 8]) >>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0) Solving a block eigenvalue problem Looking for 8 eigenvalues with accuracy 1E-06 swp: 1 er = 35.93 rmax:19 swp: 2 er = 4.51015E-04 rmax:18 swp: 3 er = 1.87584E-12 rmax:17 Total number of matvecs: 0 >>> print ev [ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448 0.0013448 0.00164356] """
ry = y0.r.copy() lam = tt_eigb.tt_block_eig.tt_eigb(y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, eps, rmax, ry[y0.d], 0, nswp, max_full_size, verb) y = tensor() y.d = y0.d y.n = A.n.copy() y.r = ry y.core = tt_eigb.tt_block_eig.result_core.copy() tt_eigb.tt_block_eig.deallocate_result() y.get_ps() return y, lam
def unpack(gh):
    """
    Unpacks gh list into l m g h type list

    Parameters
    ----------
    gh : list of gauss coefficients (as returned by, e.g., doigrf)

    Returns
    -------
    data : nested list of [[l, m, g, h], ...]
    """
    data = []
    k, l = 0, 1
    while k + 1 < len(gh):
        for m in range(l + 1):
            if m == 0:
                data.append([l, m, gh[k], 0])
                k += 1
            else:
                data.append([l, m, gh[k], gh[k + 1]])
                k += 2
        l += 1
    return data
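A tiny worked example with placeholder degree-1 and degree-2 coefficients (not real IGRF values), showing how the flat list is regrouped:

# gh = [g10, g11, h11, g20, g21, h21, g22, h22]
gh = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
print(unpack(gh))
# [[1, 0, 1.0, 0], [1, 1, 2.0, 3.0],
#  [2, 0, 4.0, 0], [2, 1, 5.0, 6.0], [2, 2, 7.0, 8.0]]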
def mark_and_add_effort(self, modulus, json_info): """ Inserts factorization effort for vulnerable modulus into json_info :param modulus: :param json_info: :return: """
META_AMZ_FACT = 92. / 152. # conversion from university cluster to AWS AMZ_C4_PRICE = 0.1 # price of 2 AWS CPUs per hour length = int(ceil(log(modulus, 2))) length_ceiling = int(ceil(length / 32)) * 32 if length_ceiling in self.length_to_time_years: effort_time = self.length_to_time_years[length_ceiling] else: effort_time = -1 if effort_time > 0: effort_time *= META_AMZ_FACT # scaling to more powerful AWS CPU effort_price = effort_time * 365.25 * 24 * 0.5 * AMZ_C4_PRICE else: effort_price = -1 json_info['marked'] = True json_info['time_years'] = effort_time json_info['price_aws_c4'] = effort_price return json_info
def printlet(flatten=False, **kwargs): """ Print chunks of data from a chain :param flatten: whether to flatten data chunks :param kwargs: keyword arguments as for :py:func:`print` If ``flatten`` is :py:const:`True`, every chunk received is unpacked. This is useful when passing around connected data, e.g. from :py:func:`~.enumeratelet`. Keyword arguments via ``kwargs`` are equivalent to those of :py:func:`print`. For example, passing ``file=sys.stderr`` is a simple way of creating a debugging element in a chain: .. code:: debug_chain = chain[:i] >> printlet(file=sys.stderr) >> chain[i:] """
chunk = yield if flatten: while True: print(*chunk, **kwargs) chunk = yield chunk else: while True: print(chunk, **kwargs) chunk = yield chunk
def pathways(self, fraction=1.0, maxiter=1000): r"""Decompose flux network into dominant reaction paths. Parameters ---------- fraction : float, optional Fraction of total flux to assemble in pathway decomposition maxiter : int, optional Maximum number of pathways for decomposition Returns ------- paths : list List of dominant reaction pathways capacities: list List of capacities corresponding to each reactions pathway in paths References ---------- .. [1] P. Metzner, C. Schuette and E. Vanden-Eijnden. Transition Path Theory for Markov Jump Processes. Multiscale Model Simul 7: 1192-1219 (2009) """
return tptapi.pathways(self.net_flux, self.A, self.B, fraction=fraction, maxiter=maxiter)
def color(self, value): """Set the color to a new value (tuple). Renders the text if needed."""
if value != self.color: self._color = value self._render()
def libvlc_audio_set_callbacks(mp, play, pause, resume, flush, drain, opaque): """Set callbacks and private data for decoded audio. Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}() to configure the decoded audio format. @param mp: the media player. @param play: callback to play audio samples (must not be NULL). @param pause: callback to pause playback (or NULL to ignore). @param resume: callback to resume playback (or NULL to ignore). @param flush: callback to flush audio buffers (or NULL to ignore). @param drain: callback to drain audio buffers (or NULL to ignore). @param opaque: private pointer for the audio callbacks (as first parameter). @version: LibVLC 2.0.0 or later. """
f = _Cfunctions.get('libvlc_audio_set_callbacks', None) or \ _Cfunction('libvlc_audio_set_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None, None, MediaPlayer, AudioPlayCb, AudioPauseCb, AudioResumeCb, AudioFlushCb, AudioDrainCb, ctypes.c_void_p) return f(mp, play, pause, resume, flush, drain, opaque)
def get_string_camel_patterns(cls, name, min_length=0): """ Finds all permutations of possible camel casing of the given name :param name: str, the name we need to get all possible permutations and abbreviations for :param min_length: int, minimum length we want for abbreviations :return: list(list(str)), list casing permutations of list of abbreviations """
# Have to check for longest first and remove duplicates patterns = [] abbreviations = list(set(cls._get_abbreviations(name, output_length=min_length))) abbreviations.sort(key=len, reverse=True) for abbr in abbreviations: # We won't check for abbreviations that are stupid eg something with apparent camel casing within # the word itself like LeF, sorting from: # http://stackoverflow.com/questions/13954841/python-sort-upper-case-and-lower-case casing_permutations = list(set(cls._get_casing_permutations(abbr))) casing_permutations.sort(key=lambda v: (v.upper(), v[0].islower(), len(v))) permutations = [permutation for permutation in casing_permutations if cls.is_valid_camel(permutation) or len(permutation) <= 2] if permutations: patterns.append(permutations) return patterns
def _get_changed_docs(self, ancestral_commit_sha, doc_id_from_repo_path, doc_ids_to_check=None): """Returns the set of documents that have changed on the master since commit `ancestral_commit_sha` or `False` (on an error) 'doc_id_from_repo_path' is a required function if `doc_ids_to_check` is passed in, it should be an iterable list of IDs. Only IDs in this list will be returned. """
try: x = git(self.gitdir, self.gitwd, "diff-tree", "--name-only", "-r", ancestral_commit_sha, "master") except: _LOG.exception('diff-tree failed') return False touched = set() for f in x.split('\n'): found_id = doc_id_from_repo_path(f) if found_id: touched.add(found_id) if doc_ids_to_check: tc = set(doc_ids_to_check) return tc.intersection(touched) return touched
def Read(self): """See base class."""
raw_in = os.read(self.dev, self.GetInReportDataLength()) decoded_in = list(bytearray(raw_in)) return decoded_in
def satisfies_constraints(self, possible_solution): """Return True if the given solution is satisfied by all the constraints."""
for c in self._constraints: values = c.extract_values(possible_solution) if any(type(v) is NilObject for v in values) or not c(*values): return False return True
def show_history(self, status=None, nids=None, full_history=False, metadata=False): """ Print the history of the flow to stdout. Args: status: if not None, only the tasks with this status are select full_history: Print full info set, including nodes with an empty history. nids: optional list of node identifiers used to filter the tasks. metadata: print history metadata (experimental) """
nrows, ncols = get_terminal_size() works_done = [] # Loop on the tasks and show the history of the work is not in works_done for task in self.iflat_tasks(status=status, nids=nids): work = task.work if work not in works_done: works_done.append(work) if work.history or full_history: cprint(make_banner(str(work), width=ncols, mark="="), **work.status.color_opts) print(work.history.to_string(metadata=metadata)) if task.history or full_history: cprint(make_banner(str(task), width=ncols, mark="="), **task.status.color_opts) print(task.history.to_string(metadata=metadata)) # Print the history of the flow. if self.history or full_history: cprint(make_banner(str(self), width=ncols, mark="="), **self.status.color_opts) print(self.history.to_string(metadata=metadata))
def unregister_message_handler(self, target_or_handler): """Unregister a mpv script message handler for the given script message target name. You can also call the ``unregister_mpv_messages`` function attribute set on the handler function when it is registered. """
if isinstance(target_or_handler, str): del self._message_handlers[target_or_handler] else: for key, val in self._message_handlers.items(): if val == target_or_handler: del self._message_handlers[key]
def sam_pair_to_insert(s1, s2):
    """Returns insert size from pair of sam records, as long as their
    orientation is "innies". Otherwise returns None."""
    if s1.is_unmapped or s2.is_unmapped or (s1.tid != s2.tid) or (s1.is_reverse == s2.is_reverse):
        return None

    # If here, reads are both mapped to the same ref, and in opposite orientations
    if s1.is_reverse:
        end = s1.reference_end - 1
        start = s2.reference_start
    else:
        end = s2.reference_end - 1
        start = s1.reference_start

    if start < end:
        return end - start + 1
    else:
        return None
def locale_export(): """Exports for dealing with Click-based programs and ASCII/Unicode errors. RuntimeError: Click will abort further execution because Python 3 was configured to use ASCII as encoding for the environment. Consult https://click.palletsprojects.com/en/7.x/python3/ for mitigation steps. Looks up available locales on the system to find an appropriate one to pick, defaulting to C.UTF-8 which is globally available on newer systems. """
locale_to_use = "C.UTF-8" try: locales = subprocess.check_output(["locale", "-a"]).decode(errors="ignore").split("\n") except subprocess.CalledProcessError: locales = [] for locale in locales: if locale.lower().endswith(("utf-8", "utf8")): locale_to_use = locale break return "export LC_ALL=%s && export LANG=%s && " % (locale_to_use, locale_to_use)
def pearson(x, y):
    """
    Pearson's correlation implementation without scipy or numpy.

    :param list x: Dataset x
    :param list y: Dataset y
    :return: Population pearson correlation coefficient
    :rtype: float
    """
    mx = Decimal(mean(x))
    my = Decimal(mean(y))
    xm = [Decimal(i) - mx for i in x]
    ym = [Decimal(j) - my for j in y]
    sx = [i ** 2 for i in xm]
    sy = [j ** 2 for j in ym]
    num = sum([a * b for a, b in zip(xm, ym)])
    den = Decimal(sum(sx) * sum(sy)).sqrt()
    # Stdev of one (or both) of the scores is zero if the
    # denominator is zero. Dividing by zero is impossible, so
    # just check if it is zero before we tell it to divide.
    if den == 0.0:
        # TODO: Better message
        raise NoAffinityError("Standard deviation of either "
                              "users' scores is zero")
    return float(num / den)
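A quick sanity check of the formula, assuming mean and NoAffinityError from the same module are available: perfectly linearly related datasets give a coefficient of 1.

# x and y are perfectly positively correlated, so pearson returns 1.0
print(pearson([1, 2, 3, 4], [2, 4, 6, 8]))   # 1.0
# a constant series has zero variance, so the denominator is zero
# pearson([1, 2, 3], [5, 5, 5])              # raises NoAffinityError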
def GetAttributeNames(self): """Retrieves the names of all attributes. Returns: list[str]: attribute names. """
attribute_names = [] for attribute_name in iter(self.__dict__.keys()): # Not using startswith to improve performance. if attribute_name[0] == '_': continue attribute_names.append(attribute_name) return attribute_names
def get_column(column_name, node, context): """Get a column by name from the selectable. Args: column_name: str, name of the column to retrieve. node: SqlNode, the node the column is being retrieved for. context: CompilationContext, compilation specific metadata. Returns: column, the SQLAlchemy column if found. Raises an AssertionError otherwise. """
column = try_get_column(column_name, node, context) if column is None: selectable = get_node_selectable(node, context) raise AssertionError( u'Column "{}" not found in selectable "{}". Columns present are {}. ' u'Context is {}.'.format(column_name, selectable.original, [col.name for col in selectable.c], context)) return column
def _write_pidfile(pidfile):
    """Write file with current process ID."""
    pid = str(os.getpid())
    handle = open(pidfile, 'w')
    try:
        handle.write("%s\n" % pid)
    finally:
        handle.close()
def node_changed(self): """ Triggers the host model(s) :meth:`umbra.ui.models.GraphModel.node_changed` method. :return: Method success. :rtype: bool """
for model in umbra.ui.models.GraphModel.find_model(self): model.node_changed(self) return True
def from_dynacRepr(cls, pynacRepr): """ Construct a ``Steerer`` instance from the Pynac lattice element """
f = float(pynacRepr[1][0][0]) p = 'HV'[int(pynacRepr[1][0][1])] return cls(f, p)
def findAll(self, tag_name, params=None, fn=None, case_sensitive=False): """ Search for elements by their parameters using `Depth-first algorithm <http://en.wikipedia.org/wiki/Depth-first_search>`_. Args: tag_name (str): Name of the tag you are looking for. Set to "" if you wish to use only `fn` parameter. params (dict, default None): Parameters which have to be present in tag to be considered matching. fn (function, default None): Use this function to match tags. Function expects one parameter which is HTMLElement instance. case_sensitive (bool, default False): Use case sensitive search. Returns: list: List of :class:`HTMLElement` instances matching your \ criteria. """
output = [] if self.isAlmostEqual(tag_name, params, fn, case_sensitive): output.append(self) tmp = [] for el in self.childs: tmp = el.findAll(tag_name, params, fn, case_sensitive) if tmp: output.extend(tmp) return output
def search(self, query): """ Perform a (Lucene) search of the entity. :argument str query: a properly escaped Lucene query """
return self.client.request( self.path + "?" + urlencode([("query", query)]), )
def spa_tmplt_engine(htilde, kmin, phase_order, delta_f, piM, pfaN, pfa2, pfa3, pfa4, pfa5, pfl5, pfa6, pfl6, pfa7, amp_factor): """ Calculate the spa tmplt phase """
taylorf2_kernel(htilde.data, kmin, phase_order, delta_f, piM, pfaN, pfa2, pfa3, pfa4, pfa5, pfl5, pfa6, pfl6, pfa7, amp_factor)
def create_branches(self, branches): """ Create branches from a TreeBuffer or dict mapping names to type names Parameters ---------- branches : TreeBuffer or dict """
if not isinstance(branches, TreeBuffer): branches = TreeBuffer(branches) self.set_buffer(branches, create_branches=True)
def key_edited(self, path, new_key_str): """ Edits the key of a semantic data entry :param path: The path inside the tree store to the target entry :param str new_key_str: The new value of the target cell :return: """
tree_store_path = self.create_tree_store_path_from_key_string(path) if isinstance(path, string_types) else path if self.tree_store[tree_store_path][self.KEY_STORAGE_ID] == new_key_str: return dict_path = self.tree_store[tree_store_path][self.ID_STORAGE_ID] old_value = self.model.state.get_semantic_data(dict_path) self.model.state.remove_semantic_data(dict_path) if new_key_str == "": target_dict = self.model.state.semantic_data for element in dict_path[0:-1]: target_dict = target_dict[element] new_key_str = generate_semantic_data_key(list(target_dict.keys())) new_dict_path = self.model.state.add_semantic_data(dict_path[0:-1], old_value, key=new_key_str) self._changed_id_to = {':'.join(dict_path): new_dict_path} # use hashable key (workaround for tree view ctrl) self.reload_tree_store_data()
def _to_upper(self):
    """Convert sequences to upper case."""
    self.exon_seq = self.exon_seq.upper()
    self.three_prime_seq = [s.upper() for s in self.three_prime_seq]
    self.five_prime_seq = [s.upper() for s in self.five_prime_seq]
def find_optconf(self, pconfs): """Find the optimal Parallel configuration."""
# Save pconfs for future reference. self.set_pconfs(pconfs) # Select the partition on which we'll be running and set MPI/OMP cores. optconf = self.manager.select_qadapter(pconfs) return optconf
def pack_results(measurements: Sequence[Tuple[str, np.ndarray]]) -> bytes:
    """Pack measurement results into a byte string.

    Args:
        measurements: A sequence of tuples, one for each measurement, consisting
            of a string key and an array of boolean data. The data should be a
            2-D array indexed by (repetition, qubit_index). All data for all
            measurements must have the same number of repetitions.

    Returns:
        Packed bytes, as described in the unpack_results docstring below.

    Raises:
        ValueError if the measurement data do not have the compatible shapes.
    """
    if not measurements:
        return b''

    shapes = [(key, np.shape(data)) for key, data in measurements]
    if not all(len(shape) == 2 for _, shape in shapes):
        raise ValueError("Expected 2-D data: shapes={}".format(shapes))

    reps = shapes[0][1][0]
    if not all(shape[0] == reps for _, shape in shapes):
        raise ValueError(
            "Expected same reps for all keys: shapes={}".format(shapes))

    bits = np.hstack([np.asarray(data, dtype=bool) for _, data in measurements])
    bits = bits.reshape(-1)

    # Pad length to multiple of 8 if needed.
    remainder = len(bits) % 8
    if remainder:
        bits = np.pad(bits, (0, 8 - remainder), 'constant')

    # Pack in little-endian bit order.
    bits = bits.reshape((-1, 8))[:, ::-1]
    byte_arr = np.packbits(bits, axis=1).reshape(-1)

    return byte_arr.tobytes()
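A minimal sketch of the packing for a single two-repetition, two-qubit measurement (the key 'm' is a made-up example); the four bits are padded to one byte and packed in little-endian bit order as described above:

import numpy as np

data = np.array([[1, 0],
                 [0, 1]], dtype=bool)
packed = pack_results([('m', data)])
# flattened bits 1,0,0,1 padded to 1,0,0,1,0,0,0,0, bit-reversed per byte -> 0b00001001
print(packed)   # b'\t'  (the single byte 0x09)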
def wdiff(settings, wrap_with_html=False, fold_breaks=False, hard_breaks=False):
    """
    Returns the results of `wdiff` in a HTML compatible format.

    Needs a :cls:`settings.Settings` object.

    If *wrap_with_html* is set, the *diff* is returned in a full HTML
    document structure.

    If *fold_breaks* is set, `<ins>` and `<del>` tags are allowed to span
    line breaks.

    If *hard_breaks* is set, line breaks are replaced with `<br />` tags.
    """
    diff = generate_wdiff(settings.org_file, settings.new_file, fold_breaks)
    if wrap_with_html:
        return wrap_content(diff, settings, hard_breaks)
    else:
        return diff
def verify_identifiers(identifiers, n_items):
    """Ensure that identifiers has a compatible length and that its
    elements are unique"""
    if identifiers is None:
        return identifiers

    identifiers = np.array(identifiers, copy=False)

    # Check length for consistency
    if len(identifiers) != n_items:
        raise ValueError("identifiers has inconsistent dimension.")

    # Check that identifiers are unique
    if len(np.unique(identifiers)) != n_items:
        raise ValueError("identifiers contains duplicate values.")

    return identifiers
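A short sketch of the validation behaviour, with hypothetical identifiers and numpy assumed imported as np (matching the body):

ids = verify_identifiers(['a', 'b', 'c'], n_items=3)   # passes, returns the array
# verify_identifiers(['a', 'a', 'c'], n_items=3)       # ValueError: duplicate values
# verify_identifiers(['a', 'b'], n_items=3)            # ValueError: inconsistent dimension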
def set_aliases_and_defaults(self, aliases_config=None, default_properties=None): """ Set the alias config and defaults to use. Typically used when switching to a collection with a different schema. Args: aliases_config: An alias dict to use. Defaults to None, which means the default aliases defined in "aliases.json" is used. See constructor for format. default_properties: List of property names (strings) to use by default, if no properties are given to the 'properties' argument of query(). """
if aliases_config is None: with open(os.path.join(os.path.dirname(__file__), "aliases.json")) as f: d = json.load(f) self.aliases = d.get("aliases", {}) self.default_criteria = d.get("defaults", {}) else: self.aliases = aliases_config.get("aliases", {}) self.default_criteria = aliases_config.get("defaults", {}) # set default properties if default_properties is None: self._default_props, self._default_prop_dict = None, None else: self._default_props, self._default_prop_dict = \ self._parse_properties(default_properties)
def sanitize_for_serialization(self, obj): """ Builds a JSON POST object. If obj is None, return None. If obj is str, int, long, float, bool, return directly. If obj is datetime.datetime, datetime.date convert to string in iso8601 format. If obj is list, sanitize each element in the list. If obj is dict, return the dict. If obj is swagger model, return the properties dict. :param obj: The data to serialize. :return: The serialized form of data. """
if obj is None: return None elif isinstance(obj, self.PRIMITIVE_TYPES): return obj elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) elif isinstance(obj, (datetime, date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: # Convert model obj to dict except # attributes `swagger_types`, `attribute_map` # and attributes which value is not None. # Convert attribute name to json key in # model definition for request. obj_dict = {obj.attribute_map[attr]: getattr(obj, attr) for attr, _ in iteritems(obj.swagger_types) if getattr(obj, attr) is not None} return {key: self.sanitize_for_serialization(val) for key, val in iteritems(obj_dict)}
def left_parallel_line(ax, scale, i, **kwargs):
    """
    Draws the i-th line parallel to the left axis.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot
        The subplot to draw on.
    scale: float
        Simplex scale size.
    i: float
        The index of the line to draw
    kwargs: Dictionary
        Any kwargs to pass through to Matplotlib.
    """
    p1 = (i, scale - i, 0)
    p2 = (i, 0, scale - i)
    line(ax, p1, p2, **kwargs)
def _in_search_queryset(*, instance, index) -> bool: """Wrapper around the instance manager method."""
try: return instance.__class__.objects.in_search_queryset(instance.id, index=index) except Exception: logger.exception("Error checking object in_search_queryset.") return False
def Refresh(self): """Reloads the group object to synchronize with cloud representation. >>> clc.v2.Group("wa-1234").Refresh() """
self.dirty = False self.data = clc.v2.API.Call('GET','groups/%s/%s' % (self.alias,self.id), session=self.session) self.data['changeInfo']['createdDate'] = clc.v2.time_utils.ZuluTSToSeconds(self.data['changeInfo']['createdDate']) self.data['changeInfo']['modifiedDate'] = clc.v2.time_utils.ZuluTSToSeconds(self.data['changeInfo']['modifiedDate'])