def get_size_at_time(self, timestamp):
    """
    Get the size of the object at a specific time (snapshot).
    If the object was not alive/sized at that instant, return 0.
    """
    size = 0
    for (t, s) in self.snapshots:
        if t == timestamp:
            size = s.size
    return size
def updateNexttime(self, now):
    """
    Find the next time this appointment should be scheduled.
    Delete any nonrecurring record that just happened.
    """
    if self._recidxnexttime is not None and not self.recur:
        del self.recs[self._recidxnexttime]
    while self.recs and self.nexttime <= now:
        lowtime = 999999999999.9
        # Find the lowest next time of all of our recs (backwards, so we can delete)
        for i in range(len(self.recs) - 1, -1, -1):
            rec = self.recs[i]
            nexttime = rec.nexttime(self.nexttime)
            if nexttime == 0.0:
                # We blew by and missed a fixed-year appointment, either due to
                # clock shenanigans, this query going really long, or the
                # initial requirement being in the past
                logger.warning(f'Missed an appointment: {rec}')
                del self.recs[i]
                continue
            if nexttime < lowtime:
                lowtime = nexttime
                lowidx = i
        if not self.recs:
            break
        self._recidxnexttime = lowidx
        self.nexttime = lowtime
    if not self.recs:
        self._recidxnexttime = None
        self.nexttime = None
    return
def get_perceval_params_from_url(cls, url):
    """Get the perceval params given a URL for the data source."""
    params = []
    dparam = cls.get_arthur_params_from_url(url)
    params.append(dparam['url'])
    params.append(dparam['channel'])
    return params
def _get_vispy_font_filename(face, bold, italic):
    """Fetch a remote vispy font."""
    name = face + '-'
    name += 'Regular' if not bold and not italic else ''
    name += 'Bold' if bold else ''
    name += 'Italic' if italic else ''
    name += '.ttf'
    return load_data_file('fonts/%s' % name)
def _get_default_router(self, routers, router_name=None):
    """Returns the default router for ordering a dedicated host."""
    if router_name is None:
        for router in routers:
            if router['id'] is not None:
                return router['id']
    else:
        for router in routers:
            if router['hostname'] == router_name:
                return router['id']
    raise SoftLayer.SoftLayerError("Could not find valid default router")
def make_dbsource(**kwargs):
    """Returns a mapnik PostGIS or SQLite Datasource."""
    if 'spatialite' in connection.settings_dict.get('ENGINE'):
        kwargs.setdefault('file', connection.settings_dict['NAME'])
        return mapnik.SQLite(wkb_format='spatialite', **kwargs)
    names = (('dbname', 'NAME'), ('user', 'USER'),
             ('password', 'PASSWORD'), ('host', 'HOST'), ('port', 'PORT'))
    for mopt, dopt in names:
        val = connection.settings_dict.get(dopt)
        if val:
            kwargs.setdefault(mopt, val)
    return mapnik.PostGIS(**kwargs)
def create(self, language, query, tasks=values.unset, model_build=values.unset,
           field=values.unset):
    """
    Create a new QueryInstance

    :param unicode language: An ISO language-country string of the sample.
    :param unicode query: A user-provided string that uniquely identifies this
        resource as an alternative to the sid. It can be up to 2048 characters long.
    :param unicode tasks: Constrains the query to a set of tasks. Useful when you
        need to constrain the paths the user can take. Tasks should be comma
        separated: task-unique-name-1, task-unique-name-2
    :param unicode model_build: The Model Build Sid or unique name of the Model
        Build to be queried.
    :param unicode field: Constrains the query to a given Field within a task.
        Useful when you know the Field you are expecting. It accepts one field
        in the format task-unique-name-1:field-unique-name

    :returns: Newly created QueryInstance
    :rtype: twilio.rest.preview.understand.assistant.query.QueryInstance
    """
    data = values.of({
        'Language': language,
        'Query': query,
        'Tasks': tasks,
        'ModelBuild': model_build,
        'Field': field,
    })
    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )
    return QueryInstance(self._version, payload,
                         assistant_sid=self._solution['assistant_sid'])
async def execute(query):
    """Execute *SELECT*, *INSERT*, *UPDATE* or *DELETE* query asynchronously.

    :param query: peewee query instance created with ``Model.select()``,
        ``Model.update()`` etc.
    :return: result depends on query type, it's the same as for sync
        ``query.execute()``
    """
    if isinstance(query, (peewee.Select, peewee.ModelCompoundSelectQuery)):
        coroutine = select
    elif isinstance(query, peewee.Update):
        coroutine = update
    elif isinstance(query, peewee.Insert):
        coroutine = insert
    elif isinstance(query, peewee.Delete):
        coroutine = delete
    else:
        coroutine = raw_query
    return (await coroutine(query))
def isthai(word: str, ignore_chars: str = ".") -> bool:
    """
    Check if every character in a word is a Thai character.
    เป็นคำที่มีแต่อักษรไทยหรือไม่

    :param str word: input text
    :param str ignore_chars: characters to be ignored
        (i.e. will be considered as Thai)
    :return: True or False
    """
    if not ignore_chars:
        ignore_chars = ""
    for ch in word:
        if ch not in ignore_chars and not isthaichar(ch):
            return False
    return True
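A quick usage check (a sketch; assumes isthaichar is importable, as in pythainlp):

assert isthai("สวัสดี") is True
assert isthai("สวัสดี.") is True   # '.' is ignored by default
assert isthai("Hello") is False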
def create_checkered_image(width, height, c1=(154, 154, 154, 255),
                           c2=(100, 100, 100, 255), s=6):
    """
    Return a checkered image of size width x height.

    Arguments:
      * width: image width
      * height: image height
      * c1: first color (RGBA)
      * c2: second color (RGBA)
      * s: size of the squares
    """
    im = Image.new("RGBA", (width, height), c1)
    draw = ImageDraw.Draw(im, "RGBA")
    for i in range(s, width, 2 * s):
        for j in range(0, height, 2 * s):
            draw.rectangle(((i, j), (i + s - 1, j + s - 1)), fill=c2)
    for i in range(0, width, 2 * s):
        for j in range(s, height, 2 * s):
            draw.rectangle(((i, j), (i + s - 1, j + s - 1)), fill=c2)
    return im
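A brief usage sketch (requires Pillow's Image and ImageDraw, as used by the function above):

im = create_checkered_image(64, 48)  # default grey checkerboard, 6 px squares
im.save('checker.png')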
def _authenticate(self):
    """
    Attempt to authenticate the request using each authentication instance
    in turn.
    Returns a three-tuple of (authenticator, user, authtoken).
    """
    for authenticator in self.authenticators:
        try:
            user_auth_tuple = authenticator.authenticate(self)
        except exceptions.APIException:
            self._not_authenticated()
            raise
        if user_auth_tuple is not None:
            self._authenticator = authenticator
            self.user, self.auth = user_auth_tuple
            return
    self._not_authenticated()
def reset_queues(queues):
    """Restores the original queue._put() method."""
    for queue in queues:
        with queue.mutex:
            queue._put = queue._pebble_old_method
            delattr(queue, '_pebble_old_method')
            delattr(queue, '_pebble_lock')
def inline_css(html_message, encoding='unicode'):
    """
    Inlines all CSS in an HTML string

    Given an HTML document with CSS declared in the HEAD, inlines it into the
    applicable elements. Used primarily in the preparation of styled emails.

    Arguments:
        html_message -- a string of HTML, including CSS
    """
    document = etree.HTML(html_message)
    converter = Conversion()
    converter.perform(document, html_message, '', encoding=encoding)
    return converter.convertedHTML
def _is_chunk_markdown(source):
    """Return whether a chunk contains Markdown contents."""
    lines = source.splitlines()
    if all(line.startswith('# ') for line in lines):
        # The chunk is Markdown *unless* it is commented Python code.
        source = '\n'.join(line[2:] for line in lines
                           if not line[2:].startswith('#'))  # skip headers
        if not source:
            return True
        # Try to parse the chunk: if it fails, it is Markdown; otherwise,
        # it is Python.
        return not _is_python(source)
    return False
def signal(self, details):
    """Send a signal to all task processes."""
    log = self._params.get('log', self._discard)
    if '_signal' not in dir(self._parent) or not callable(getattr(self._parent, '_signal')):
        log.error("Event parent '%s' has no '_signal' method", self._name)
        return
    sig = utils.signum(self._handler_arg)
    if sig is None:
        # Note: the original logged sig._name here, which would raise an
        # AttributeError because sig is None; self._name is what is intended.
        log.error("Invalid signal '%s' for task '%s'", self._handler_arg, self._name)
        return
    log.info("sending %s to all '%s' processes", utils.signame(sig), self._name)
    self._parent._signal(sig)
def _request(self, text, properties, retries=0):
    """Send a request to the CoreNLP server.

    :param (str | unicode) text: raw text for the CoreNLP server to parse
    :param (dict) properties: properties that the server expects
    :return: request result
    """
    text = to_unicode(text)  # ensures unicode
    try:
        r = requests.post(self.server,
                          params={'properties': str(properties)},
                          data=text.encode('utf-8'))
        r.raise_for_status()
        return r
    except requests.ConnectionError as e:
        if retries > 5:
            logging.critical('Max retries exceeded!')
            raise e
        else:
            logging.critical(repr(e))
            logging.critical("It seems like we've temporarily run out of ports. "
                             "Taking a 30s break...")
            time.sleep(30)
            logging.critical("Retrying...")
            return self._request(text, properties, retries=retries + 1)
    except requests.HTTPError:
        if r.text == "CoreNLP request timed out. Your document may be too long.":
            raise TimeoutException(r.text)
        else:
            raise AnnotationException(r.text)
def ask_unicode_16(self, next_rva_ptr):
    """The next RVA is taken to be the one immediately following this one.

    Such RVA could indicate the natural end of the string and will be
    checked to see if there's a Unicode NULL character there.
    """
    if self.__get_word_value_at_rva(next_rva_ptr - 2) == 0:
        self.length = next_rva_ptr - self.rva_ptr
        return True
    return False
def get_removals_int_oxid(self):
    """
    Returns a set of delithiation steps, e.g. set([1.0, 2.0, 4.0]) etc. in
    order to produce integer oxidation states of the redox metals. If
    multiple redox metals are present, all combinations of reduction /
    oxidation are tested. Note that having more than 3 redox metals will
    likely slow down the algorithm.

    Examples:
        LiFePO4 will return [1.0]
        Li4Fe3Mn1(PO4)4 will return [1.0, 2.0, 3.0, 4.0]
        Li6V4(PO4)6 will return [4.0, 6.0] *note that this example
        is not normalized*

    Returns:
        Array of integer cation removals. If you double the unit cell,
        your answers will be twice as large!
    """
    # the elements that can possibly be oxidized
    oxid_els = [Element(spec.symbol) for spec in self.comp
                if is_redox_active_intercalation(spec)]
    numa = set()
    for oxid_el in oxid_els:
        numa = numa.union(
            self._get_int_removals_helper(self.comp.copy(), oxid_el,
                                          oxid_els, numa))
    # convert from num A in structure to num A removed
    num_cation = self.comp[Specie(self.cation.symbol, self.cation_charge)]
    return set([num_cation - a for a in numa])
def read_var_uint32(self):
    """Reads a varint from the stream, interprets this varint as an
    unsigned, 32-bit integer, and returns the integer.
    """
    i = self.read_var_uint64()
    if i > wire_format.UINT32_MAX:
        raise errors.DecodeError('Value out of range for uint32: %d' % i)
    return i
def str_presenter(dmpr, data):
    """Return correct str_presenter to write multiple lines to a yaml field.

    Source: http://stackoverflow.com/a/33300001
    """
    if is_multiline(data):
        return dmpr.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dmpr.represent_scalar('tag:yaml.org,2002:str', data)
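A plausible registration sketch with PyYAML; ``is_multiline`` is not shown in the source, so a minimal stand-in is defined here:

import yaml

def is_multiline(data):  # hypothetical stand-in for the helper used above
    return '\n' in data

yaml.add_representer(str, str_presenter)
print(yaml.dump({'msg': 'line one\nline two'}))  # emitted in block (|) style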
def add_progress(self, count, symbol='#', color=None, on_color=None, attrs=None):
    """Add a section of progress to the progressbar.

    The progress is captured by "count" and displayed as a fraction of the
    statusbar width proportional to this count over the total progress
    displayed. The progress will be displayed using the "symbol" character
    and the foreground and background colours and display style determined
    by the "color", "on_color" and "attrs" parameters. For these, use the
    colorama package to set up the formatting.
    """
    self._progress.add_progress(count, symbol, color, on_color, attrs)
def format_error(error: "GraphQLError") -> dict:
    """Format a GraphQL error

    Given a GraphQLError, format it according to the rules described by the
    "Response Format, Errors" section of the GraphQL Specification.
    """
    if not error:
        raise ValueError("Received null or undefined error.")
    formatted: Dict[str, Any] = dict(  # noqa: E701 (pycqa/flake8#394)
        message=error.message or "An unknown error occurred.",
        locations=error.locations,
        path=error.path,
    )
    if error.extensions:
        formatted.update(extensions=error.extensions)
    return formatted
def contour(x, y, z, ax, **kwargs):
    """
    Contour plot of 2d DataArray

    Wraps :func:`matplotlib:matplotlib.pyplot.contour`
    """
    primitive = ax.contour(x, y, z, **kwargs)
    return primitive
def _decode_token_compact(token):
    """
    Decode a compact-serialized JWT

    Returns {'header': ..., 'payload': ..., 'signature': ...}
    """
    header, payload, raw_signature, signing_input = _unpack_token_compact(token)
    token = {
        "header": header,
        "payload": payload,
        "signature": base64url_encode(raw_signature)
    }
    return token
def decipher(self, string):
    """Decipher string using Delastelle cipher according to initialised key.

    Example::

        plaintext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').decipher(ciphertext)

    :param string: The string to decipher.
    :returns: The deciphered string. The plaintext will be 1/3 the length
        of the ciphertext.
    """
    string = self.remove_punctuation(string, filter='[^' + self.chars + ']')
    ret = ''
    for i in range(0, len(string), 3):
        ind = tuple([int(string[i + k]) for k in [0, 1, 2]])
        ret += IND2L[ind]
    return ret
def cls_slots(self, cls: CLASS_OR_CLASSNAME) -> List[SlotDefinition]:
    """Return the list of slots directly included in the class definition.

    Includes slots whose domain is cls -- as declared in slot.domain or
    class.slots. Does not include slots declared in mixins, apply_to or
    is_a links.

    @param cls: class name or class definition name
    @return: all direct class slots
    """
    if not isinstance(cls, ClassDefinition):
        cls = self.schema.classes[cls]
    return [self.schema.slots[s] for s in cls.slots]
def Send(self, data, status=200, ctype="application/octet-stream",
         additional_headers=None, last_modified=0):
    """Sends a response to the client."""
    if additional_headers:
        additional_header_strings = [
            "%s: %s\r\n" % (name, val)
            for name, val in iteritems(additional_headers)
        ]
    else:
        additional_header_strings = []
    header = ""
    header += "HTTP/1.0 %s\r\n" % self.statustext[status]
    header += "Server: GRR Server\r\n"
    header += "Content-type: %s\r\n" % ctype
    header += "Content-Length: %d\r\n" % len(data)
    header += "Last-Modified: %s\r\n" % self.date_time_string(last_modified)
    header += "".join(additional_header_strings)
    header += "\r\n"
    self.wfile.write(header.encode("utf-8"))
    self.wfile.write(data)
def netmiko_save_config(
    task: Task, cmd: str = "", confirm: bool = False, confirm_response: str = ""
) -> Result:
    """
    Execute Netmiko save_config method

    Arguments:
        cmd(str, optional): Command used to save the configuration.
        confirm(bool, optional): Does the device prompt for confirmation
            before executing the save operation
        confirm_response(str, optional): Response sent to the device when it
            prompts for confirmation

    Returns:
        :obj:`nornir.core.task.Result`:
            * result (``str``): String showing the CLI output from the save
              operation
    """
    conn = task.host.get_connection("netmiko", task.nornir.config)
    if cmd:
        result = conn.save_config(
            cmd=cmd, confirm=confirm, confirm_response=confirm_response
        )
    else:
        result = conn.save_config(confirm=confirm, confirm_response=confirm_response)
    return Result(host=task.host, result=result, changed=True)
def _add_slide_number(self, slide_no):
    """Add the slide number to the output if enabled."""
    if self.builder.config.slide_numbers:
        self.body.append(
            '\n<div class="slide-no">%s</div>\n' % (slide_no,),
        )
def serialize(self):
    """Serialize this object into a simple dict.

    It is used when transferring data to other daemons over the network
    (http).

    Here is the generic function that simply exports the attributes declared
    in the properties dictionary and the running_properties of the object.

    :return: Dictionary containing key and value from properties and
        running_properties
    :rtype: dict
    """
    cls = self.__class__
    # id is not in *_properties
    res = {'uuid': self.uuid}
    for prop in cls.properties:
        if hasattr(self, prop) and getattr(self, prop, None) is not None:
            res[prop] = serialize(getattr(self, prop), True)
    for prop in cls.running_properties:
        if hasattr(self, prop) and getattr(self, prop, None) is not None:
            res[prop] = serialize(getattr(self, prop), True)
    return res
def checkup_git_repo_legacy(url, name=None, base_dir='~/repos',
                            verbose=False, prefix='', postfix=''):
    """Checkout or update a git repo."""
    if not name:
        match = re.match(r'.*/(.+)\.git', url)
        assert match, flo("Unable to extract repo name from '{url}'")
        name = match.group(1)
    assert name is not None, flo('Cannot extract repo name from repo: {url}')
    assert name != '', flo('Cannot extract repo name from repo: {url} (empty)')
    if verbose:
        name_blue = blue(name)
        print_msg(flo('{prefix}Checkout or update {name_blue}{postfix}'))
    if not exists(base_dir):
        run(flo('mkdir -p {base_dir}'))
    if not exists(flo('{base_dir}/{name}/.git')):
        run(flo(' && '.join(['cd {base_dir}',
                             'git clone {url} {name}'])),
            msg='clone repo')
    else:
        if verbose:
            print_msg('update: pull from origin')
        run(flo('cd {base_dir}/{name} && git pull'))
    return name
def next(self):
    """
    Get the next line of data.

    Returns
    -------
    tag : str
    data :
    """
    line = self.buffer.readline()
    while line == '\n':
        # Skip forward to the next line with content.
        line = self.buffer.readline()
    if line == '':
        # End of file.
        self.at_eof = True
        return None, None
    match = re.match(r'([A-Z]{2}|[C][1])\W(.*)', line)
    if match is not None:
        self.current_tag, data = match.groups()
    else:
        self.current_tag = self.last_tag
        data = line.strip()
    return self.current_tag, _cast(data)
def get_fixture_node(self, app_label, fixture_prefix):
    """
    Get all fixtures in given app with given prefix.

    :param str app_label: App label
    :param str fixture_prefix: first part of the fixture name
    :return: list of found fixtures.
    """
    app_nodes = self.get_app_nodes(app_label=app_label)
    nodes = [
        node for node in app_nodes
        if node[1].startswith(fixture_prefix)
    ]
    if len(nodes) > 1:
        raise MultipleFixturesFound(
            "The following fixtures with prefix '%s' are found in app '%s'"
            ": %s" % (
                fixture_prefix,
                app_label,
                ', '.join([node[1] for node in nodes])
            )
        )
    elif len(nodes) == 0:
        raise FixtureNotFound("Fixture with prefix '%s' not found in app "
                              "'%s'" % (fixture_prefix, app_label))
    return nodes
def execute(self):
    """Execute the test batch."""
    self.browser_config = BrowserConfig(
        runner=self,
        browser_id=BROME_CONFIG['runner_args']['localhost_runner'],
        browsers_config=BROME_CONFIG['browsers_config']
    )
    try:
        self.run()
    except KeyboardInterrupt:
        self.info_log("Test batch interrupted")
    except:
        tb = traceback.format_exc()
        self.error_log("Exception in run of the grid runner: %s" % str(tb))
        raise
    finally:
        self.terminate()
def print_ec2_info(region, instance_id, access_key_id, secret_access_key, username):
    """Outputs information about our EC2 instance."""
    data = get_ec2_info(instance_id=instance_id,
                        region=region,
                        access_key_id=access_key_id,
                        secret_access_key=secret_access_key,
                        username=username)
    log_green("region: %s" % data['region'])
    log_green("Instance_type: %s" % data['instance_type'])
    log_green("Instance state: %s" % data['state'])
    log_green("Public dns: %s" % data['public_dns_name'])
    log_green("Ip address: %s" % data['ip_address'])
    log_green("volume: %s" % data['volume'])
    log_green("user: %s" % data['username'])
    log_green("ssh -i %s %s@%s" % (env.key_filename, username, data['ip_address']))
def get_zoneID(self, headers, zone):
    """Get the zone id for the zone."""
    zoneIDurl = self.BASE_URL + '?name=' + zone
    zoneIDrequest = requests.get(zoneIDurl, headers=headers)
    zoneID = zoneIDrequest.json()['result'][0]['id']
    return zoneID
def is_avro(path_or_buffer):
    """Return True if path (or buffer) points to an Avro file.

    Parameters
    ----------
    path_or_buffer: path to file or file-like object
        Path to file
    """
    if is_str(path_or_buffer):
        fp = open(path_or_buffer, 'rb')
        close = True
    else:
        fp = path_or_buffer
        close = False
    try:
        header = fp.read(len(MAGIC))
        return header == MAGIC
    finally:
        if close:
            fp.close()
def get_below_left_key_rect(self):
    """Returns tuple of key and rect of the below left cell."""
    key_left = self.row, self.col - 1, self.tab
    key_below_left = self.row + 1, self.col - 1, self.tab
    border_width_right = \
        float(self.cell_attributes[key_below_left]["borderwidth_right"]) / 2.0
    border_width_bottom = \
        float(self.cell_attributes[key_left]["borderwidth_bottom"]) / 2.0
    rect_below_left = (self.x - border_width_right, self.y - self.height,
                       border_width_right, border_width_bottom)
    return key_below_left, rect_below_left
def _generate_SAX_single(self, sections, value):
    """
    Generate SAX representation (Symbolic Aggregate approXimation) for a
    single data point. Read more about it here: Assumption-Free Anomaly
    Detection in Time Series (http://alumni.cs.ucr.edu/~ratana/SSDBM05.pdf).

    :param dict sections: value sections.
    :param float value: value to be categorized.
    :return str: a SAX representation.
    """
    sax = 0
    for section_number in sections.keys():
        section_lower_bound = sections[section_number]
        if value >= section_lower_bound:
            sax = section_number
        else:
            break
    return str(sax)
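A worked example of the sections layout this appears to assume (section number to lower bound, iterated in ascending order):

sections = {0: float('-inf'), 1: -0.5, 2: 0.5}
# value  0.7 clears every bound      -> '2'
# value  0.0 stops before section 2  -> '1'
# value -1.0 only matches section 0  -> '0'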
def add(self, adgroup_id, title, img_url, nick=None):
    """xxxxx.xxxxx.creative.add
    ===================================
    Create a creative."""
    request = TOPRequest('xxxxx.xxxxx.creative.add')
    request['adgroup_id'] = adgroup_id
    request['title'] = title
    request['img_url'] = img_url
    if nick is not None:
        request['nick'] = nick
    self.create(self.execute(request),
                fields=['success', 'result', 'success', 'result_code',
                        'result_message'],
                models={'result': Creative})
    return self.result
def _prompt_started_hook(self):
    """Emit a signal when the prompt is ready."""
    if not self._reading:
        self._highlighter.highlighting_on = True
        self.sig_prompt_ready.emit()
def normalize_polynomial(coeffs, threshold=_L2_THRESHOLD):
    r"""Normalizes a polynomial in the :math:`L_2` sense.

    Does so on the interval :math:`\left[0, 1\right]` via
    :func:`polynomial_norm`.

    Args:
        coeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial /
            power basis.
        threshold (Optional[float]): The point :math:`\tau` below which a
            polynomial will be considered to be numerically equal to zero,
            applies to all :math:`f` with :math:`\| f \|_{L_2} < \tau`.

    Returns:
        numpy.ndarray: The normalized polynomial.
    """
    l2_norm = polynomial_norm(coeffs)
    if l2_norm < threshold:
        return np.zeros(coeffs.shape, order="F")
    else:
        coeffs /= l2_norm
        return coeffs
def complex_type(name=None):
    """Decorator for registering complex types."""
    def wrapped(cls):
        ParseType.type_mapping[name or cls.__name__] = cls
        return cls
    return wrapped
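A minimal usage sketch (Color is a hypothetical class; ParseType comes from the surrounding module):

@complex_type('color')
class Color:
    def __init__(self, r, g, b):
        self.r, self.g, self.b = r, g, b

# ParseType.type_mapping now maps 'color' -> Color; with @complex_type()
# and no argument, the class name 'Color' would be used as the key instead.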
def list_pages_ajax(request, invalid_move=False):
    """Render pages table for ajax function."""
    language = get_language_from_request(request)
    pages = Page.objects.root()
    context = {
        'invalid_move': invalid_move,
        'language': language,
        'pages': pages,
    }
    return render_to_response("admin/basic_cms/page/change_list_table.html",
                              context,
                              context_instance=RequestContext(request))
def __request_finish(self, queue_item, new_requests, request_failed=False):
    """Called when the crawler finished the given queue item.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The request/response pair
            that finished.
        new_requests list(:class:`nyawc.http.Request`): All the requests
            that were found during this request.
        request_failed (bool): True if the request failed (i.e. it needs to
            be moved to errored).
    """
    if self.__stopping:
        return
    del self.__threads[queue_item.get_hash()]
    if request_failed:
        new_queue_items = []
        self.queue.move(queue_item, QueueItem.STATUS_ERRORED)
    else:
        self.routing.increase_route_count(queue_item.request)
        new_queue_items = self.__add_scraped_requests_to_queue(queue_item,
                                                               new_requests)
        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)
    try:
        action = self.__options.callbacks.request_after_finish(
            self.queue, queue_item, new_queue_items)
    except Exception as e:
        action = None
        print(e)
        print(traceback.format_exc())
    queue_item.decompose()
    if action == CrawlerActions.DO_STOP_CRAWLING:
        self.__should_stop = True
    if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
        self.__should_spawn_new_requests = True
def sensors(self):
    """Return all known sensors.

    :return: list of :class:`Sensor` instances.
    """
    sensors = []
    try:
        while True:
            sensor = self.lib.tdSensor()
            sensors.append(Sensor(lib=self.lib, **sensor))
    except TelldusError as e:
        if e.error != const.TELLSTICK_ERROR_DEVICE_NOT_FOUND:
            raise
    return sensors
def move_to_collection(self, source_collection, destination_collection):
    """Move entity from source to destination collection."""
    # Remove from collection.
    self.collections.remove(source_collection)  # pylint: disable=no-member
    source_collection.data.remove(*self.data.all())  # pylint: disable=no-member
    # Add to collection.
    self.collections.add(destination_collection)  # pylint: disable=no-member
    destination_collection.data.add(*self.data.all())
def indentLine(self, block, autoIndent):
    """Indent line. Return filler or null."""
    indent = None
    if indent is None:
        indent = self.tryMatchedAnchor(block, autoIndent)
    if indent is None:
        indent = self.tryCComment(block)
    if indent is None and not autoIndent:
        indent = self.tryCppComment(block)
    if indent is None:
        indent = self.trySwitchStatement(block)
    if indent is None:
        indent = self.tryAccessModifiers(block)
    if indent is None:
        indent = self.tryBrace(block)
    if indent is None:
        indent = self.tryCKeywords(block, block.text().lstrip().startswith('{'))
    if indent is None:
        indent = self.tryCondition(block)
    if indent is None:
        indent = self.tryStatement(block)
    if indent is not None:
        return indent
    else:
        dbg("Nothing matched")
        return self._prevNonEmptyBlockIndent(block)
def setup_user_signals(self):
    """Setup the signals for the user page.

    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug("Setting up user page signals.")
    self.user_task_view_pb.clicked.connect(self.user_view_task)
    self.user_prj_view_pb.clicked.connect(self.user_view_prj)
    self.user_prj_add_pb.clicked.connect(self.user_add_prj)
    self.user_prj_remove_pb.clicked.connect(self.user_remove_prj)
    self.user_username_le.editingFinished.connect(self.user_save)
    self.user_first_le.editingFinished.connect(self.user_save)
    self.user_last_le.editingFinished.connect(self.user_save)
    self.user_email_le.editingFinished.connect(self.user_save)
def get_grade_entry(self, grade_entry_id):
    """Gets the ``GradeEntry`` specified by its ``Id``.

    arg:    grade_entry_id (osid.id.Id): ``Id`` of the ``GradeEntry``
    return: (osid.grading.GradeEntry) - the grade entry
    raise:  NotFound - ``grade_entry_id`` not found
    raise:  NullArgument - ``grade_entry_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resource
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('grading',
                                     collection='GradeEntry',
                                     runtime=self._runtime)
    result = collection.find_one(
        dict({'_id': ObjectId(self._get_id(grade_entry_id,
                                           'grading').get_identifier())},
             **self._view_filter()))
    return objects.GradeEntry(osid_object_map=result,
                              runtime=self._runtime,
                              proxy=self._proxy)
def start_codon_spliced_offsets(self):
    """
    Offsets from start of spliced mRNA transcript of nucleotides in the
    start codon.
    """
    offsets = [
        self.spliced_offset(position)
        for position in self.start_codon_positions
    ]
    return self._contiguous_offsets(offsets)
def update_db(self, new_values):
    """Update database values and application configuration.

    The provided keys must be defined in the ``WAFFLE_CONFS`` setting.

    Arguments:
        new_values (dict): dict of configuration variables and their values

    The dict has the following structure:

        {
            'MY_CONFIG_VAR': <CONFIG_VAL>,
            'MY_CONFIG_VAR1': <CONFIG_VAL1>
        }
    """
    confs = self.app.config.get('WAFFLE_CONFS', {})
    to_update = {}
    for key in new_values.keys():
        # Some things cannot be changed...
        if key.startswith('WAFFLE_'):
            continue
        # No arbitrary keys
        if key not in confs.keys():
            continue
        value = new_values[key]
        self.configstore.put(key, util.serialize(value))
        self.configstore.commit()
        to_update[key] = value
    # Update config
    if not to_update:
        return
    self.app.config.update(to_update)
    # Notify other processes
    if self.app.config.get('WAFFLE_MULTIPROC', False):
        self.notify(self)
def delete_node(node_id, purge_data, **kwargs):
    """
    Remove a node from the DB completely.

    If there are attributes on the node, use purge_data to try to delete
    the data. If no other resources link to this data, it will be deleted.
    """
    user_id = kwargs.get('user_id')
    try:
        node_i = db.DBSession.query(Node).filter(Node.id == node_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Node %s not found" % (node_id))
    group_items = db.DBSession.query(ResourceGroupItem).filter(
        ResourceGroupItem.node_id == node_id).all()
    for gi in group_items:
        db.DBSession.delete(gi)
    if purge_data == 'Y':
        _purge_datasets_unique_to_resource('NODE', node_id)
    log.info("Deleting node %s, id=%s", node_i.name, node_id)
    node_i.network.check_write_permission(user_id)
    db.DBSession.delete(node_i)
    db.DBSession.flush()
    return 'OK'
def get_hw_platform(self, udi):
    """Return the HW platform information from the device."""
    platform = None
    try:
        pid = udi['pid']
        if pid == '':
            self.log("Empty PID. Use the hw family from the platform string.")
            return self.raw_family
        match = re.search(self.pid2platform_re, pid)
        if match:
            platform = match.group(1)
    except KeyError:
        pass
    return platform
def delbr(self, name):
    """Set the device down and delete the bridge."""
    self.getbr(name)  # Check if exists
    _runshell([ipexe, 'link', 'set', 'dev', name, 'down'],
              "Could not set link down for %s." % name)
    _runshell([brctlexe, 'delbr', name],
              "Could not delete bridge %s." % name)
def delete_node(self, node_name):
    """
    Deletes this node and all edges referencing it.

    Args:
        node_name (str): The name of the node to delete.

    Raises:
        KeyError: Raised if the node does not exist in the graph.
    """
    graph = self.graph
    if node_name not in graph:
        raise KeyError('node %s does not exist' % node_name)
    graph.pop(node_name)
    for node, edges in graph.items():
        if node_name in edges:
            edges.remove(node_name)
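A self-contained sketch of the structure this assumes, with self.graph as a dict mapping each node name to a mutable set of neighbour names:

graph = {'a': {'b'}, 'b': {'a'}, 'c': {'a', 'b'}}
graph.pop('b')
for node, edges in graph.items():
    if 'b' in edges:
        edges.remove('b')
assert graph == {'a': set(), 'c': {'a'}}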
def from_header(cls, header: BlockHeader, chaindb: BaseChainDB) -> BaseBlock:
    """
    Returns the block denoted by the given block header.
    """
    if header.uncles_hash == EMPTY_UNCLE_HASH:
        uncles = []  # type: List[BlockHeader]
    else:
        uncles = chaindb.get_block_uncles(header.uncles_hash)
    transactions = chaindb.get_block_transactions(header, cls.get_transaction_class())
    return cls(
        header=header,
        transactions=transactions,
        uncles=uncles,
    )
def patch():
    """Patch PynamoDB so it generates subsegments when calling DynamoDB."""
    import pynamodb
    if hasattr(botocore.vendored.requests.sessions, '_xray_enabled'):
        return
    setattr(botocore.vendored.requests.sessions, '_xray_enabled', True)
    wrapt.wrap_function_wrapper(
        'botocore.vendored.requests.sessions',
        'Session.send',
        _xray_traced_pynamodb,
    )
def dvsep(s1, s2):
    """
    Calculate the time derivative of the separation angle between
    two input states, S1 and S2.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvsep_c.html

    :param s1: State vector of the first body.
    :type s1: 6-Element Array of floats
    :param s2: State vector of the second body.
    :type s2: 6-Element Array of floats
    :return: The time derivative of the angular separation between S1 and S2.
    :rtype: float
    """
    assert len(s1) == 6 and len(s2) == 6
    s1 = stypes.toDoubleVector(s1)
    s2 = stypes.toDoubleVector(s2)
    return libspice.dvsep_c(s1, s2)
def number_cwt_peaks(x, n):
    """
    This feature calculator searches for different peaks in x. To do so, x
    is smoothed by a ricker wavelet for widths ranging from 1 to n. This
    feature calculator returns the number of peaks that occur at enough
    width scales and with sufficiently high Signal-to-Noise-Ratio (SNR).

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param n: maximum width to consider
    :type n: int
    :return: the value of this feature
    :return type: int
    """
    return len(find_peaks_cwt(vector=x,
                              widths=np.array(list(range(1, n + 1))),
                              wavelet=ricker))
def update_resources_from_dict(self, res, types=None, names=None, languages=None):
    """
    Update or add resources from resource dict.

    types = a list of resource types to update (None = all)
    names = a list of resource names to update (None = all)
    languages = a list of resource languages to update (None = all)
    """
    UpdateResourcesFromDict(self.filename, res, types, names, languages)
# Note: this generator must be wrapped with contextlib.contextmanager for
# the documented usage to work; the decorator was presumably dropped in
# extraction.
@contextmanager
def work_dir(path):
    """
    Context manager for executing commands in some working directory.
    Returns to the previous wd when finished.

    Usage:
        >>> with work_dir(path):
        ...     subprocess.call('git status')
    """
    starting_directory = os.getcwd()
    try:
        os.chdir(path)
        yield
    finally:
        os.chdir(starting_directory)
def facet_by(self, column):
    """
    Faceting creates new TableFu instances with rows matching each possible
    value.
    """
    faceted_spreadsheets = {}
    for row in self.rows:
        if row[column]:
            col = row[column].value
            if col in faceted_spreadsheets:
                faceted_spreadsheets[col].append(row.cells)
            else:
                faceted_spreadsheets[col] = []
                faceted_spreadsheets[col].append(row.cells)
    # create a new TableFu instance for each facet
    tables = []
    for k, v in faceted_spreadsheets.items():
        v.insert(0, self.default_columns)
        table = TableFu(v)
        table.faceted_on = k
        table.formatting = self.formatting
        table.options = self.options
        tables.append(table)
    tables.sort(key=lambda t: t.faceted_on)
    return tables
def get_all_dex(self):
    """
    Return the raw data of all classes dex files.

    :rtype: a generator
    """
    try:
        yield self.get_file("classes.dex")
        # Multidex support
        basename = "classes%d.dex"
        for i in range(2, sys.maxsize):
            yield self.get_file(basename % i)
    except FileNotPresent:
        pass
def onMessageSeen(
    self,
    seen_by=None,
    thread_id=None,
    thread_type=ThreadType.USER,
    seen_ts=None,
    ts=None,
    metadata=None,
    msg=None,
):
    """
    Called when the client is listening, and somebody marks a message as seen.

    :param seen_by: The ID of the person who marked the message as seen
    :param thread_id: Thread ID that the action was sent to.
        See :ref:`intro_threads`
    :param thread_type: Type of thread that the action was sent to.
        See :ref:`intro_threads`
    :param seen_ts: A timestamp of when the person saw the message
    :param ts: A timestamp of the action
    :param metadata: Extra metadata about the action
    :param msg: A full set of the data received
    :type thread_type: models.ThreadType
    """
    log.info(
        "Messages seen by {} in {} ({}) at {}s".format(
            seen_by, thread_id, thread_type.name, seen_ts / 1000
        )
    )
def add_request_ids_from_environment(logger, name, event_dict):
    """Custom processor adding request IDs to the log event, if available."""
    if ENV_APIG_REQUEST_ID in os.environ:
        event_dict['api_request_id'] = os.environ[ENV_APIG_REQUEST_ID]
    if ENV_LAMBDA_REQUEST_ID in os.environ:
        event_dict['lambda_request_id'] = os.environ[ENV_LAMBDA_REQUEST_ID]
    return event_dict
def sargasso_chart(self):
    """Make the sargasso plot."""
    # Config for the plot
    config = {
        'id': 'sargasso_assignment_plot',
        'title': 'Sargasso: Assigned Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    # We only want to plot the reads at the moment
    return bargraph.plot(self.sargasso_data,
                         [name for name in self.sargasso_keys
                          if 'Reads' in name],
                         config)
def disable_gui(self):
    """Disable GUI event loop integration.

    If an application was registered, this sets its ``_in_event_loop``
    attribute to False. It then calls :meth:`clear_inputhook`.
    """
    gui = self._current_gui
    if gui in self.apps:
        self.apps[gui]._in_event_loop = False
    return self.clear_inputhook()
def get_lexers(main_lex, exam_lex, tool_lex):
    """Gets all the lexer wrappers."""
    if not main_lex:
        return None, None, None
    lexer = None
    if main_lex:
        if issubclass(main_lex, PromptLex):
            lexer = main_lex
        elif issubclass(main_lex, PygLex):
            lexer = PygmentsLexer(main_lex)
    if exam_lex:
        if issubclass(exam_lex, PygLex):
            exam_lex = PygmentsLexer(exam_lex)
    if tool_lex:
        if issubclass(tool_lex, PygLex):
            tool_lex = PygmentsLexer(tool_lex)
    return lexer, exam_lex, tool_lex
def battery_reported(self, voltage, rawVoltage):
    """Battery reported."""
    self._update_attribute(BATTERY_PERCENTAGE_REMAINING, voltage)
    self._update_attribute(self.BATTERY_VOLTAGE_ATTR, int(rawVoltage / 100))
def dump(self, filename):
    """
    Dumps statistics.

    @param filename: filename where stats will be dumped; filename is
        created and must not exist prior to this call.
    @type filename: string
    """
    flags = os.O_WRONLY | os.O_CREAT | os.O_NOFOLLOW | os.O_EXCL
    fd = os.open(filename, flags, 0o0600)
    os.write(fd, bytes(self.__str__(), locale.getpreferredencoding()))
    os.close(fd)
def main(argv):
    """Program entry point.

    :param argv: command-line arguments
    :type argv: :class:`list`
    """
    author_strings = []
    for name, email in zip(metadata.authors, metadata.emails):
        author_strings.append('Author: {0} <{1}>'.format(name, email))
    epilog = '''
{project} {version}

{authors}
URL: <{url}>
'''.format(
        project=metadata.project,
        version=metadata.version,
        authors='\n'.join(author_strings),
        url=metadata.url)
    arg_parser = argparse.ArgumentParser(
        prog=argv[0],
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=metadata.description,
        epilog=epilog)
    arg_parser.add_argument(
        '-V', '--version',
        action='version',
        version='{0} {1}'.format(metadata.project, metadata.version))
    arg_parser.parse_args(args=argv[1:])
    print(epilog)
    return 0
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'deployment') and self.deployment is not None:
        _dict['deployment'] = self.deployment
    if hasattr(self, 'user_id') and self.user_id is not None:
        _dict['user_id'] = self.user_id
    return _dict
def p_type_def_3(t):
    """type_def : STRUCT ID struct_body SEMI"""
    id = t[2]
    body = t[3]
    lineno = t.lineno(1)
    if id_unique(id, 'struct', lineno):
        name_dict[id] = struct_info(id, body, lineno)
def _configure_logger():
    """Configure the logging module."""
    if not app.debug:
        _configure_logger_for_production(logging.getLogger())
    elif not app.testing:
        _configure_logger_for_debugging(logging.getLogger())
def do_sqlite_connect(dbapi_connection, connection_record):
    """Ensure SQLite checks foreign key constraints.

    For further details see the "Foreign key support" section on
    https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support
    """
    # Enable foreign key constraint checking
    cursor = dbapi_connection.cursor()
    cursor.execute('PRAGMA foreign_keys=ON')
    cursor.close()
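A plausible wiring sketch (the original registration isn't shown); SQLAlchemy's event API supports listening for "connect" on an engine:

from sqlalchemy import create_engine, event

engine = create_engine('sqlite:///example.db')
event.listen(engine, 'connect', do_sqlite_connect)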
def ClearStopTimes(self):
    """Remove all stop times from this trip.

    StopTime objects previously returned by GetStopTimes are unchanged but
    are no longer associated with this trip.
    """
    cursor = self._schedule._connection.cursor()
    cursor.execute('DELETE FROM stop_times WHERE trip_id=?', (self.trip_id,))
def load_data_file_to_net(self, filename):
    """Load Clustergrammer's dat format (saved as JSON)."""
    inst_dat = self.load_json_to_dict(filename)
    load_data.load_data_to_net(self, inst_dat)
def code_to_text(self):
    """Return the text representation of a code cell."""
    source = copy(self.source)
    comment_magic(source, self.language, self.comment_magics)
    options = []
    if self.cell_type == 'code' and self.language:
        options.append(self.language)
    filtered_metadata = {key: self.metadata[key] for key in self.metadata
                         if key not in ['active', 'language']}
    if filtered_metadata:
        options.append(metadata_to_md_options(filtered_metadata))
    return ['```{}'.format(' '.join(options))] + source + ['```']
def image_coarsen(xlevel=0, ylevel=0, image="auto", method='average'):
    """
    This will coarsen the image data by binning each xlevel+1 points along
    the x-axis and each ylevel+1 points along the y-axis.

    method can be 'average', 'min', or 'max'
    """
    if image == "auto":
        image = _pylab.gca().images[0]
    Z = _n.array(image.get_array())
    # store this image in the undo list
    global image_undo_list
    image_undo_list.append([image, Z])
    if len(image_undo_list) > 10:
        image_undo_list.pop(0)
    # images have transposed data
    image.set_array(_fun.coarsen_matrix(Z, ylevel, xlevel, method))
    # update the plot
    _pylab.draw()
def entropy_H(self, data):
    """Calculate the entropy of a chunk of data."""
    if len(data) == 0:
        return 0.0
    occurences = array.array('L', [0] * 256)
    for x in data:
        occurences[ord(x)] += 1
    entropy = 0
    for x in occurences:
        if x:
            p_x = float(x) / len(data)
            entropy -= p_x * math.log(p_x, 2)
    return entropy
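For intuition, a standalone check of the Shannon entropy formula this implements, H = -sum(p_x * log2(p_x)); this sketch operates on bytes rather than the ord()-based (Python 2 style) loop above:

import math
from collections import Counter

def shannon_entropy(data: bytes) -> float:
    """Shannon entropy in bits per byte."""
    if not data:
        return 0.0
    n = len(data)
    return -sum((c / n) * math.log2(c / n) for c in Counter(data).values())

assert shannon_entropy(b'aaaa') == 0.0    # one symbol: no uncertainty
assert shannon_entropy(b'ab' * 8) == 1.0  # two equiprobable symbols: 1 bit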
def delete_experiment(experiment):
    """Delete an experiment and all its data."""
    redis = _get_redis_connection()
    experiment = Experiment.find(redis, experiment)
    if experiment:
        experiment.delete()
    return redirect(url_for('.index'))
def tiles_from_geom(self, geometry, zoom):
    """
    Return all tiles intersecting with input geometry.

    - geometry: shapely geometry
    - zoom: zoom level
    """
    validate_zoom(zoom)
    if geometry.is_empty:
        return
    if not geometry.is_valid:
        raise ValueError("no valid geometry: %s" % geometry.type)
    if geometry.geom_type == "Point":
        yield self.tile_from_xy(geometry.x, geometry.y, zoom)
    elif geometry.geom_type == "MultiPoint":
        for point in geometry:
            yield self.tile_from_xy(point.x, point.y, zoom)
    elif geometry.geom_type in (
        "LineString", "MultiLineString", "Polygon", "MultiPolygon",
        "GeometryCollection"
    ):
        prepared_geometry = prep(clip_geometry_to_srs_bounds(geometry, self))
        for tile in self.tiles_from_bbox(geometry, zoom):
            if prepared_geometry.intersects(tile.bbox()):
                yield tile
def base_object(self, data):
    """
    Make sure to return all the existing filter fields for query results.
    """
    obj = {'id': data.get(self.id)}
    if self.parent is not None:
        obj['$parent'] = data.get(self.parent.id)
    return obj
def _forward(self, x_dot_parameters):
    """Helper to calculate the forward weights."""
    return forward(self._lattice, x_dot_parameters,
                   self.state_machine.n_states)
def hash(self):
    """
    :rtype: int
    :return: hash of the container
    """
    hashed = super(Repeat, self).hash()
    return khash(hashed, self._min_times, self._max_times, self._step,
                 self._repeats)
def any_unique(keys, axis=semantics.axis_default):
    """Returns True if any of the keys is unique."""
    index = as_index(keys, axis)
    return np.any(index.count == 1)
def _unique_resource_identifier_from_kwargs(**kwargs):
    """Chooses an identifier given different choices

    The unique identifier in BIG-IP's REST API at the time of this writing
    is called 'name'. This is in contrast to the unique identifier that is
    used by iWorkflow and BIG-IQ, which is sometimes 'name' and other times
    'uuid'.

    For example, in iWorkflow, consider this URI

    * https://10.2.2.3/mgmt/cm/cloud/tenants/{0}/services/iapp

    Then consider this iWorkflow URI

    * https://localhost/mgmt/cm/cloud/connectors/local/{0}

    In the first example, the identifier, {0}, is what we would normally
    consider a name. For example, "tenant1". In the second example though,
    the value is expected to be what we would normally consider to be a
    UUID. For example, '244bd478-374e-4eb2-8c73-6e46d7112604'.

    This method only tries to rectify the problem of which to use. I believe
    there might be some chance that the two can appear together, although I
    have not yet experienced it. If it is possible, I believe it would
    happen in BIG-IQ/iWorkflow land where the UUID and Name both have
    significance. That's why I deliberately prefer the UUID when it exists
    in the parameters sent to the URL.

    :param kwargs:
    :return:
    """
    name = kwargs.pop('name', '')
    uuid = kwargs.pop('uuid', '')
    id = kwargs.pop('id', '')
    if uuid:
        return uuid, kwargs
    elif id:
        # Used for /mgmt/cm/system/authn/providers/tmos on BIG-IP
        return id, kwargs
    else:
        return name, kwargs
def add_raw_code(self, string_or_list):
    """Add raw Gmsh code."""
    if _is_string(string_or_list):
        self._GMSH_CODE.append(string_or_list)
    else:
        assert isinstance(string_or_list, list)
        for string in string_or_list:
            self._GMSH_CODE.append(string)
    return
def _read_centroid_from_ndk_string(self, ndk_string, hypocentre):
    """
    Reads the centroid data from the ndk string to return an instance of
    the GCMTCentroid class.

    :param str ndk_string:
        String of data (line 3 of ndk format)
    :param hypocentre:
        Instance of the GCMTHypocentre class
    """
    centroid = GCMTCentroid(hypocentre.date, hypocentre.time)
    data = ndk_string[:58].split()
    centroid.centroid_type = data[0].rstrip(':')
    data = [float(x) for x in data[1:]]
    time_diff = data[0]
    if fabs(time_diff) > 1E-6:
        centroid._get_centroid_time(time_diff)
    centroid.time_error = data[1]
    centroid.latitude = data[2]
    centroid.latitude_error = data[3]
    centroid.longitude = data[4]
    centroid.longitude_error = data[5]
    centroid.depth = data[6]
    centroid.depth_error = data[7]
    centroid.depth_type = ndk_string[59:63]
    centroid.centroid_id = ndk_string[64:]
    return centroid
def run_step(context):
    """Set hierarchy into context with substitutions if it doesn't exist yet.

    context is a dictionary or dictionary-like. context['defaults'] must
    exist. It's a dictionary.

    Will iterate context['defaults'] and add these as new values where
    their keys don't already exist. While it's doing so, it will leave all
    other values in the existing hierarchy untouched.

    List merging is purely additive, with no checks for uniqueness or
    already existing list items. E.g. context [0,1,2] with
    contextMerge=[2,3,4] will result in [0,1,2,2,3,4].

    Keep this in mind especially where complex types like dicts nest inside
    a list - a merge will always add a new dict list item, not merge it
    into whatever dicts might exist on the list already.

    For example, say input context is:
        key1: value1
        key2: value2
        key3:
            k31: value31
            k32: value32
        defaults:
            key2: 'aaa_{key1}_zzz'
            key3:
                k33: value33
            key4: 'bbb_{key2}_yyy'

    This will result in return context:
        key1: value1
        key2: value2
        key3:
            k31: value31
            k32: value32
            k33: value33
        key4: bbb_value2_yyy
    """
    logger.debug("started")
    context.assert_key_has_value(key='defaults', caller=__name__)
    context.set_defaults(context['defaults'])
    logger.info(f"set {len(context['defaults'])} context item defaults.")
    logger.debug("done")
def timeid(self, data: ['SASdata', str] = None,
           by: str = None,
           id: str = None,
           out: [str, 'SASdata'] = None,
           procopts: str = None,
           stmtpassthrough: str = None,
           **kwargs: dict) -> 'SASresults':
    """
    Python method to call the TIMEID procedure

    Documentation link:
    http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_timeid_syntax.htm

    :param data: SASdata object or string. This parameter is required.
    :param by: The by variable can only be a string type.
    :param id: The id variable can only be a string type.
    :param out: The out variable can be a string or SASdata type.
    :param procopts: The procopts variable is a generic option available
        for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic
        option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
def goto_line(self, line_number):
    """Go to specified line number in current active editor."""
    if line_number:
        line_number = int(line_number)
        try:
            self.plugin.go_to_line(line_number)
        except AttributeError:
            pass
def link(self, href, **kwargs):
    """Returns a new link relative to this resource."""
    return link.Link(dict(href=href, **kwargs), self.base_uri)
def putcolslice(self, columnname, value, blc, trc, inc=[],
                startrow=0, nrow=-1, rowincr=1):
    """Put into a slice in a table column holding arrays.

    Its arguments are the same as for getcolslice and putcellslice.
    """
    self._putcolslice(columnname, value, blc, trc, inc, startrow,
                      nrow, rowincr)
def guess_type(s):
    """Attempt to convert string value into numeric type."""
    sc = s.replace(',', '')  # remove comma from potential numbers
    try:
        return int(sc)
    except ValueError:
        pass
    try:
        return float(sc)
    except ValueError:
        pass
    return s
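A quick usage check:

assert guess_type('1,234') == 1234
assert guess_type('3.14') == 3.14
assert guess_type('abc') == 'abc'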
def writeObjectReference(self, obj, output):
    """Tries to write an object reference, adding it to the references
    table. Does not write the actual object bytes or set the reference
    position. Returns a tuple of whether the object was a new reference
    (True if it was, False if it already was in the reference table)
    and the new output.
    """
    position = self.positionOfObjectReference(obj)
    if position is None:
        self.writtenReferences[obj] = len(self.writtenReferences)
        output += self.binaryInt(len(self.writtenReferences) - 1,
                                 byteSize=self.trailer.objectRefSize)
        return (True, output)
    else:
        output += self.binaryInt(position,
                                 byteSize=self.trailer.objectRefSize)
        return (False, output)
def simulate_roi(self, name=None, randomize=True, restore=False):
    """Generate a simulation of the ROI using the current best-fit model
    and replace the data counts cube with this simulation. The simulation
    is created by generating an array of Poisson random numbers with
    expectation values drawn from the model cube of the binned analysis
    instance. This function will update the counts cube both in memory and
    in the source map file. The counts cube can be restored to its
    original state by calling this method with ``restore`` = True.

    Parameters
    ----------
    name : str
        Name of the model component to be simulated. If None then the
        whole ROI will be simulated.

    restore : bool
        Restore the data counts cube to its original state.
    """
    self.logger.info('Simulating ROI')
    self._fitcache = None
    if restore:
        self.logger.info('Restoring')
        self._restore_counts_maps()
        self.logger.info('Finished')
        return
    for c in self.components:
        c.simulate_roi(name=name, clear=True, randomize=randomize)
    if hasattr(self.like.components[0].logLike, 'setCountsMap'):
        self._init_roi_model()
    else:
        self.write_xml('tmp')
        self._like = SummedLikelihood()
        for i, c in enumerate(self._components):
            c._create_binned_analysis('tmp.xml')
            self._like.addComponent(c.like)
        self._init_roi_model()
        self.load_xml('tmp')
    self.logger.info('Finished')
def setRecordState(self, recordState):
    """
    Sets the record state for this item to the given state.

    :param recordState | <XOrbRecordItem.State>
    """
    self._recordState = recordState
    try:
        is_colored = self.treeWidget().isColored()
    except AttributeError:
        return
    if not is_colored:
        return
    # determine the color for the item based on the state
    if recordState & XOrbRecordItem.State.Removed:
        clr = self.treeWidget().colorSet().color('RecordRemoved')
    elif recordState & XOrbRecordItem.State.New:
        clr = self.treeWidget().colorSet().color('RecordNew')
    elif recordState & XOrbRecordItem.State.Modified:
        clr = self.treeWidget().colorSet().color('RecordModified')
    else:
        clr = None
    # set the color based on the record state
    if clr is not None:
        clr = QColor(clr)
        clr.setAlpha(40)
        brush = QBrush(clr)
    else:
        brush = QBrush()
    for c in range(self.treeWidget().columnCount()):
        self.setBackground(c, brush)
def blocksplit_dtrajs(dtrajs, lag=1, sliding=True, shift=None):
    """Splits the discrete trajectories into approximately uncorrelated fragments.

    Will split trajectories into fragments of lengths lag or longer. These
    fragments are overlapping in order to conserve the transition counts at
    given lag. If sliding=True, the resulting trajectories will lead to
    exactly the same count matrix as when counted from dtrajs. If
    sliding=False (sampling at lag), the count matrices are only equal when
    also setting shift=0.

    Parameters
    ----------
    dtrajs : list of ndarray(int)
        Discrete trajectories
    lag : int
        Lag time at which counting will be done.
    sliding : bool
        True for splitting trajectories for sliding count, False if
        lag-sampling will be applied
    shift : None or int
        Start of first full tau-window. If None, shift will be randomly
        generated
    """
    dtrajs_new = []
    for dtraj in dtrajs:
        if len(dtraj) <= lag:
            continue
        if shift is None:
            s = np.random.randint(min(lag, dtraj.size - lag))
        else:
            s = shift
        if sliding:
            if s > 0:
                dtrajs_new.append(dtraj[0:lag + s])
            for t0 in range(s, dtraj.size - lag, lag):
                dtrajs_new.append(dtraj[t0:t0 + 2 * lag])
        else:
            for t0 in range(s, dtraj.size - lag, lag):
                dtrajs_new.append(dtraj[t0:t0 + lag + 1])
    return dtrajs_new
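A small usage sketch (assumes numpy imported as np, as in the function body):

dtrajs = [np.array([0, 0, 1, 1, 2, 2, 1, 0, 0, 1])]
fragments = blocksplit_dtrajs(dtrajs, lag=2, sliding=True, shift=0)
# With shift=0 and sliding counting, each fragment is a 2*lag window
# ([0:4], [2:6], [4:8], [6:10]), so lag-2 transition counts are conserved.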