def isempty(path):
    """Check if a path has zero length (also true if it's a directory)

    :param path: A string for the path. This should not have any wildcards.
    :returns: True if the path has zero length, False otherwise.
    """
    cmd = "hadoop fs -test -z %s"
    p = _hadoop_fs_command(cmd % path)
    p.communicate()
    return p.returncode == 0
def _expand_dataset_packages(dataset_label_dict):
    """Return the list of packages contained in a dataset, in case the
    dataset is a multi-dataset, e.g. 'lisa'. If the parameter does not
    point to a multi-dataset, return just that label in a list.

    :param str dataset_label_dict: labels of (possibly multi-)datasets
    :return: list of labels
    """
    new_dataset_label_dict = []
    for label in dataset_label_dict:
        dataset_metadata = data_urls[label]
        if isinstance(dataset_metadata, dict) and "package" in dataset_metadata:
            new_dataset_label_dict.extend(dataset_metadata["package"])
        else:
            new_dataset_label_dict.append(label)
    return new_dataset_label_dict
def cleanup_request(request):
    """
    Removes `oauth_` keys from various dicts on the request object,
    and returns the sanitized version.
    """
    for method_type in ('GET', 'PUT', 'POST', 'DELETE'):
        block = getattr(request, method_type, {})
        if any(k.startswith("oauth_") for k in block.keys()):
            sanitized = block.copy()
            for k in list(sanitized.keys()):
                if k.startswith("oauth_"):
                    sanitized.pop(k)
            setattr(request, method_type, sanitized)
    return request
def _get_first_all_link_record(self):
    """Request first ALL-Link record."""
    _LOGGER.debug("Starting: _get_first_all_link_record")
    _LOGGER.info('Requesting ALL-Link Records')
    if self.aldb.status == ALDBStatus.LOADED:
        self._next_all_link_rec_nak_retries = 3
        self._handle_get_next_all_link_record_nak(None)
        return
    self.aldb.clear()
    self._next_all_link_rec_nak_retries = 0
    msg = GetFirstAllLinkRecord()
    self.send_msg(msg, wait_nak=True, wait_timeout=.5)
    _LOGGER.debug("Ending: _get_first_all_link_record")
def _print_value(self):
    """Generates the table values."""
    for line in range(self.Lines_num):
        for col, length in zip(self.Table, self.AttributesLength):
            vals = list(col.values())[0]
            val = vals[line] if len(vals) != 0 and line < len(vals) else ''
            self.StrTable += "| "
            self.StrTable += self._pad_string(val, length * 2)
        self.StrTable += "|" + '\n'
        self._print_divide()
def get_placeholder_field_names(self):
    """
    Returns a list with the names of all PlaceholderFields.
    """
    return [field.name for field in self._meta.fields
            if field.get_internal_type() == 'PlaceholderField']
def group_memberships(self, user, include=None):
    """
    Retrieve the group memberships for this user.

    :param user: User object or id
    :param include: list of objects to sideload. `Side-loading API Docs
        <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
    """
    return self._query_zendesk(self.endpoint.group_memberships,
                               'group_membership', id=user, include=include)
def __init__(self, driverName=None, debug=False):
    """
    Constructs a new TTS engine instance.

    @param driverName: Name of the platform specific driver to use. If
        None, selects the default driver for the operating system.
    @type driverName: str
    @param debug: Debugging output enabled or not
    @type debug: bool
    """
    self.proxy = driver.DriverProxy(weakref.proxy(self), driverName, debug)
    # initialize other vars
    self._connects = {}
    self._inLoop = False
    self._driverLoop = True
    self._debug = debug
def binary_search(a, k):
    """
    Do a binary search in an array of objects ordered by '.key'.

    Returns the largest index i for which a[i].key <= k,
    like C++: a.upper_bound(k) - 1.
    """
    first, last = 0, len(a)
    while first < last:
        mid = (first + last) >> 1
        if k < a[mid].key:
            last = mid
        else:
            first = mid + 1
    return first - 1
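A minimal usage sketch; the `Item` type with a `.key` attribute is hypothetical, standing in for whatever objects the caller stores:

    from collections import namedtuple

    Item = namedtuple("Item", "key")
    a = [Item(1), Item(3), Item(3), Item(7)]
    binary_search(a, 3)   # -> 2, the largest index with a[i].key <= 3
    binary_search(a, 0)   # -> -1, every key exceeds k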
def _validate(self, all_valid_addresses):
    """Validate that all of the dependencies in the graph exist in the given addresses set."""
    for dependency, dependents in iteritems(self._dependent_address_map):
        if dependency not in all_valid_addresses:
            raise AddressLookupError(
                'Dependent graph construction failed: {} did not exist. Was depended on by:\n  {}'.format(
                    dependency.spec,
                    '\n  '.join(d.spec for d in dependents)
                )
            )
def _get_patches(installed_only=False, root=None):
    """
    List all known patches in repos.
    """
    patches = {}
    for element in __zypper__(root=root).nolock.xml.call(
            'se', '-t', 'patch').getElementsByTagName('solvable'):
        installed = element.getAttribute('status') == 'installed'
        if (installed_only and installed) or not installed_only:
            patches[element.getAttribute('name')] = {
                'installed': installed,
                'summary': element.getAttribute('summary'),
            }
    return patches
def diags2(symmat):
    """
    Diagonalize a symmetric 2x2 matrix.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/diags2_c.html

    :param symmat: A symmetric 2x2 matrix.
    :type symmat: 2x2-Element Array of floats
    :return: A diagonal matrix similar to symmat, and the rotation used
        as the similarity transformation.
    :rtype: tuple
    """
    symmat = stypes.toDoubleMatrix(symmat)
    diag = stypes.emptyDoubleMatrix(x=2, y=2)
    rotateout = stypes.emptyDoubleMatrix(x=2, y=2)
    libspice.diags2_c(symmat, diag, rotateout)
    return stypes.cMatrixToNumpy(diag), stypes.cMatrixToNumpy(rotateout)
def delete(self, path, data, **options):
    """Parses DELETE request options and dispatches a request."""
    data, options = self._update_request(data, options)
    return self.request('delete', path, data=data, **options)
def _is_converged(self):
    """Determine if calculation converged; for a relaxation (static)
    run we look for ionic (electronic) convergence in the output."""
    if self.is_relaxed():
        # relaxation run case
        return self._get_line(['End of', 'Geometry Optimization'], self.outputf, return_string=False)
    else:
        # static run case
        return self._get_line('convergence has been achieved', self.outputf, return_string=False)
def get_nested_attribute(obj, attribute):
    """
    Returns the value of the given (possibly dotted) attribute for the
    given object.

    If any of the parents on the nested attribute's name path are `None`,
    the value of the nested attribute is also assumed to be `None`.

    :raises AttributeError: If any attribute access along the attribute
        path fails with an `AttributeError`.
    """
    parent, attr = resolve_nested_attribute(obj, attribute)
    if parent is not None:
        attr_value = getattr(parent, attr)
    else:
        attr_value = None
    return attr_value
def peek(self, fmt):
    """Interpret next bits according to format string and return result.

    fmt -- Token string describing how to interpret the next bits.

    The position in the bitstring is not changed. If not enough bits are
    available then all bits to the end of the bitstring will be used.

    Raises ReadError if not enough bits are available.
    Raises ValueError if the format is not understood.

    See the docstring for 'read' for token examples.
    """
    pos_before = self._pos
    value = self.read(fmt)
    self._pos = pos_before
    return value
def lpr_kurtosis(frame, lpcorder=10):
    """
    frame: windowed signal
    return kurtosis of the linear prediction residual of the input signal
    """
    c = lpc(frame, lpcorder)
    coef = c[1:][::-1] * -1
    residuals = []
    # use lpcorder rather than a hardcoded 10, so non-default orders work
    for i in xrange(frame.size - lpcorder):
        residuals.append(frame[i + lpcorder] - np.sum(frame[i:i + lpcorder] * coef))
    residuals = np.array(residuals)
    return calc_kurtosis(residuals)
def contains_key(self, key):
    """
    Determines whether this multimap contains an entry with the key.

    **Warning: This method uses __hash__ and __eq__ methods of binary form
    of the key, not the actual implementations of __hash__ and __eq__
    defined in key's class.**

    :param key: (object), the specified key.
    :return: (bool), ``true`` if this multimap contains an entry for the specified key.
    """
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    return self._encode_invoke_on_key(multi_map_contains_key_codec, key_data,
                                      key=key_data, thread_id=thread_id())
def list_mapping(html_cleaned):
    """Map the preprocessed HTML document to a list and a dict, and extract
    the fake title.

    Keyword arguments:
    html_cleaned -- the preprocessed HTML source, as a string

    Return:
    unit_raw   -- the lines of web-page text
    init_dict  -- (index, line-length) pairs, sorted by line length in
                  descending order
    fake_title -- the fake title, i.e. the text line from the page's <title>
    """
    unit_raw = html_cleaned.split('\n')
    for i in unit_raw:
        c = CDM(i)
        if c.PTN != 0:
            fake_title = i
            break
    init_list = []
    init_dict = {}
    for i in unit_raw:
        init_list.append(len(i))
    for i in range(0, len(init_list)):
        init_dict[i] = init_list[i]
    init_dict = sorted(init_dict.items(), key=lambda item: item[1], reverse=True)
    try:
        log('debug', 'Mapping succeeded; extracted fake title: [{}]'.format(fake_title))
    except UnboundLocalError:
        fake_title = ''
        log('err', 'Fake title extraction failed')
    return unit_raw, init_dict, fake_title
def objectatrib(instance, atrib):
    """
    This filter is useful to execute an object method or get an object
    attribute dynamically. It takes into account that the atrib param
    can contain underscores.
    """
    atrib = atrib.replace("__", ".")
    atribs = atrib.split(".")
    obj = instance
    for atrib in atribs:
        if isinstance(obj, dict):
            result = obj[atrib]
        else:
            try:
                result = getattr(obj, atrib)()
            except Exception:
                result = getattr(obj, atrib)
        obj = result
    return result
def queryset(self, request):
    """
    Exclude replies from the listing since they are displayed inline as
    part of the listing. For proxy models with a cls attribute, limit
    comments to those classified as cls.
    """
    qs = super(CommentAdmin, self).queryset(request)
    qs = qs.filter(
        Q(user__is_staff=False) | Q(user__isnull=True),
        is_removed=False
    )
    cls = getattr(self, 'cls', None)
    if cls:
        qs = qs.filter(classifiedcomment__cls=self.cls)
    return qs.select_related('user', 'content_type')
def safe_listget(list_, index, default='?'):
    """ deprecated """
    if index >= len(list_):
        return default
    ret = list_[index]
    if ret is None:
        return default
    return ret
def post_flag_list(self, creator_id=None, creator_name=None, post_id=None,
                   reason_matches=None, is_resolved=None, category=None):
    """Function to get a list of post flags (Requires login).

    Parameters:
        creator_id (int): The user id of the flag's creator.
        creator_name (str): The name of the flag's creator.
        post_id (int): The post id of the flag.
    """
    params = {
        'search[creator_id]': creator_id,
        'search[creator_name]': creator_name,
        'search[post_id]': post_id,
    }
    return self._get('post_flags.json', params, auth=True)
def has_name_version(self, name: str, version: str) -> bool:
    """Check if there exists a network with the name/version combination in the database."""
    return self.session.query(exists().where(and_(Network.name == name,
                                                  Network.version == version))).scalar()
def validate_lengths(n_samples, lengths):
    """Validate lengths array against n_samples.

    Parameters
    ----------
    n_samples : integer
        Total number of samples.

    lengths : array-like of integers, shape (n_sequences,), optional
        Lengths of individual sequences in the input.

    Returns
    -------
    start : array of integers, shape (n_sequences,)
        Start indices of sequences.

    end : array of integers, shape (n_sequences,)
        One-past-the-end indices of sequences.
    """
    if lengths is None:
        lengths = [n_samples]
    lengths = np.asarray(lengths, dtype=np.int32)
    if lengths.sum() > n_samples:
        msg = "More than {0:d} samples in lengths array {1!s}"
        raise ValueError(msg.format(n_samples, lengths))
    end = np.cumsum(lengths)
    start = end - lengths
    return start, end
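A quick sketch of the expected output, following directly from the cumsum arithmetic above:

    start, end = validate_lengths(10, [4, 6])
    # start -> [0, 4], end -> [4, 10]: sequence 0 spans [0, 4), sequence 1 spans [4, 10)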
def ask_for_board_id(self):
    """Factored out in case the interface isn't a keyboard."""
    board_id = raw_input("paste in board id or url: ").strip()
    m = re.search(r"(?:https?://)?(?:trello.com)?/?b?/?([a-zA-Z]{8})/(?:.*)", board_id)
    if m:
        board_id = m.group(1)
    return board_id
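The regex accepts either a bare 8-character id or a full URL; tracing it on a hypothetical board URL:

    import re

    m = re.search(r"(?:https?://)?(?:trello.com)?/?b?/?([a-zA-Z]{8})/(?:.*)",
                  "https://trello.com/b/AbCdEfGh/my-board")
    m.group(1)  # -> 'AbCdEfGh'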
def _write_config_file(batch_id, caller_names, base_dir, data):
    """Write YAML configuration to generate an ensemble set of combined calls.
    """
    config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
    config_file = os.path.join(config_dir, "{0}-ensemble.yaml".format(batch_id))
    algorithm = data["config"]["algorithm"]
    econfig = {"ensemble": algorithm["ensemble"],
               "names": caller_names,
               "prep-inputs": False}
    intervals = validate.get_analysis_intervals(data, None, base_dir)
    if intervals:
        econfig["intervals"] = os.path.abspath(intervals)
    with open(config_file, "w") as out_handle:
        yaml.safe_dump(econfig, out_handle, allow_unicode=False, default_flow_style=False)
    return config_file
def auto_cleaned_path_stripped_uuid4(instance, filename: str) -> str:
    """
    Gets upload path in this format: {MODEL_NAME}/{UUID4}{SUFFIX}.
    Same as `upload_path_uuid4` but deletes the original file name from the user.

    :param instance: Instance of model or model class.
    :param filename: Uploaded file name.
    :return: Target upload path.
    """
    _, suffix = parse_filename(filename)
    base_dir = get_base_dir_from_object(instance)
    rand_uuid = uuid.uuid4()
    return os.path.join(base_dir, "{rand_uuid}{suffix}".format(rand_uuid=rand_uuid, suffix=suffix))
def load_tmy3_hourly_temp_data(self, start, end, read_from_cache=True, write_to_cache=True):
    """Load hourly TMY3 temperature data from start date to end date (inclusive).

    This is the primary convenience method for loading hourly TMY3 temperature data.

    Parameters
    ----------
    start : datetime.datetime
        The earliest date from which to load data.
    end : datetime.datetime
        The latest date until which to load data.
    read_from_cache : bool
        Whether or not to load data from cache.
    write_to_cache : bool
        Whether or not to write newly loaded data to cache.
    """
    return load_tmy3_hourly_temp_data(
        self.usaf_id,
        start,
        end,
        read_from_cache=read_from_cache,
        write_to_cache=write_to_cache,
    )
def from_text(text):
    """Convert text into a DNS rdata class value.
    @param text: the text
    @type text: string
    @rtype: int
    @raises dns.rdataclass.UnknownRdataclass: the class is unknown
    @raises ValueError: the rdata class value is not >= 0 and <= 65535
    """
    value = _by_text.get(text.upper())
    if value is None:
        match = _unknown_class_pattern.match(text)
        if match is None:
            raise UnknownRdataclass
        value = int(match.group(1))
        if value < 0 or value > 65535:
            raise ValueError("class must be >= 0 and <= 65535")
    return value
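A hedged sketch of the two lookup paths, assuming `_by_text` maps the standard mnemonics (e.g. 'IN' to 1) and `_unknown_class_pattern` matches the RFC 3597 generic `CLASSnnn` form, as in dnspython:

    from_text('IN')         # -> 1, via the _by_text registry
    from_text('CLASS4660')  # -> 4660, via the generic escape form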
def ProcessClientResourcesStats(self, client_id, status):
    """Process status message from a client and update the stats.

    Args:
      client_id: Client id.
      status: The status object returned from the client.
    """
    if hasattr(status, "child_session_id"):
        flow_path = status.child_session_id
    else:
        flow_path = "aff4:/%s/flows/%s" % (status.client_id, status.flow_id)
    resources = rdf_client_stats.ClientResources()
    resources.client_id = client_id
    resources.session_id = flow_path
    resources.cpu_usage.user_cpu_time = status.cpu_time_used.user_cpu_time
    resources.cpu_usage.system_cpu_time = status.cpu_time_used.system_cpu_time
    resources.network_bytes_sent = status.network_bytes_sent
    self.context.usage_stats.RegisterResources(resources)
def from_networkx(cls, graph, weight='weight'):
    r"""Import a graph from NetworkX.

    Edge weights are retrieved as an edge attribute, under the name
    specified by the ``weight`` parameter.

    Signals are retrieved from node attributes, and stored in the
    :attr:`signals` dictionary under the attribute name. `N`-dimensional
    signals that were broken during export are joined.

    Parameters
    ----------
    graph : :class:`networkx.Graph`
        A NetworkX graph object.
    weight : string or None, optional
        The edge attribute that holds the numerical values used as the edge
        weights. All edge weights are set to 1 if None, or not found.

    Returns
    -------
    graph : :class:`~pygsp.graphs.Graph`
        A PyGSP graph object.

    Notes
    -----
    The nodes are ordered according to :meth:`networkx.Graph.nodes`.

    In NetworkX, node attributes need not be set for every node. If a node
    attribute is not set for a node, a NaN is assigned to the corresponding
    signal for that node.

    If the graph is a :class:`networkx.MultiGraph`, multiedges are
    aggregated by summation.

    See Also
    --------
    from_graphtool : import from graph-tool
    load : load from a file

    Examples
    --------
    >>> import networkx as nx
    >>> graph = nx.Graph()
    >>> graph.add_edge(1, 2, weight=0.2)
    >>> graph.add_edge(2, 3, weight=0.9)
    >>> graph.add_node(4, sig=3.1416)
    >>> graph.nodes()
    NodeView((1, 2, 3, 4))
    >>> graph = graphs.Graph.from_networkx(graph)
    >>> graph.W.toarray()
    array([[0. , 0.2, 0. , 0. ],
           [0.2, 0. , 0.9, 0. ],
           [0. , 0.9, 0. , 0. ],
           [0. , 0. , 0. , 0. ]])
    >>> graph.signals
    {'sig': array([   nan,    nan,    nan, 3.1416])}

    """
    nx = _import_networkx()
    from .graph import Graph
    adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight)
    graph_pg = Graph(adjacency)
    for i, node in enumerate(graph.nodes()):
        for name in graph.nodes[node].keys():
            try:
                signal = graph_pg.signals[name]
            except KeyError:
                signal = np.full(graph_pg.n_vertices, np.nan)
                graph_pg.set_signal(signal, name)
            try:
                signal[i] = graph.nodes[node][name]
            except KeyError:
                pass  # attribute not set for node
    graph_pg._join_signals()
    return graph_pg
def create_url(self, db="", user="genome", host="genome-mysql.cse.ucsc.edu",
               password="", dialect="mysqldb"):
    """
    Internal: create a dburl from a set of parameters or the defaults on
    this object.
    """
    if os.path.exists(db):
        db = "sqlite:///" + db
    # Is this a DB URL? If so, use it directly
    if self.db_regex.match(db):
        self.db = self.url = db
        self.dburl = db
        self.user = self.host = self.password = ""
    else:
        self.db = db
        if user == "genome" and host != "genome-mysql.cse.ucsc.edu":
            import getpass
            user = getpass.getuser()
        self.host = host
        self.user = user
        self.password = (":" + password) if password else ""
        self.dburl = self.url.format(db=self.db, user=self.user, host=self.host,
                                     password=self.password, dialect=dialect)
def create_cas_login_url(cas_url, cas_route, service, renew=None, gateway=None):
    """ Create a CAS login URL.

    Keyword arguments:
    cas_url -- The url to the CAS (ex. http://sso.pdx.edu)
    cas_route -- The route where the CAS lives on server (ex. /cas)
    service -- (ex. http://localhost:5000/login)
    renew -- "true" or "false"
    gateway -- "true" or "false"

    Example usage:
    >>> create_cas_login_url(
    ...     'http://sso.pdx.edu',
    ...     '/cas',
    ...     'http://localhost:5000',
    ... )
    'http://sso.pdx.edu/cas?service=http%3A%2F%2Flocalhost%3A5000'
    """
    return create_url(
        cas_url,
        cas_route,
        ('service', service),
        ('renew', renew),
        ('gateway', gateway),
    )
def simxSetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode):
    """
    Please have a look at the function description/documentation in the V-REP user manual
    """
    return c_SetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode)
def get_charge(max_tdc, tdc_calibration_values, tdc_pixel_calibration):
    """Interpolate the TDC calibration for each pixel from 0 to max_tdc and
    return the charge from calibration."""
    charge_calibration = np.zeros(shape=(80, 336, max_tdc))
    for column in range(80):
        for row in range(336):
            actual_pixel_calibration = tdc_pixel_calibration[column, row, :]
            if np.any(actual_pixel_calibration != 0) and np.any(np.isfinite(actual_pixel_calibration)):
                selected_measurements = np.isfinite(actual_pixel_calibration)  # Select valid calibration steps
                selected_actual_pixel_calibration = actual_pixel_calibration[selected_measurements]
                selected_tdc_calibration_values = tdc_calibration_values[selected_measurements]
                interpolation = interp1d(x=selected_actual_pixel_calibration,
                                         y=selected_tdc_calibration_values,
                                         kind='slinear', bounds_error=False, fill_value=0)
                charge_calibration[column, row, :] = interpolation(np.arange(max_tdc))
    return charge_calibration
def _validate_configuration_type(self, configuration_type):
    """Validate configuration type.

    :param configuration_type: should be Startup or Running
    :raise Exception: if the configuration type is invalid
    """
    if configuration_type.lower() not in ('running', 'startup'):
        raise Exception(self.__class__.__name__,
                        'Configuration Type is invalid. Should be startup or running')
def convert_formula_to_atomic_fractions(formula):
    """
    Converts a chemical formula to an atomic fraction :class:`dict`.

    Args:
        formula (str): chemical formula, like Al2O3. No wildcards are accepted.
    """
    mole_fractions = {}
    total_mole_fraction = 0.0
    for match in CHEMICAL_FORMULA_PATTERN.finditer(formula):
        symbol, mole_fraction = match.groups()
        z = pyxray.element_atomic_number(symbol.strip())
        if mole_fraction == '':
            mole_fraction = 1.0
        mole_fraction = float(mole_fraction)
        mole_fractions[z] = mole_fraction
        total_mole_fraction += mole_fraction
    # Calculate atomic fractions
    atomic_fractions = {}
    for z, mole_fraction in mole_fractions.items():
        atomic_fractions[z] = mole_fraction / total_mole_fraction
    return atomic_fractions
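A worked example (keys are atomic numbers, so Al is 13 and O is 8): Al2O3 has 2 + 3 = 5 moles of atoms per formula unit, hence

    convert_formula_to_atomic_fractions('Al2O3')
    # -> {13: 0.4, 8: 0.6}   (Al: 2/5, O: 3/5)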
def posix_to_dt_str(posix):
    """Reverse of str_to_datetime.

    This is used by GCS stub to generate GET bucket XML response.

    Args:
      posix: A float of secs from unix epoch.

    Returns:
      A datetime str.
    """
    dt = datetime.datetime.utcfromtimestamp(posix)
    dt_str = dt.strftime(_DT_FORMAT)
    return dt_str + '.000Z'
def get_work_kind(self):
    """
    We'll have a kind_slug like 'movies'. We need to translate that into
    a work `kind` like 'movie'.
    """
    slugs_to_kinds = {v: k for k, v in Work.KIND_SLUGS.items()}
    return slugs_to_kinds.get(self.kind_slug, None)
def on_release(self, window, key, scancode, action, mods):
    """
    Key handler for key releases.
    """
    # controls for grasping
    if key == glfw.KEY_SPACE:
        self.grasp = not self.grasp  # toggle gripper
    # user-commanded reset
    elif key == glfw.KEY_Q:
        self._reset_state = 1
        self._enabled = False
        self._reset_internal_state()
def _get_count_pagination(self, base, oldest_neighbor, newest_neighbor):
    """ Compute the pagination for count-based views """
    count = self.spec['count']
    out_spec = {**base, 'count': count, 'order': self._order_by}
    if self._order_by == 'newest':
        older_view = View({**out_spec, 'last': oldest_neighbor}) if oldest_neighbor else None
        newer_count = View({**base, 'first': newest_neighbor,
                            'order': 'oldest', 'count': count}) if newest_neighbor else None
        newer_view = View({**out_spec, 'last': newer_count.last}) if newer_count else None
        return older_view, newer_view
    if self._order_by == 'oldest':
        older_count = View({**base, 'last': oldest_neighbor,
                            'order': 'newest', 'count': count}) if oldest_neighbor else None
        older_view = View({**out_spec, 'first': older_count.last}) if older_count else None
        newer_view = View({**out_spec, 'first': newest_neighbor}) if newest_neighbor else None
        return older_view, newer_view
    return None, None
def remove_die(self, die):
    """Remove ``Die`` (first matching) from Roll.

    :param die: Die instance
    """
    if die in self._dice:
        self._dice.remove(die)
def get_classes(self):
    """
    Yields all Java classes from the DEX objects, one entry per DEX file.
    """
    for idx, digest in enumerate(self.analyzed_vms):
        dx = self.analyzed_vms[digest]
        for vm in dx.vms:
            filename = self.analyzed_digest[digest]
            yield idx, filename, digest, vm.get_classes()
def restart(self, timeout=300, config_callback=None):
    """Restart each member of the replica set."""
    for member_id in self.server_map:
        host = self.server_map[member_id]
        server_id = self._servers.host_to_server_id(host)
        server = self._servers._storage[server_id]
        server.restart(timeout, config_callback)
    self.waiting_member_state()
def register_shape_calculator(operator_name, calculator_function, overwrite=False):
    """
    :param operator_name: A unique operator ID. It is usually a string but you can use a type as well
    :param calculator_function: A callable object
    :param overwrite: By default, we raise an exception if the caller of this function is trying to assign
        an existing key (i.e., operator_name) a new value (i.e., calculator_function). Set this flag to
        True to enable overwriting.
    """
    if not overwrite and operator_name in _shape_calculator_pool:
        raise ValueError('We do not overwrite a registered shape calculator by default')
    _shape_calculator_pool[operator_name] = calculator_function
def list_rooms(self, message):
    """what are the rooms?: List all the rooms I know about."""
    context = {"rooms": self.available_rooms.values()}
    self.say(rendered_template("rooms.html", context), message=message, html=True)
def decision_function(self, pairs):
    """Returns the decision function used to classify the pairs.

    Returns the opposite of the learned metric value between samples in every
    pair, to be consistent with scikit-learn conventions. Hence it should
    ideally be low for dissimilar samples and high for similar samples.
    This is the decision function that is used to classify pairs as similar
    (+1), or dissimilar (-1).

    Parameters
    ----------
    pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
        3D Array of pairs to predict, with each row corresponding to two
        points, or 2D array of indices of pairs if the metric learner uses a
        preprocessor.

    Returns
    -------
    y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,)
        The predicted decision function value for each pair.
    """
    pairs = check_input(pairs, type_of_inputs='tuples',
                        preprocessor=self.preprocessor_,
                        estimator=self, tuple_size=self._tuple_size)
    return -self.score_pairs(pairs)
def secretfile_args(parser):
    """Add Secretfile management command line arguments to parser"""
    parser.add_argument('--secrets', dest='secrets',
                        help='Path where secrets are stored',
                        default=os.path.join(os.getcwd(), ".secrets"))
    parser.add_argument('--policies', dest='policies',
                        help='Path where policies are stored',
                        default=os.path.join(os.getcwd(), "vault", ""))
    parser.add_argument('--secretfile', dest='secretfile',
                        help='Secretfile to use',
                        default=os.path.join(os.getcwd(), "Secretfile"))
    parser.add_argument('--tags', dest='tags',
                        help='Tags of things to seed',
                        default=[], type=str, action='append')
    parser.add_argument('--include', dest='include',
                        help='Specify paths to include',
                        default=[], type=str, action='append')
    parser.add_argument('--exclude', dest='exclude',
                        help='Specify paths to exclude',
                        default=[], type=str, action='append')
def match(self, name):
    """
    Returns True if name matches one of the patterns.
    """
    for pat in self.pats:
        if fnmatch.fnmatch(name, pat):
            return True
    return False
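The semantics come straight from the standard library's fnmatch, so shell-style wildcards apply; for reference:

    import fnmatch

    fnmatch.fnmatch('setup.py', '*.py')     # True
    fnmatch.fnmatch('README.md', 'test_*')  # False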
def data(self, data):
    """Use msgpack's streaming feed feature to build up a set of lists.

    The lists should then contain the messagepack-rpc specified items.
    This should be outrageously fast.
    """
    self.unpacker.feed(data)
    for msg in self.unpacker:
        self.handlers[msg[0]](*msg)
def weather_at_places_in_bbox(self, lon_left, lat_bottom, lon_right, lat_top,
                              zoom=10, cluster=False):
    """
    Queries the OWM Weather API for the weather currently observed by
    meteostations inside the bounding box of latitude/longitude coords.

    :param lon_left: longitude for left margin of bounding box, must be
        between -180.0 and 180.0
    :type lon_left: int/float
    :param lat_bottom: latitude for the bottom margin of bounding box,
        must be between -90.0 and 90.0
    :type lat_bottom: int/float
    :param lon_right: longitude for the right margin of bounding box,
        must be between -180.0 and 180.0
    :type lon_right: int/float
    :param lat_top: latitude for top margin of bounding box, must be
        between -90.0 and 90.0
    :type lat_top: int/float
    :param zoom: zoom level (defaults to: 10)
    :type zoom: int
    :param cluster: use server clustering of points
    :type cluster: bool
    :returns: a list of *Observation* objects or ``None`` if no weather
        data is available
    :raises: *ParseResponseException* when OWM Weather API responses' data
        cannot be parsed, *APICallException* when OWM Weather API can not be
        reached, *ValueError* when coordinates values are out of bounds or
        negative values are provided for limit
    """
    geo.assert_is_lon(lon_left)
    geo.assert_is_lon(lon_right)
    geo.assert_is_lat(lat_bottom)
    geo.assert_is_lat(lat_top)
    assert type(zoom) is int, "'zoom' must be an int"
    if zoom <= 0:
        raise ValueError("'zoom' must be greater than zero")
    assert type(cluster) is bool, "'cluster' must be a bool"
    params = {'bbox': ','.join([str(lon_left), str(lat_bottom),
                                str(lon_right), str(lat_top), str(zoom)]),
              'cluster': 'yes' if cluster else 'no'}
    uri = http_client.HttpClient.to_url(BBOX_CITY_URL, self._API_key,
                                        self._subscription_type, self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    return self._parsers['observation_list'].parse_JSON(json_data)
def OauthAuthorizeApplication(self, oauth_duration='hour'):
    """
    Authorize an application using oauth. If this function returns True,
    the obtained oauth token can be retrieved using getResponse and will
    be in url-parameters format.

    TODO: allow the option to ask the user himself for permission,
    instead of doing this automatically. Especially important for web
    applications.

    @param oauth_duration (string) (optional) - 'hour', 'day', 'week', 'year', 'forever'

    @return (boolean) - Boolean indicating whether OauthAuthorizeApplication was successful
    """
    if self.__session_id__ == '':
        self.__error__ = "not logged in"
        return False
    # automatically get authorization for the application
    parameters = {'oauth_token': self.__oauth_token__.key,
                  'tok_expir': self.__OauthGetTokExpir__(oauth_duration),
                  'action': 'ALLOW',
                  'session_id': self.__session_id__}
    if self.__SenseApiCall__('/oauth/provider_authorize', 'POST', parameters=parameters):
        if self.__status__ == 302:
            response = urlparse.parse_qs(urlparse.urlparse(self.__headers__['location'])[4])
            verifier = response['oauth_verifier'][0]
            self.__oauth_token__.set_verifier(verifier)
            return True
        else:
            self.__setAuthenticationMethod__('session_id')
            self.__error__ = "error authorizing application"
            return False
    else:
        self.__setAuthenticationMethod__('session_id')
        self.__error__ = "error authorizing application"
        return False
def close(self):
    """Close the stream.

    If *autoclose* was passed to the constructor then the underlying
    transport will be closed as well.
    """
    if self._closed:
        return
    if self._autoclose:
        self._transport.close()
        self._transport._closed.wait()
    self._transport = None
    self._closed = True
def _analyze_function(self):
    """
    Go over the variable information in variable manager for this function,
    and return all uninitialized register/stack variables.

    :return:
    """
    if not self._function.is_simprocedure \
            and not self._function.is_plt \
            and not self._variable_manager.has_function_manager(self._function.addr):
        l.warning("Please run variable recovery on %s before analyzing its calling conventions.",
                  repr(self._function))
        return None
    vm = self._variable_manager[self._function.addr]
    input_variables = vm.input_variables()
    input_args = self._args_from_vars(input_variables)
    # TODO: properly decide sp_delta
    sp_delta = self.project.arch.bytes if self.project.arch.call_pushes_ret else 0
    cc = SimCC.find_cc(self.project.arch, list(input_args), sp_delta)
    if cc is None:
        l.warning('_analyze_function(): Cannot find a calling convention that fits the given arguments.')
    return cc
def copy(self):
    """Return a clone of this retry manager"""
    return Retry(max_tries=self.max_tries,
                 delay=self.delay,
                 backoff=self.backoff,
                 max_jitter=self.max_jitter / 100.0,
                 max_delay=self.max_delay,
                 sleep_func=self.sleep_func,
                 deadline=self.deadline,
                 retry_exceptions=self.retry_exceptions)
def run(self, s):
    """Split string ``s`` at delimiter, correctly interpreting quotes.

    Further, interprets arrays wrapped in one level of ``[]``. No
    recursive brackets are interpreted (as this would make the grammar
    non-regular and currently this complexity is not needed). Currently,
    quoting inside of braces is not supported either. This is just to
    support the example from VCF v4.3.
    """
    begins, ends = [0], []
    # transition table
    DISPATCH = {
        self.NORMAL: self._handle_normal,
        self.QUOTED: self._handle_quoted,
        self.ARRAY: self._handle_array,
        self.DELIM: self._handle_delim,
        self.ESCAPED: self._handle_escaped,
    }
    # run state automaton
    state = self.NORMAL
    for pos, c in enumerate(s):
        state = DISPATCH[state](c, pos, begins, ends)
    ends.append(len(s))
    assert len(begins) == len(ends)
    # Build resulting list
    return [s[start:end] for start, end in zip(begins, ends)]
def __updateNavButtons(self):
    """
    Updates the navigation buttons that might be on the device screen.
    """
    navButtons = None
    for v in self.views:
        if v.getId() == 'com.android.systemui:id/nav_buttons':
            navButtons = v
            break
    if navButtons:
        self.navBack = self.findViewById('com.android.systemui:id/back', navButtons)
        self.navHome = self.findViewById('com.android.systemui:id/home', navButtons)
        self.navRecentApps = self.findViewById('com.android.systemui:id/recent_apps', navButtons)
    else:
        if self.uiAutomatorHelper:
            print >> sys.stderr, "WARNING: nav buttons not found. Perhaps the device has hardware buttons."
        self.navBack = None
        self.navHome = None
        self.navRecentApps = None
def min(self):
    """
    Compute the min across records.
    """
    return self._constructor(self.values.min(axis=self.baseaxes, keepdims=True))
def read_url(url):
    """Reads given URL as JSON and returns data as loaded python object."""
    logging.debug('reading {url} ...'.format(url=url))
    token = os.environ.get("BOKEH_GITHUB_API_TOKEN")
    headers = {}
    if token:
        headers['Authorization'] = 'token %s' % token
    request = Request(url, headers=headers)
    response = urlopen(request).read()
    return json.loads(response.decode("UTF-8"))
def get_job_logs(id):
    """Get the crawl logs from the job."""
    crawler_job = models.CrawlerJob.query.filter_by(id=id).one_or_none()
    if crawler_job is None:
        click.secho(
            "CrawlJob %s was not found, maybe it's not a crawl job?" % id,
            fg='yellow',
        )
        sys.exit(1)
    if crawler_job.logs is None:
        click.secho(
            (
                "CrawlJob %s has no log, it might be that it has not run "
                "yet, you can try again later." % id
            ),
            fg='yellow',
        )
        sys.exit(1)
    _show_file(
        file_path=crawler_job.logs,
        header_name='Log',
    )
def convert(self, json, fout):
    """Convert json to markdown.

    Takes in a .json file as input and converts it to Markdown format,
    saving the generated .png images into ./images.
    """
    self.build_markdown_body(json)   # create the body
    self.build_header(json['name'])  # create the md header
    self.build_output(fout)
def download_shared_files(job, samples, config):
    """
    Downloads files shared by all samples in the pipeline

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace config: Argparse Namespace object containing argument inputs
    :param list[list] samples: A nested list of samples containing sample information
    """
    job.fileStore.logToMaster('Downloaded shared files')
    file_names = ['reference', 'phase', 'mills', 'dbsnp', 'cosmic']
    urls = [config.reference, config.phase, config.mills, config.dbsnp, config.cosmic]
    for name, url in zip(file_names, urls):
        if url:
            vars(config)[name] = job.addChildJobFn(download_url_job, url=url).rv()
    job.addFollowOnJobFn(reference_preprocessing, samples, config)
def get_news_content(self, url):
    """Fetch the news content.

    :param url: news url
    :return: html code
    """
    res = WyuNews.__wyu_news_content(url)
    soup = BeautifulSoup(res, from_encoding='utf-8')
    tag_table = soup.find(self.__get_tag_table)
    tr = tag_table.findAll('tr')[4]
    return tr.decode()
def __get_values(self):
    """
    Gets values in this cell range as a tuple. This is much more efficient
    than reading cell values one by one.
    """
    array = self._get_target().getDataArray()
    return tuple(itertools.chain.from_iterable(array))
def sort_edge(edges):
    """Sort iterable of edges first by left node indices then right.

    Args:
        edges(list[Edge]): List of edges to be sorted.

    Returns:
        list[Edge]: Sorted list by left and right node indices.
    """
    return sorted(edges, key=lambda x: (x.L, x.R))
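A usage sketch with `Edge` stubbed as a namedtuple (hypothetical; any object with L and R attributes works):

    from collections import namedtuple

    Edge = namedtuple("Edge", ["L", "R"])
    sort_edge([Edge(2, 1), Edge(0, 5), Edge(2, 0)])
    # -> [Edge(L=0, R=5), Edge(L=2, R=0), Edge(L=2, R=1)]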
def get_option_value(self, opt_name):
    """
    Return the value of a given option

    :param opt_name: option name
    :type opt_name: str

    :returns: the value of the option
    """
    if not self.has_option(opt_name):
        raise ValueError("Unknown option name (%s)" % opt_name)
    return self._options[opt_name].value
def _sampleRange(rng, start, end, step, k):
    """
    Equivalent to:

        random.sample(xrange(start, end, step), k)

    except it uses our random number generator.

    This wouldn't need to create the arange if it were implemented in C.
    """
    array = numpy.empty(k, dtype="uint32")
    rng.sample(numpy.arange(start, end, step, dtype="uint32"), array)
    return array
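For intuition, the stdlib equivalent named in the docstring (output order is random; the values shown are illustrative):

    import random

    random.sample(range(0, 100, 10), 3)  # e.g. [40, 0, 70]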
def list(self, request, *args, **kwargs):
    """
    Available request parameters:

    - ?type=type_of_statistics_objects (required. Have to be from the list: 'customer', 'project')
    - ?from=timestamp (default: now - 30 days, for example: 1415910025)
    - ?to=timestamp (default: now, for example: 1415912625)
    - ?datapoints=how many data points have to be in answer (default: 6)

    Answer will be list of datapoints(dictionaries).
    Each datapoint will contain fields: 'to', 'from', 'value'.
    'Value' - count of objects, that were created between 'from' and 'to' dates.

    Example:

    .. code-block:: javascript

        [
            {"to": 471970877, "from": 1, "value": 5},
            {"to": 943941753, "from": 471970877, "value": 0},
            {"to": 1415912629, "from": 943941753, "value": 3}
        ]
    """
    return super(CreationTimeStatsView, self).list(request, *args, **kwargs)
def validate(self, str_in):
    # type: (Text) -> None
    """
    Validates an entry in the field.

    Raises `InvalidEntryError` iff the entry is invalid.

    An entry is invalid iff (1) the string does not represent a base-10
    integer; (2) the integer is not between `self.minimum` and
    `self.maximum`, if those exist; or (3) the integer is negative.

    :param str str_in: String to validate.
    :raises InvalidEntryError: When entry is invalid.
    """
    if self.is_missing_value(str_in):
        return
    # noinspection PyCompatibility
    super().validate(str_in)
    try:
        value = int(str_in, base=10)
    except ValueError as e:
        msg = "Invalid integer. Read '{}'.".format(str_in)
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise_from(e_new, e)
        return  # to stop PyCharm thinking that value might be undefined later
    if self.minimum is not None and value < self.minimum:
        msg = ("Expected integer value of at least {}. Read '{}'."
               .format(self.minimum, value))
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise e_new
    if self.maximum is not None and value > self.maximum:
        msg = ("Expected integer value of at most {}. Read '{}'."
               .format(self.maximum, value))
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise e_new
def get_content_string(self):
    """ Get the Clusterpoint response's content as a string. """
    return ''.join([ET.tostring(element, encoding="utf-8", method="xml")
                    for element in list(self._content)])
def set_rich_menu_image(self, rich_menu_id, content_type, content, timeout=None):
    """Call upload rich menu image API.

    https://developers.line.me/en/docs/messaging-api/reference/#upload-rich-menu-image

    Uploads and attaches an image to a rich menu.

    :param str rich_menu_id: IDs of the richmenu
    :param str content_type: image/jpeg or image/png
    :param content: image content as bytes, or file-like object
    :param timeout: (optional) How long to wait for the server
        to send data before giving up, as a float,
        or a (connect timeout, read timeout) float tuple.
        Default is self.http_client.timeout
    :type timeout: float | tuple(float, float)
    """
    self._post(
        '/v2/bot/richmenu/{rich_menu_id}/content'.format(rich_menu_id=rich_menu_id),
        data=content,
        headers={'Content-Type': content_type},
        timeout=timeout
    )
def reset(self):
    """
    Stops the timer and resets its values to 0.
    """
    self._elapsed = datetime.timedelta()
    self._delta = datetime.timedelta()
    self._starttime = datetime.datetime.now()
    self.refresh()
def decommission_brokers(self, broker_ids):
    """Decommission a list of brokers trying to keep the replication group
    the brokers belong to balanced.

    :param broker_ids: list of string representing valid broker ids in the cluster
    :raises: InvalidBrokerIdError when the id is invalid.
    """
    groups = set()
    for b_id in broker_ids:
        try:
            broker = self.cluster_topology.brokers[b_id]
        except KeyError:
            self.log.error("Invalid broker id %s.", b_id)
            # Raise an error for now. As an alternative we may ignore the
            # invalid id and continue with the others.
            raise InvalidBrokerIdError(
                "Broker id {} does not exist in cluster".format(b_id),
            )
        broker.mark_decommissioned()
        groups.add(broker.replication_group)
    for group in groups:
        self._decommission_brokers_in_group(group)
def info(self, text):
    """
    Posts an info message adding a timestamp and logging level to it for
    both file and console handlers.

    Logger uses a redraw rate because of console flickering. That means it
    will not draw new messages or progress at the very time they are being
    logged but their timestamp will be captured at the right time. Logger
    will redraw at a given time period AND when new messages or progress
    are logged. If you still want to force redraw immediately (may produce
    flickering) then call 'flush' method.

    :param text: The text to log into file and console.
    """
    self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.INFO)))
def set_pixel(self, x, y, value):
    """Set pixel at position x, y to the given value.

    X and Y should be values of 0 to 7 and 0 to 15, resp. Value should be
    0 for off and non-zero for on.
    """
    if x < 0 or x > 7 or y < 0 or y > 15:
        # Ignore out of bounds pixels.
        return
    self.set_led((7 - x) * 16 + y, value)
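Tracing the LED index arithmetic on the corner pixels makes the mapping concrete (the `display` instance is hypothetical):

    display.set_pixel(0, 0, 1)   # lights LED (7-0)*16 + 0  = 112
    display.set_pixel(7, 15, 1)  # lights LED (7-7)*16 + 15 = 15
    display.set_pixel(8, 0, 1)   # silently ignored: x out of bounds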
def prev_img(self, loop=True):
    """Go to the previous image in the channel.
    """
    channel = self.get_current_channel()
    if channel is None:
        self.show_error("Please create a channel.", raisetab=True)
        return
    channel.prev_image()
    return True
def include_callback_query_chat_id(fn=pair, types='all'):
    """
    :return: a pair producer that enables static callback query capturing
        across seeder and delegator.
    :param types: ``all`` or a list of chat types (``private``, ``group``, ``channel``)
    """
    @_ensure_seeders_list
    def p(seeders, delegator_factory, *args, **kwargs):
        return fn(seeders + [per_callback_query_chat_id(types=types)],
                  delegator_factory, *args,
                  include_callback_query=True, **kwargs)
    return p
def getwin_buffer(self, order='RGB', alpha=1.0, dtype=None):
    """Same as :meth:`getwin_array`, but with the output array converted
    to C-order Python bytes.
    """
    outarr = self.getwin_array(order=order, alpha=alpha, dtype=dtype)
    if not hasattr(outarr, 'tobytes'):
        # older versions of numpy
        return outarr.tostring(order='C')
    return outarr.tobytes(order='C')
def _update_project(self, request, data):
    """Update project info"""
    domain_id = identity.get_domain_id_for_operation(request)
    try:
        project_id = data['project_id']
        # add extra information
        if keystone.VERSIONS.active >= 3:
            EXTRA_INFO = getattr(settings, 'PROJECT_TABLE_EXTRA_INFO', {})
            kwargs = dict((key, data.get(key)) for key in EXTRA_INFO)
        else:
            kwargs = {}
        return api.keystone.tenant_update(
            request,
            project_id,
            name=data['name'],
            description=data['description'],
            enabled=data['enabled'],
            domain=domain_id,
            **kwargs)
    except exceptions.Conflict:
        msg = _('Project name "%s" is already used.') % data['name']
        self.failure_message = msg
        return
    except Exception as e:
        LOG.debug('Project update failed: %s', e)
        exceptions.handle(request, ignore=True)
        return
def assert_in(obj, seq, message=None, extra=None):
    """Raises an AssertionError if obj is not in seq."""
    assert obj in seq, _assert_fail_message(message, obj, seq, "is not in", extra)
def save(obj, path):
    """
    Pickle (serialize) object to input file path

    Parameters
    ----------
    obj : any object
    path : string
        File path
    """
    with open(path, 'wb') as f:
        try:
            pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            print('Pickling failed for object {0}, path {1}'.format(obj, path))
            print('Error message: {0}'.format(e))
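A round-trip sketch (the path is hypothetical); loading back is plain pickle:

    import pickle

    save({'a': 1}, '/tmp/obj.pkl')
    with open('/tmp/obj.pkl', 'rb') as f:
        obj = pickle.load(f)   # -> {'a': 1}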
def init_get(self, request):
    """
    Initialize the :class:`LogoutView` attributes on GET request

    :param django.http.HttpRequest request: The current request object
    """
    self.request = request
    self.service = request.GET.get('service')
    self.url = request.GET.get('url')
    self.ajax = settings.CAS_ENABLE_AJAX_AUTH and 'HTTP_X_AJAX' in request.META
def get_elemental_abunds(self, cycle, index=None):
    """
    Returns the elemental abundances for one cycle, either for the whole
    star or a specific zone depending upon the value of 'index'.

    Parameters
    ----------
    cycle : string or integer
        Model to get the abundances for.
    index : integer or list, optional
        Zone number for which to get elemental abundances. If None, the
        entire abundance profile is returned. If a 1x2 list, the abundances
        are returned between indices of index[0] and index[1].
        The default is None.
    """
    isoabunds = self.se.get(cycle, 'iso_massf')
    A = array(self.se.A)
    Z = array(self.se.Z)
    names = self.se.isos
    Zuq = list(set(Z))  # list of unique Zs
    Zuq.sort()
    if index is None:
        index = [0, len(isoabunds)]
    if type(index) == list:
        elemabunds = []
        for zone in range(index[0], index[1]):
            percent = int((zone - index[0]) * 100. / (index[1] - index[0]))
            sys.stdout.flush()
            sys.stdout.write("\rgetting elemental abundances " + "...%d%%" % percent)
            elemabunds.append([sum(isoabunds[zone][where(Z == iZ)]) for iZ in Zuq])
    else:
        elemabunds = [sum(isoabunds[index][where(Z == iZ)]) for iZ in Zuq]
    return elemabunds
def read_member(self, container_id, member_id, query_membership=None):
    """ReadMember.

    [Preview API]

    :param str container_id:
    :param str member_id:
    :param str query_membership:
    :rtype: :class:`<str> <azure.devops.v5_0.identity.models.str>`
    """
    route_values = {}
    if container_id is not None:
        route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
    if member_id is not None:
        route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
    query_parameters = {}
    if query_membership is not None:
        query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
    response = self._send(http_method='GET',
                          location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('str', response)
def page(self, iso_code=values.unset, continent=values.unset,
         country_code=values.unset, low_risk_numbers_enabled=values.unset,
         high_risk_special_numbers_enabled=values.unset,
         high_risk_tollfraud_numbers_enabled=values.unset,
         page_token=values.unset, page_number=values.unset,
         page_size=values.unset):
    """
    Retrieve a single page of CountryInstance records from the API.
    Request is executed immediately

    :param unicode iso_code: Filter to retrieve the country permissions by specifying the ISO country code
    :param unicode continent: Filter to retrieve the country permissions by specifying the continent
    :param unicode country_code: Country code filter
    :param bool low_risk_numbers_enabled: Filter to retrieve the country permissions with dialing to low-risk numbers enabled
    :param bool high_risk_special_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk special service numbers enabled
    :param bool high_risk_tollfraud_numbers_enabled: Filter to retrieve the country permissions with dialing to high-risk toll fraud numbers enabled
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of CountryInstance
    :rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryPage
    """
    params = values.of({
        'IsoCode': iso_code,
        'Continent': continent,
        'CountryCode': country_code,
        'LowRiskNumbersEnabled': low_risk_numbers_enabled,
        'HighRiskSpecialNumbersEnabled': high_risk_special_numbers_enabled,
        'HighRiskTollfraudNumbersEnabled': high_risk_tollfraud_numbers_enabled,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page(
        'GET',
        self._uri,
        params=params,
    )
    return CountryPage(self._version, response, self._solution)
def check_no_blank_before(self, function, docstring):  # def
    """D20{1,2}: No blank lines allowed around function/method docstring.

    There's no blank line either before or after the docstring.
    """
    if docstring:
        before, _, after = function.source.partition(docstring)
        blanks_before = list(map(is_blank, before.split('\n')[:-1]))
        blanks_after = list(map(is_blank, after.split('\n')[1:]))
        blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
        blanks_after_count = sum(takewhile(bool, blanks_after))
        if blanks_before_count != 0:
            yield violations.D201(blanks_before_count)
        if not all(blanks_after) and blanks_after_count != 0:
            yield violations.D202(blanks_after_count)
def add(self, rule, method, target, name=None):
    ''' Add a new route or replace the target for an existing route. '''
    if rule in self.rules:
        self.rules[rule][method] = target
        if name:
            self.builder[name] = self.builder[rule]
        return

    target = self.rules[rule] = {method: target}

    # Build pattern and other structures for dynamic routes
    anons = 0      # Number of anonymous wildcards
    pattern = ''   # Regular expression pattern
    filters = []   # Lists of wildcard input filters
    builder = []   # Data structure for the URL builder
    is_static = True
    for key, mode, conf in self.parse_rule(rule):
        if mode:
            is_static = False
            mask, in_filter, out_filter = self.filters[mode](conf)
            if key:
                pattern += '(?P<%s>%s)' % (key, mask)
            else:
                pattern += '(?:%s)' % mask
                key = 'anon%d' % anons; anons += 1
            if in_filter:
                filters.append((key, in_filter))
            builder.append((key, out_filter or str))
        elif key:
            pattern += re.escape(key)
            builder.append((None, key))
    self.builder[rule] = builder
    if name:
        self.builder[name] = builder

    if is_static and not self.strict_order:
        self.static[self.build(rule)] = target
        return

    def fpat_sub(m):
        return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
    flat_pattern = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, pattern)

    try:
        re_match = re.compile('^(%s)$' % pattern).match
    except re.error, e:
        raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))

    def match(path):
        """ Return an url-argument dictionary. """
        url_args = re_match(path).groupdict()
        for name, wildcard_filter in filters:
            try:
                url_args[name] = wildcard_filter(url_args[name])
            except ValueError:
                raise HTTPError(400, 'Path has wrong format.')
        return url_args

    try:
        combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern)
        self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
        self.dynamic[-1][1].append((match, target))
    except (AssertionError, IndexError), e:
        # AssertionError: Too many groups
        self.dynamic.append((re.compile('(^%s$)' % flat_pattern),
                             [(match, target)]))
    return match
def get_interface_detail_output_interface_wavelength(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_interface_detail = ET.Element("get_interface_detail")
    config = get_interface_detail
    output = ET.SubElement(get_interface_detail, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    wavelength = ET.SubElement(interface, "wavelength")
    wavelength.text = kwargs.pop('wavelength')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def seek(self, n):
    """Gets to a certain marker position in the BED file.

    Args:
        n (int): The index of the marker to seek to.
    """
    if self._mode != "r":
        raise UnsupportedOperation("not available in 'w' mode")
    if 0 <= n < self._nb_markers:
        self._n = n
        self._bed.seek(self._get_seek_position(n))
    else:
        # Invalid seek value
        raise ValueError("invalid position in BED: {}".format(n))
def BL(self, params):
    """
    BL label

    Branch to the label, storing the next instruction in the Link Register
    """
    label = self.get_one_parameter(self.ONE_PARAMETER, params)
    self.check_arguments(label_exists=(label,))
    # TODO: check if label is within +/- 16 MB

    # BL label
    def BL_func():
        self.register['LR'] = self.register['PC']  # No need for the + 1, PC already points to the next instruction
        self.register['PC'] = self.labels[label]

    return BL_func
def add_plugin_directories(self, paths, except_blacklisted=True):
    """
    Adds `paths` to the set of plugin directories.

    `paths` may be either a single object or an iterable. They can be
    relative paths, but will be converted into absolute paths based on
    the current working directory.

    If `except_blacklisted` is `True`, all paths that are blacklisted
    will be removed.
    """
    self.directory_manager.add_directories(paths, except_blacklisted)
def table_measure(self, columns, content):
    """
    Measure the width of each table column.
    """
    dimensions = {}
    for row in content:
        i = 0
        for cell in row:
            # Calculate maximum from:
            dimensions[i] = max(
                dimensions.get(i, 0),               # current column max width
                len(cell),                          # current cell value width
                len(columns[i].get('label', '')),   # column label width
                columns[i].get('width_min', 0),     # configured column minimal width
            )
            i += 1
    return dimensions
def genestatus(args):
    """
    %prog genestatus diploid.gff3.exon.ids

    Tag genes based on translation from GMAP models, using fasta.translate()
    --ids.
    """
    p = OptionParser(genestatus.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    idsfile, = args
    data = get_tags(idsfile)
    key = lambda x: x[0].split(".")[0]
    for gene, cc in groupby(data, key=key):
        cc = list(cc)
        tags = [x[-1] for x in cc]
        if "complete" in tags:
            tag = "complete"
        elif "partial" in tags:
            tag = "partial"
        else:
            tag = "pseudogene"
        print("\t".join((gene, tag)))
def list(path='.'):
    """Generator that returns all files of *path*."""
    import os
    for f in os.listdir(path):
        if isfile(join(path, f)):
            yield join(path, f) if path != '.' else f
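A usage sketch with a hypothetical directory; note the function shadows the builtin `list`, so callers importing it by name give that builtin up:

    for f in list('src'):
        print(f)   # prints 'src/<name>' for each regular file in src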
def set_dm(self, num):
    """
    Make GUI changes based on data model num.
    Get info from WD in appropriate format.
    """
    # enable or disable self.btn1a
    if self.data_model_num == 3:
        self.btn1a.Enable()
    else:
        self.btn1a.Disable()
    # set pmag_gui_dialogs
    global pmag_gui_dialogs
    if self.data_model_num == 2:
        pmag_gui_dialogs = pgd2
        wx.CallAfter(self.get_wd_data2)
    elif self.data_model_num == 3:
        pmag_gui_dialogs = pgd3
        wx.CallAfter(self.get_wd_data)
    # do / re-do menubar
    menubar = pmag_gui_menu.MagICMenu(self, data_model_num=self.data_model_num)
    self.SetMenuBar(menubar)
    self.menubar = menubar
def rank(
    self,
    axis=0,
    method="average",
    numeric_only=None,
    na_option="keep",
    ascending=True,
    pct=False,
):
    """
    Compute numerical data ranks (1 through n) along axis.
    Equal values are assigned a rank that is the [method] of the ranks of
    those values.

    Args:
        axis (int): 0 or 'index' for row-wise, 1 or 'columns' for column-wise
        method: {'average', 'min', 'max', 'first', 'dense'}
            Specifies which method to use for equal vals
        numeric_only (boolean): Include only float, int, boolean data.
        na_option: {'keep', 'top', 'bottom'}
            Specifies how to handle NA options
        ascending (boolean): Decides ranking order
        pct (boolean): Computes percentage ranking of data

    Returns:
        A new DataFrame
    """
    axis = self._get_axis_number(axis)
    return self.__constructor__(
        query_compiler=self._query_compiler.rank(
            axis=axis,
            method=method,
            numeric_only=numeric_only,
            na_option=na_option,
            ascending=ascending,
            pct=pct,
        )
    )
def append(self, event, help=""):
    """Creates a new event. `event` may be iterable or string.

    Args:
        event (str): Name of event to declare

    Kwargs:
        help (str): Help string for the event

    Raises:
        TypeError

    **Please** describe the event and its calling arguments in the
    help string.
    """
    if isinstance(event, str):
        self._events[event] = HookList(is_waterfall=self.is_waterfall)
        self._help[event] = (help, getframeinfo(stack()[1][0]))
        if not help:
            logger.warning("Great, don't say anything about your hooks and "
                           "wait for plugin creators to figure it out.")
    elif isinstance(event, Iterable):
        # Deprecated. It does not give the ability to pass a help string.
        # TODO: Remove this
        for name in event:
            self.append(name)
    else:
        raise TypeError("Invalid event name!")
def _bind_topics(self, topics):
    """Subscribe to all the topics we need to communicate with this device.

    Args:
        topics (MQTTTopicValidator): The topic validator for this device
            that we are connecting to.
    """
    # FIXME: Allow for these subscriptions to fail and clean up the
    # previous ones so that this function is atomic
    self.client.subscribe(topics.status, self._on_status_message)
    self.client.subscribe(topics.tracing, self._on_trace)
    self.client.subscribe(topics.streaming, self._on_report)
    self.client.subscribe(topics.response, self._on_response_message)
def downsampling(self, factor=10, first=3, switch=True, verbose=True):
    """
    Rude downsampling of a `CMADataLogger` data file by `factor`,
    keeping also the first `first` entries. This function is a stump
    and subject to future changes. Return self.

    Arguments
    ---------
    - `factor` -- downsampling factor
    - `first` -- keep first `first` entries
    - `switch` -- switch the new logger to the downsampled logger
      original_name + 'down'

    Details
    -------
    ``self.name_prefix + 'down'`` files are written.

    Example
    -------
    ::

        import cma
        cma.downsampling()  # takes outcmaes* files
        cma.plot('outcmaesdown')

    """
    newprefix = self.name_prefix + 'down'
    for name in self.file_names:
        f = open(newprefix + name + '.dat', 'w')
        iline = 0
        cwritten = 0
        for line in open(self.name_prefix + name + '.dat'):
            if iline < first or iline % factor == 0:
                f.write(line)
                cwritten += 1
            iline += 1
        f.close()
        if verbose and iline > first:
            print('%d' % (cwritten) + ' lines written in ' + newprefix + name + '.dat')
    if switch:
        self.name_prefix += 'down'
    return self