def update(self, obj, size): """Update this profile. """
self.number += 1
self.total += size
if self.high < size:  # largest
    self.high = size
try:  # prefer using weak ref
    self.objref, self.weak = Weakref.ref(obj), True
except TypeError:
    self.objref, self.weak = obj, False
def _connected(service): """ Verify if a connman service is connected """
state = pyconnman.ConnService(os.path.join(SERVICE_PATH, service)).get_property('State')
return state == 'online' or state == 'ready'
def all_internal_calls(self): """ recursive version of internal_calls """
if self._all_internals_calls is None:
    self._all_internals_calls = self._explore_functions(lambda x: x.internal_calls)
return self._all_internals_calls
def decode_qwikcord(packet, channel=1): """Extract the qwikcord current measurements from val (CTavg, CTsum)."""
val = str(packet.get('val', ''))
if len(val) != 16:
    return None
if channel == 1:
    return int(val[6:12], 16)  # CTavg
return int(val[12:], 16)
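A minimal usage sketch, assuming the function above is in scope; the 16-hex-character payload is hypothetical:
>>> packet = {'val': 'AAAAAA0001F400C8'}   # hypothetical 16-char reading
>>> decode_qwikcord(packet, channel=1)     # CTavg = int('0001F4', 16)
500
>>> decode_qwikcord(packet, channel=2)     # CTsum = int('00C8', 16)
200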
def __set_timestamp(self, clock): """ If "clock" is None, use the current time. This function is called from self.__init__() """
if clock is None:
    unix_timestamp = time.mktime(datetime.datetime.now().utctimetuple())
    timestamp = int(unix_timestamp)
    return timestamp
else:
    return clock
def solve(self, table_size): """Solves problem :param table_size: Size of table :return: List of possible solutions """
if table_size == 0:
    return [[]]
smaller_solutions = self.solve(table_size - 1)
solutions = []
for solution in smaller_solutions:
    for column in range(1, self.board_size + 1):
        # try adding a new queen to row = n, column = column
        if not self.under_attack(column, solution):
            solutions.append(solution + [(table_size, column)])
return solutions
def list_hooks(self, key_name): """Return list of all hooks attached to key_name."""
kf = self.dct[key_name]
if 'hooks' not in kf:
    return []
else:
    return kf['hooks'].iterkeys()
def get_outputs(self, merge_multi_context=True, begin=0, end=None): """Get outputs of the previous forward computation. If begin or end is specified, return [begin, end)-th outputs, otherwise return all outputs. Parameters ---------- merge_multi_context : bool Default is `True`. In the case when data-parallelism is used, the outputs will be collected from multiple devices. A `True` value indicate that we should merge the collected results so that they look like from a single executor. begin : int starting index of returned outputs in all outputs end : int or None ending index (excluded) of returned outputs. Returns ------- If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output elements are `NDArray`. """
if end is None:
    end = self.num_outputs
outputs = [[exec_.outputs[i] for exec_ in self.execs]
           for i in range(begin, end)]
if merge_multi_context:
    outputs = _merge_multi_context(outputs, self.output_layouts)
return outputs
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: ExecutionStepContext for this ExecutionStepInstance :rtype: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepContext """
if self._context is None:
    self._context = ExecutionStepContext(
        self._version,
        flow_sid=self._solution['flow_sid'],
        execution_sid=self._solution['execution_sid'],
        sid=self._solution['sid'],
    )
return self._context
def rollout(self, **kwargs): """Generate x for open loop movements. """
if kwargs.has_key('tau'):
    timesteps = int(self.timesteps / kwargs['tau'])
else:
    timesteps = self.timesteps
self.x_track = np.zeros(timesteps)
self.reset_state()
for t in range(timesteps):
    self.x_track[t] = self.x
    self.step(**kwargs)
return self.x_track
def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, kern=None, likelihood=None): """ Get the predictive quantiles around the prediction at X :param X: The points at which to make a prediction :type X: np.ndarray (Xnew x self.input_dim) :param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval :type quantiles: tuple :param kern: optional kernel to use for prediction :type predict_kw: dict :returns: list of quantiles for each X and predictive quantiles for interval combination :rtype: [np.ndarray (Xnew x self.output_dim), np.ndarray (Xnew x self.output_dim)] """
m, v = self._raw_predict(X, full_cov=False, kern=kern)
if likelihood is None:
    likelihood = self.likelihood
quantiles = likelihood.predictive_quantiles(m, v, quantiles, Y_metadata=Y_metadata)
if self.normalizer is not None:
    quantiles = [self.normalizer.inverse_mean(q) for q in quantiles]
return quantiles
def var(self, ddof=0): """Calculate variance of timeseries. Return a vector containing the variances of each series in the timeseries. :parameter ddof: delta degree of freedom, the divisor used in the calculation is given by ``N - ddof`` where ``N`` represents the length of timeseries. Default ``0``. .. math:: var = \\frac{\\sum_i^N (x - \\mu)^2}{N-ddof} """
N = len(self)
if N:
    v = self.values()
    mu = sum(v)
    return (sum(v*v) - mu*mu/N)/(N-ddof)
else:
    return None
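A quick standalone check of the formula (not calling the method itself), assuming element-wise arithmetic on a numpy array of values:
>>> import numpy as np
>>> v = np.array([1., 2., 3., 4.]); N = len(v); mu = v.sum()
>>> float((np.sum(v*v) - mu*mu/N) / (N - 0))   # ddof=0
1.25
>>> float(np.var(v))
1.25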
def ndarray(shape, dtype, location, order='F', readonly=False, lock=None, **kwargs): """ Create a shared memory numpy array. Lock is only necessary while doing multiprocessing on platforms without /dev/shm type shared memory as filesystem emulation will be used instead. Allocating the shared array requires cleanup on your part. A shared memory file will be located at sharedmemory.PLATFORM_SHM_DIRECTORY + location and must be unlinked when you're done. It will outlive the program. You should also call .close() on the mmap file handle when done. However, this is less of a problem because the operating system will close the file handle on process termination. Parameters: shape: same as numpy.ndarray dtype: same as numpy.ndarray location: the shared memory filename lock: (optional) multiprocessing.Lock Returns: (mmap filehandle, shared ndarray) """
if EMULATE_SHM:
    return ndarray_fs(shape, dtype, location, lock, readonly, order, **kwargs)
return ndarray_shm(shape, dtype, location, readonly, order, **kwargs)
def _W(self, mu, weights, y=None): """ compute the PIRLS weights for model predictions. TODO lets verify the formula for this. if we use the square root of the mu with the stable opt, we get the same results as when we use non-sqrt mu with naive opt. this makes me think that they are equivalent. also, using non-sqrt mu with stable opt gives very small edofs for even lam=0.001 and the parameter variance is huge. this seems strange to me. computed [V * d(link)/d(mu)] ^(-1/2) by hand and the math checks out as hoped. ive since moved the square to the naive pirls method to make the code modular. Parameters --------- mu : array-like of shape (n_samples,) expected value of the targets given the model and inputs weights : array-like of shape (n_samples,) containing sample weights y = array-like of shape (n_samples,) or None, default None useful for computing the asymmetric weight. Returns ------- weights : scipy.sparse array of shape (n_samples, n_samples) """
# asymmetric weight
asym = (y > mu) * self.expectile + (y <= mu) * (1 - self.expectile)
return sp.sparse.diags((self.link.gradient(mu, self.distribution)**2 *
                        self.distribution.V(mu=mu) *
                        weights ** -1)**-0.5 * asym**0.5)
def get_grade_entries_by_ids(self, grade_entry_ids): """Gets a ``GradeEntryList`` corresponding to the given ``IdList``. arg: grade_entry_ids (osid.id.IdList): the list of ``Ids`` to retrieve return: (osid.grading.GradeEntryList) - the returned ``GradeEntry`` list raise: NotFound - an ``Id was`` not found raise: NullArgument - ``grade_entry_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_ids
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('grading',
                                 collection='GradeEntry',
                                 runtime=self._runtime)
object_id_list = []
for i in grade_entry_ids:
    object_id_list.append(ObjectId(self._get_id(i, 'grading').get_identifier()))
result = collection.find(
    dict({'_id': {'$in': object_id_list}},
         **self._view_filter()))
result = list(result)
sorted_result = []
for object_id in object_id_list:
    for object_map in result:
        if object_map['_id'] == object_id:
            sorted_result.append(object_map)
            break
return objects.GradeEntryList(sorted_result, runtime=self._runtime, proxy=self._proxy)
def iter_comments(self, number=-1, etag=None): """Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s """
url = self._build_url('comments', base_url=self._api)
return self._iter(int(number), url, ReviewComment, etag=etag)
def _xmlTextReaderErrorFunc(xxx_todo_changeme,msg,severity,locator): """Intermediate callback to wrap the locator"""
(f, arg) = xxx_todo_changeme
return f(arg, msg, severity, xmlTextReaderLocator(locator))
def inspect_cuda(): """ Return cuda device information and nvcc/cuda setup """
nvcc_settings = nvcc_compiler_settings()
sysconfig.get_config_vars()
nvcc_compiler = ccompiler.new_compiler()
sysconfig.customize_compiler(nvcc_compiler)
customize_compiler_for_nvcc(nvcc_compiler, nvcc_settings)
output = inspect_cuda_version_and_devices(nvcc_compiler, nvcc_settings)
return json.loads(output), nvcc_settings
def intersection_box(box1, box2): """ Finds an intersection box that is common to both given boxes. :param box1: Box object 1 :param box2: Box object 2 :return: None if there is no intersection otherwise the new Box """
b1_x2, b1_y2 = box1.bottom_right()
b2_x2, b2_y2 = box2.bottom_right()
x, y = max(box1.x, box2.x), max(box1.y, box2.y)
x2, y2 = min(b1_x2, b2_x2), min(b1_y2, b2_y2)
w, h = max(0, x2-x), max(0, y2-y)
return Box(x, y, w, h)
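The same clipping arithmetic, sketched with plain (x, y, w, h) tuples so it can run without the Box class; the helper name is illustrative:
>>> def intersect(a, b):
...     ax2, ay2 = a[0] + a[2], a[1] + a[3]
...     bx2, by2 = b[0] + b[2], b[1] + b[3]
...     x, y = max(a[0], b[0]), max(a[1], b[1])
...     x2, y2 = min(ax2, bx2), min(ay2, by2)
...     return (x, y, max(0, x2 - x), max(0, y2 - y))
...
>>> intersect((0, 0, 10, 10), (5, 5, 10, 10))
(5, 5, 5, 5)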
def set_defs(self, defs, position=None): """Sets the defs at the position."""
if position is None:
    position = self.position
self.checkdefs[position][1] = defs
def get_relavent_flags(self): """ Retrieves the relevant flags for this data block. Returns: All flags related to this block. """
relavent_flags = {}
for code, flags_list in self.flags.items():
    relavent_flags[code] = []
    for flag in flags_list:
        if self.flag_is_related(flag):
            relavent_flags[code].append(flag)
    # Remove that flag level if no error exists
    if not relavent_flags[code]:
        del relavent_flags[code]
return relavent_flags
def _save_fastq_space(items): """Potentially save fastq space prior to merging, since alignments are done. """
to_cleanup = {}
for data in (utils.to_single_data(x) for x in items):
    for fname in data.get("files", []):
        if os.path.realpath(fname).startswith(dd.get_work_dir(data)):
            to_cleanup[fname] = data["config"]
for fname, config in to_cleanup.items():
    utils.save_diskspace(fname, "Cleanup prep files after alignment finished", config)
def getDataFromFIFO(self, bytesToRead): """ reads the specified number of bytes from the FIFO, should be called after a call to getFifoCount to ensure there is new data available (to avoid reading duplicate data). :param bytesToRead: the number of bytes to read. :return: the bytes read. """
return self.i2c_io.readBlock(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_R_W, bytesToRead)
def to_numpy(self): """ Converts this SFrame to a numpy array This operation will construct a numpy array in memory. Care must be taken when size of the returned object is big. Returns ------- out : numpy.ndarray A Numpy Array containing all the values of the SFrame """
assert HAS_NUMPY, 'numpy is not installed.'
import numpy
return numpy.transpose(numpy.asarray([self[x] for x in self.column_names()]))
def search_records(self, domain, record_type, name=None, data=None): """ Returns a list of all records configured for the specified domain that match the supplied search criteria. """
search_params = []
if name:
    search_params.append("name=%s" % name)
if data:
    search_params.append("data=%s" % data)
query_string = "&".join(search_params)
dom_id = utils.get_id(domain)
uri = "/domains/%s/records?type=%s" % (dom_id, record_type)
if query_string:
    uri = "%s&%s" % (uri, query_string)
resp, body = self._retry_get(uri)
records = body.get("records", [])
self._reset_paging("record", body)
rec_paging = self._paging.get("record", {})
while rec_paging.get("next_uri"):
    resp, body = self._retry_get(rec_paging.get("next_uri"))
    self._reset_paging("record", body)
    records.extend(body.get("records", []))
for record in records:
    record["domain_id"] = dom_id
return [CloudDNSRecord(self, record, loaded=False)
        for record in records if record]
def body_json(soup, base_url=None): """ Get body json and then alter it with section wrapping and removing boxed-text """
body_content = body(soup, remove_key_info_box=True, base_url=base_url)
# Wrap in a section if the first block is not a section
if (body_content and len(body_content) > 0 and "type" in body_content[0]
        and body_content[0]["type"] != "section"):
    # Wrap this one
    new_body_section = OrderedDict()
    new_body_section["type"] = "section"
    new_body_section["id"] = "s0"
    new_body_section["title"] = "Main text"
    new_body_section["content"] = []
    for body_block in body_content:
        new_body_section["content"].append(body_block)
    new_body = []
    new_body.append(new_body_section)
    body_content = new_body
body_content_rewritten = elifetools.json_rewrite.rewrite_json("body_json", soup, body_content)
return body_content_rewritten
def Elamvaluthi_Srinivas(m, x, D, rhol, rhog, Cpl, kl, mug, mu_b, mu_w=None): r"""Calculates the two-phase non-boiling heat transfer coefficient of a liquid and gas flowing inside a tube of any inclination, as in [1]_ and reviewed in [2]_. .. math:: \frac{h_{TP} D}{k_L} = 0.5\left(\frac{\mu_G}{\mu_L}\right)^{0.25} Re_M^{0.7} Pr^{1/3}_L (\mu_b/\mu_w)^{0.14} Re_M = \frac{D V_L \rho_L}{\mu_L} + \frac{D V_g \rho_g}{\mu_g} Parameters ---------- m : float Mass flow rate [kg/s] x : float Quality at the specific tube interval [-] D : float Diameter of the tube [m] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the gas [kg/m^3] Cpl : float Constant-pressure heat capacity of liquid [J/kg/K] kl : float Thermal conductivity of liquid [W/m/K] mug : float Viscosity of gas [Pa*s] mu_b : float Viscosity of liquid at bulk conditions (average of inlet/outlet temperature) [Pa*s] mu_w : float, optional Viscosity of liquid at wall temperature [Pa*s] Returns ------- h : float Heat transfer coefficient [W/m^2/K] Notes ----- If the viscosity at the wall temperature is not given, the liquid viscosity correction is not applied. Developed for vertical flow, and flow patters of bubbly and slug. Gas/liquid superficial velocity ratios from 0.3 to 4.6, liquid mass fluxes from 200 to 1600 kg/m^2/s, and the fluids tested were air-water and air-aqueous glycerine solutions. The tube inner diameter was 1 cm, and the L/D ratio was 86. Examples -------- >>> Elamvaluthi_Srinivas(m=1, x=.9, D=.3, rhol=1000, rhog=2.5, Cpl=2300, ... kl=.6, mug=1E-5, mu_b=1E-3, mu_w=1.2E-3) 3901.2134471578584 References ---------- .. [1] Elamvaluthi, G., and N. S. Srinivas. "Two-Phase Heat Transfer in Two Component Vertical Flows." International Journal of Multiphase Flow 10, no. 2 (April 1, 1984): 237-42. doi:10.1016/0301-9322(84)90021-1. .. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L. Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with Seven Sets of Experimental Data, Including Flow Pattern and Tube Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1, 1999): 15-40. doi:10.1080/014576399271691. """
Vg = m*x/(rhog*pi/4*D**2)
Vl = m*(1-x)/(rhol*pi/4*D**2)
Prl = Prandtl(Cp=Cpl, mu=mu_b, k=kl)
ReM = D*Vl*rhol/mu_b + D*Vg*rhog/mug
Nu_TP = 0.5*(mug/mu_b)**0.25*ReM**0.7*Prl**(1/3.)
if mu_w:
    Nu_TP *= (mu_b/mu_w)**0.14
return Nu_TP*kl/D
def commit(self, message, parent_commits=None, head=True, author=None, committer=None, author_date=None, commit_date=None, skip_hooks=False): """Commit the current default index file, creating a commit object. For more information on the arguments, see tree.commit. :note: If you have manually altered the .entries member of this instance, don't forget to write() your changes to disk beforehand. Passing skip_hooks=True is the equivalent of using `-n` or `--no-verify` on the command line. :return: Commit object representing the new commit"""
if not skip_hooks:
    run_commit_hook('pre-commit', self)
    self._write_commit_editmsg(message)
    run_commit_hook('commit-msg', self, self._commit_editmsg_filepath())
    message = self._read_commit_editmsg()
    self._remove_commit_editmsg()
tree = self.write_tree()
rval = Commit.create_from_tree(self.repo, tree, message, parent_commits,
                               head, author=author, committer=committer,
                               author_date=author_date, commit_date=commit_date)
if not skip_hooks:
    run_commit_hook('post-commit', self)
return rval
def adapt_files(solver): """ Rename and remove files whenever necessary. """
print("adapting {0}'s files".format(solver)) root = os.path.join('solvers', solver) for arch in to_extract[solver]: arch = os.path.join(root, arch) extract_archive(arch, solver, put_inside=True) for fnames in to_move[solver]: old = os.path.join(root, fnames[0]) new = os.path.join(root, fnames[1]) os.rename(old, new) for f in to_remove[solver]: f = os.path.join(root, f) if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f)
def match_trailer(self, tokens, item): """Matches typedefs and as patterns."""
internal_assert(len(tokens) > 1 and len(tokens) % 2 == 1, "invalid trailer match tokens", tokens)
match, trailers = tokens[0], tokens[1:]
for i in range(0, len(trailers), 2):
    op, arg = trailers[i], trailers[i + 1]
    if op == "is":
        self.add_check("_coconut.isinstance(" + item + ", " + arg + ")")
    elif op == "as":
        if arg in self.names:
            self.add_check(self.names[arg] + " == " + item)
        elif arg != wildcard:
            self.add_def(arg + " = " + item)
            self.names[arg] = item
    else:
        raise CoconutInternalException("invalid trailer match operation", op)
self.match(match, item)
def get_longest_non_repeat_v2(string): """ Find the length of the longest substring without repeating characters. Uses alternative algorithm. Return max_len and the substring as a tuple """
if string is None:
    return 0, ''
sub_string = ''
start, max_len = 0, 0
used_char = {}
for index, char in enumerate(string):
    if char in used_char and start <= used_char[char]:
        start = used_char[char] + 1
    else:
        if index - start + 1 > max_len:
            max_len = index - start + 1
            sub_string = string[start: index + 1]
    used_char[char] = index
return max_len, sub_string
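A small doctest-style check, assuming the function above is importable:
>>> get_longest_non_repeat_v2('abcabcbb')
(3, 'abc')
>>> get_longest_non_repeat_v2(None)
(0, '')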
def on_canvas_slave__electrode_selected(self, slave, data): """ .. versionchanged:: 0.11 Clear any temporary routes (drawn while mouse is down) from routes list. .. versionchanged:: 0.11.3 Clear temporary routes by setting ``df_routes`` property of :attr:`canvas_slave`. """
if self.plugin is None:
    return
# XXX Negative `route_i` corresponds to temporary route being
# drawn.  Since electrode selection terminates route drawing, clear any
# rows corresponding to negative `route_i` values from the routes
# table.
slave.df_routes = slave.df_routes.loc[slave.df_routes.route_i >= 0].copy()
state = self.canvas_slave.electrode_states.get(data['electrode_id'], 0)
self.plugin.execute_async('microdrop.electrode_controller_plugin',
                          'set_electrode_states',
                          electrode_states=pd.Series([not state],
                                                     index=[data['electrode_id']]))
def delete_all(self, user=None): """ Method to soft-delete all notifications of a User (if supplied) :param user: Notification recipient. :return: Updates QuerySet as soft-deleted. """
qs = self.active()
if user:
    qs = qs.filter(recipient=user)
soft_delete = getattr(settings, 'NOTIFY_SOFT_DELETE', True)
if soft_delete:
    qs.update(deleted=True)
else:
    qs.delete()
def _build_stack(self): """ Construct a list of dictionaries representing the connection configuration between the controller and the target. This is additionally used by the integration tests "mitogen_get_stack" action to fetch the would-be connection configuration. """
return self._stack_from_spec( ansible_mitogen.transport_config.PlayContextSpec( connection=self, play_context=self._play_context, transport=self.transport, inventory_name=self.inventory_hostname, ) )
def resource_id(**kwargs): """Create a valid resource id string from the given parts. This method builds the resource id from the left until the next required id parameter to be appended is not found. It then returns the built up id. :param dict kwargs: The keyword arguments that will make up the id. The method accepts the following keyword arguments: - subscription (required): Subscription id - resource_group: Name of resource group - namespace: Namespace for the resource provider (i.e. Microsoft.Compute) - type: Type of the resource (i.e. virtualMachines) - name: Name of the resource (or parent if child_name is also \ specified) - child_namespace_{level}: Namespace for the child resoure of that level (optional) - child_type_{level}: Type of the child resource of that level - child_name_{level}: Name of the child resource of that level :returns: A resource id built from the given arguments. :rtype: str """
kwargs = {k: v for k, v in kwargs.items() if v is not None}
rid_builder = ['/subscriptions/{subscription}'.format(**kwargs)]
try:
    try:
        rid_builder.append('resourceGroups/{resource_group}'.format(**kwargs))
    except KeyError:
        pass
    rid_builder.append('providers/{namespace}'.format(**kwargs))
    rid_builder.append('{type}/{name}'.format(**kwargs))
    count = 1
    while True:
        try:
            rid_builder.append('providers/{{child_namespace_{}}}'
                               .format(count).format(**kwargs))
        except KeyError:
            pass
        rid_builder.append('{{child_type_{0}}}/{{child_name_{0}}}'
                           .format(count).format(**kwargs))
        count += 1
except KeyError:
    pass
return '/'.join(rid_builder)
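An illustrative call with hypothetical identifiers, assuming the function above is in scope:
>>> resource_id(subscription='00000000-0000-0000-0000-000000000000',
...             resource_group='myGroup',
...             namespace='Microsoft.Compute',
...             type='virtualMachines',
...             name='myVM')
'/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup/providers/Microsoft.Compute/virtualMachines/myVM'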
def send(self, message): """ Sends a message to all subscribers of destination. @param message: The message frame. (The frame will be modified to set command to MESSAGE and set a message id.) @type message: L{stompclient.frame.Frame} """
dest = message.headers.get('destination')
if not dest:
    raise ValueError(
        "Cannot send frame with no destination: %s" % message)
message.cmd = 'message'
message.headers.setdefault('message-id', str(uuid.uuid4()))
bad_subscribers = set()
for subscriber in self._topics[dest]:
    try:
        subscriber.send_frame(message)
    except:
        self.log.exception(
            "Error delivering message to subscriber %s; client will be disconnected." % subscriber)
        # We queue for deletion so we are not modifying the topics dict
        # while iterating over it.
        bad_subscribers.add(subscriber)
for subscriber in bad_subscribers:
    self.disconnect(subscriber)
def common_install_mysql(self): """ Install mysql """
sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password password {0}'".format(self.mysql_password)) sudo("debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password {0}'".format(self.mysql_password)) sudo('apt-get install mysql-server -y') print(green(' * Installed MySql server in the system.')) print(green(' * Done')) print()
def getLorenzShares(data,weights=None,percentiles=[0.5],presorted=False): """ Calculates the Lorenz curve at the requested percentiles of (weighted) data. Median by default. Parameters ---------- data : numpy.array A 1D array of float data. weights : numpy.array A weighting vector for the data. percentiles : [float] A list of percentiles to calculate for the data. Each element should be in (0,1). presorted : boolean Indicator for whether data has already been sorted. Returns ------- lorenz_out : numpy.array The requested Lorenz curve points of the data. """
if weights is None:  # Set equiprobable weights if none were given
    weights = np.ones(data.size)
if presorted:  # Sort the data if it is not already
    data_sorted = data
    weights_sorted = weights
else:
    order = np.argsort(data)
    data_sorted = data[order]
    weights_sorted = weights[order]
cum_dist = np.cumsum(weights_sorted)/np.sum(weights_sorted)  # cumulative probability distribution
temp = data_sorted*weights_sorted
cum_data = np.cumsum(temp)/sum(temp)  # cumulative ownership shares
# Calculate the requested Lorenz shares by interpolating the cumulative ownership
# shares over the cumulative distribution, then evaluating at requested points
lorenzFunc = interp1d(cum_dist, cum_data, bounds_error=False, assume_sorted=True)
lorenz_out = lorenzFunc(percentiles)
return lorenz_out
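A worked example, assuming numpy and scipy.interpolate.interp1d are imported as in the source module: with four equally weighted observations [1, 2, 3, 4], the bottom half of the distribution owns (1+2)/(1+2+3+4) = 0.3 of the total, so the Lorenz share at the median is 0.3.
>>> import numpy as np
>>> round(float(getLorenzShares(np.array([1., 2., 3., 4.]), percentiles=[0.5])[0]), 6)
0.3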
def load_module(prefix, epoch, data_names, data_shapes): """Loads the model from checkpoint specified by prefix and epoch, binds it to an executor, and sets its parameters and returns a mx.mod.Module """
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
# We don't need CTC loss for prediction, just a simple softmax will suffice.
# We get the output of the layer just before the loss layer ('pred_fc') and add softmax on top
pred_fc = sym.get_internals()['pred_fc_output']
sym = mx.sym.softmax(data=pred_fc)
mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=data_names, label_names=None)
mod.bind(for_training=False, data_shapes=data_shapes)
mod.set_params(arg_params, aux_params, allow_missing=False)
return mod
def _output_ret(self, ret, out, retcode=0): """ Print the output from a single return to the terminal """
import salt.output
# Handle special case commands
if self.config['fun'] == 'sys.doc' and not isinstance(ret, Exception):
    self._print_docs(ret)
else:
    # Determine the proper output method and run it
    salt.output.display_output(ret, out=out, opts=self.config, _retcode=retcode)
if not ret:
    sys.stderr.write('ERROR: No return received\n')
    sys.exit(2)
def can_use_enum(func): """ Decorator to use Enum value on type checks. """
@wraps(func)
def inner(self, value):
    if isinstance(value, Enum):
        return self.check_value(value.value) or func(self, value.value)
    return func(self, value)
return inner
def load_from_s3(self, bucket, prefix=None): """ Load messages previously saved to S3. """
n = 0
if prefix:
    prefix = '%s/' % prefix
else:
    prefix = '%s/' % self.id[1:]
rs = bucket.list(prefix=prefix)
for key in rs:
    n += 1
    m = self.new_message(key.get_contents_as_string())
    self.write(m)
return n
async def release(self, *args, **kwargs): """ Releases the lock. """
return await _maybe_await(self.lock.release(*args, **kwargs))
def proper_case_section(self, section): """Verify proper casing is retrieved, when available, for each dependency in the section. """
# Casing for section.
changed_values = False
unknown_names = [k for k in section.keys() if k not in set(self.proper_names)]
# Replace each package with proper casing.
for dep in unknown_names:
    try:
        # Get new casing for package name.
        new_casing = proper_case(dep)
    except IOError:
        # Unable to normalize package name.
        continue
    if new_casing != dep:
        changed_values = True
        self.register_proper_name(new_casing)
        # Replace old value with new value.
        old_value = section[dep]
        section[new_casing] = old_value
        del section[dep]
# Return whether or not values have been changed.
return changed_values
def combine_comments(comments): """ Given a list of comments, strings, a single comment or a single string, return a single string of text containing all of the comments, prepending the '#' and joining with newlines as necessary. """
if not isinstance(comments, list):
    comments = [comments]
ret = []
for comment in comments:
    if not isinstance(comment, six.string_types):
        comment = str(comment)
    # Normalize for any spaces (or lack thereof) after the #
    ret.append('# {0}\n'.format(comment.lstrip('#').lstrip()))
return ''.join(ret)
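A doctest-style sketch, assuming the function above is in scope (it normalizes any existing leading '#'):
>>> combine_comments(['foo', '#bar'])
'# foo\n# bar\n'
>>> combine_comments('single comment')
'# single comment\n'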
def _describe_list(self) -> List[Tuple[str, float]]: """Return useful information about the graph as a list of tuples."""
number_nodes = self.number_of_nodes()
return [
    ('Number of Nodes', number_nodes),
    ('Number of Edges', self.number_of_edges()),
    ('Number of Citations', self.number_of_citations()),
    ('Number of Authors', self.number_of_authors()),
    ('Network Density', '{:.2E}'.format(nx.density(self))),
    ('Number of Components', nx.number_weakly_connected_components(self)),
    ('Number of Warnings', self.number_of_warnings()),
]
def next_turn(self, *args): """Advance time by one turn, if it's not blocked. Block time by setting ``engine.universal['block'] = True``"""
if self.tmp_block:
    return
eng = self.app.engine
dial = self.dialoglayout
if eng.universal.get('block'):
    Logger.info("MainScreen: next_turn blocked, delete universal['block'] to unblock")
    return
if dial.idx < len(dial.todo):
    Logger.info("MainScreen: not advancing time while there's a dialog")
    return
self.tmp_block = True
self.app.unbind(
    branch=self.app._push_time,
    turn=self.app._push_time,
    tick=self.app._push_time
)
eng.next_turn(cb=self._update_from_next_turn)
def unset_role(username, role, **kwargs): """ Remove role from username. username Username for role removal role Role to remove no_save_config If True, don't save configuration commands to startup configuration. If False, save configuration to startup configuration. Default: False .. code-block:: bash salt '*' nxos.cmd unset_role username=daniel role=vdc-admin """
role_line = 'no username {0} role {1}'.format(username, role)
return config(role_line, **kwargs)
async def get_supported_methods(self): """Get information about supported methods. Calling this as the first thing before doing anything else is necessary to fill the available services table. """
response = await self.request_supported_methods()
if "result" in response:
    services = response["result"][0]
    _LOGGER.debug("Got %s services!" % len(services))
    for x in services:
        serv = await Service.from_payload(
            x, self.endpoint, self.idgen, self.debug, self.force_protocol
        )
        if serv is not None:
            self.services[x["service"]] = serv
        else:
            _LOGGER.warning("Unable to create service %s", x["service"])
    for service in self.services.values():
        if self.debug > 1:
            _LOGGER.debug("Service %s", service)
        for api in service.methods:
            # self.logger.debug("%s > %s" % (service, api))
            if self.debug > 1:
                _LOGGER.debug("> %s" % api)
    return self.services
return None
def ekacli(handle, segno, column, ivals, entszs, nlflgs, rcptrs, wkindx): """ Add an entire integer column to an EK segment. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekacli_c.html :param handle: EK file handle. :type handle: int :param segno: Number of segment to add column to. :type segno: int :param column: Column name. :type column: str :param ivals: Integer values to add to column. :type ivals: Array of ints :type entszs: Array of ints :param nlflgs: Array of null flags for column entries. :type nlflgs: Array of bools :param rcptrs: Record pointers for segment. :type rcptrs: Array of ints :param wkindx: Work space for column index. :type wkindx: Array of ints :return: Work space for column index. :rtype: Array of ints """
handle = ctypes.c_int(handle)
segno = ctypes.c_int(segno)
column = stypes.stringToCharP(column)
ivals = stypes.toIntVector(ivals)
entszs = stypes.toIntVector(entszs)
nlflgs = stypes.toIntVector(nlflgs)
rcptrs = stypes.toIntVector(rcptrs)
wkindx = stypes.toIntVector(wkindx)
libspice.ekacli_c(handle, segno, column, ivals, entszs, nlflgs, rcptrs, wkindx)
return stypes.cVectorToPython(wkindx)
def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1): """As above, but for when the correlation coefficient matching method is used """
template_mean = np.mean(template)
template_minus_mean = template - template_mean
template_norm = np.linalg.norm(template_minus_mean)
image_norms = {(x, y): np.linalg.norm(image_tile_dict[(x, y)] - np.mean(image_tile_dict[(x, y)]))*template_norm
               for (x, y) in image_tile_dict.keys()}
match_points = image_tile_dict.keys()
# for correlation, then need to transform back to get correct value for division
h, w = template.shape
image_matches_normalised = {match_points[i]: transformed_array[match_points[i][0], match_points[i][1]]/image_norms[match_points[i]]
                            for i in range(len(match_points))}
normalised_matches = {key: value for key, value in image_matches_normalised.items()
                      if np.round(value, decimals=3) >= normed_tolerance}
return normalised_matches.keys()
def diagonalize_blocks(arr, blocksize): """ Diagonalize sections of columns of an array for the whole array Parameters ---------- arr : numpy array Input array blocksize : int number of rows/colums forming one block Returns ------- numpy ndarray with shape (columns 'arr' * blocksize, columns 'arr' * blocksize) Example -------- arr: output: (blocksize = 3) 3 1 3 0 0 1 0 0 4 2 0 4 0 0 2 0 5 3 0 0 5 0 0 3 6 9 6 0 0 9 0 0 7 6 0 7 0 0 6 0 8 4 0 0 8 0 0 4 """
nr_col = arr.shape[1]
nr_row = arr.shape[0]
if np.mod(nr_row, blocksize):
    raise ValueError(
        'Number of rows of input array must be a multiple of blocksize')
arr_diag = np.zeros((nr_row, blocksize*nr_col))
for col_ind, col_val in enumerate(arr.T):
    col_start = col_ind*blocksize
    col_end = blocksize + col_ind*blocksize
    for _ind in range(int(nr_row/blocksize)):
        row_start = _ind*blocksize
        row_end = blocksize + _ind * blocksize
        arr_diag[row_start:row_end, col_start:col_end] = np.diag(col_val[row_start:row_end])
return arr_diag
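A small check of the blockwise diagonalization, assuming numpy is imported as in the source module:
>>> import numpy as np
>>> diagonalize_blocks(np.array([[1], [2], [3], [4]]), blocksize=2).tolist()
[[1.0, 0.0], [0.0, 2.0], [3.0, 0.0], [0.0, 4.0]]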
def stitch_map(tiles, width, height, bbox, dpi): """ Merge tiles together into one image. Args: tiles (list of dict of file): tiles for each layer width (float): page width in mm height (float): page height in mm dpi (int): resolution in dots per inch Returns: PIL.Image: merged map. """
size = (int(width * dpi_to_dpmm(dpi)), int(height * dpi_to_dpmm(dpi)))
background = Image.new('RGBA', size, (255, 255, 255))
for layer in tiles:
    layer_img = Image.new("RGBA", size)
    for (x, y), tile_path in layer.items():
        tile = Image.open(tile_path)
        layer_img.paste(tile, ((x - bbox.min.x) * TILE_SIZE,
                               (y - bbox.min.y) * TILE_SIZE))
    background = Image.alpha_composite(background, layer_img)
add_scales_bar(background, bbox)
return background.convert("RGB")
def add_property_to_response(self, code='200', prop_name='data', **kwargs): """Add a property (http://json-schema.org/latest/json-schema-validation.html#anchor64) # noqa: E501 to the schema of the response identified by the code"""
self['responses'] \
    .setdefault(str(code), self._new_operation()) \
    .setdefault('schema', {'type': 'object'}) \
    .setdefault('properties', {}) \
    .setdefault(prop_name, {}) \
    .update(**kwargs)
def render_to_console(self, message: str, **kwargs): """ Renders the specified message to the console using Jinja2 template rendering with the kwargs as render variables. The message will also be dedented prior to rendering in the same fashion as other Cauldron template rendering actions. :param message: Template string to be rendered. :param kwargs: Variables to be used in rendering the template. """
rendered = templating.render(message, **kwargs)
return self.write_to_console(rendered)
def generic_visit(self, node: ast.AST) -> None: """Raise an exception that this node has not been handled."""
raise NotImplementedError("Unhandled recomputation of the node: {} {}".format(type(node), node))
def thermal_expansion_coeff(self, structure, temperature, mode="debye"): """ Gets thermal expansion coefficient from third-order constants. Args: temperature (float): Temperature in kelvin, if not specified will return non-cv-normalized value structure (Structure): Structure to be used in directional heat capacity determination, only necessary if temperature is specified mode (string): mode for finding average heat-capacity, current supported modes are 'debye' and 'dulong-petit' """
soec = ElasticTensor(self[0])
v0 = (structure.volume * 1e-30 / structure.num_sites)
if mode == "debye":
    td = soec.debye_temperature(structure)
    t_ratio = temperature / td
    integrand = lambda x: (x**4 * np.exp(x)) / (np.exp(x) - 1)**2
    cv = 9 * 8.314 * t_ratio**3 * quad(integrand, 0, t_ratio**-1)[0]
elif mode == "dulong-petit":
    cv = 3 * 8.314
else:
    raise ValueError("Mode must be debye or dulong-petit")
tgt = self.get_tgt(temperature, structure)
alpha = np.einsum('ijkl,ij', soec.compliance_tensor, tgt)
alpha *= cv / (1e9 * v0 * 6.022e23)
return SquareTensor(alpha)
def logical_chassis_fwdl_sanity_output_fwdl_cmd_msg(self, **kwargs): """Auto Generated Code """
config = ET.Element("config") logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity") config = logical_chassis_fwdl_sanity output = ET.SubElement(logical_chassis_fwdl_sanity, "output") fwdl_cmd_msg = ET.SubElement(output, "fwdl-cmd-msg") fwdl_cmd_msg.text = kwargs.pop('fwdl_cmd_msg') callback = kwargs.pop('callback', self._callback) return callback(config)
def do_checkout(self, subcmd, opts, *args): """Check out a working copy from a repository. usage: checkout URL... [PATH] Note: If PATH is omitted, the basename of the URL will be used as the destination. If multiple URLs are given each will be checked out into a sub-directory of PATH, with the name of the sub-directory being the basename of the URL. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
def reset_pw_confirm_view(request, uidb64=None, token=None): """ View to confirm resetting password. """
return password_reset_confirm(request, template_name="reset_confirmation.html", uidb64=uidb64, token=token, post_reset_redirect=reverse('login'))
def _run_atexit(): """Hook frameworks must invoke this after the main hook body has successfully completed. Do not invoke it if the hook fails."""
global _atexit
for callback, args, kwargs in reversed(_atexit):
    callback(*args, **kwargs)
del _atexit[:]
def on_ok(self, sender): """ This callback is called when one task reaches status `S_OK`. It executes on_all_ok when all tasks in self have reached `S_OK`. """
logger.debug("in on_ok with sender %s" % sender) if self.all_ok: if self.finalized: return AttrDict(returncode=0, message="Work has been already finalized") else: # Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single work) self.finalized = True try: results = AttrDict(**self.on_all_ok()) except Exception as exc: self.history.critical("on_all_ok raises %s" % str(exc)) self.finalized = False raise # Signal to possible observers that the `Work` reached S_OK self.history.info("Work %s is finalized and broadcasts signal S_OK" % str(self)) if self._finalized: self.send_signal(self.S_OK) return results return AttrDict(returncode=1, message="Not all tasks are OK!")
def map_udp_port(public_port, private_port, lifetime=3600, gateway_ip=None, retry=9, use_exception=True): """A high-level wrapper to map_port() that requests a mapping for a public UDP port on the NAT to a private UDP port on this host. Returns the complete response on success. public_port - the public port of the mapping requested private_port - the private port of the mapping requested lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification. gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr() retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification. use_exception - throw an exception if an error result is received from the gateway. Defaults to True. """
return map_port(NATPMP_PROTOCOL_UDP, public_port, private_port, lifetime, gateway_ip=gateway_ip, retry=retry, use_exception=use_exception)
def pack(self): """ Pack this exception into a serializable dictionary that is safe for transport via msgpack """
if six.PY3:
    return {'message': six.text_type(self), 'args': self.args}
return dict(message=self.__unicode__(), args=self.args)
def confusion_matrix(self): """ Returns the normalised confusion matrix """
confusion_matrix = self.pixel_classification_sum.astype(np.float)
confusion_matrix = np.divide(confusion_matrix.T, self.pixel_truth_sum.T).T
return confusion_matrix * 100.0
def attributes(self): """Return sync attributes."""
attr = {
    'name': self.name,
    'id': self.sync_id,
    'network_id': self.network_id,
    'serial': self.serial,
    'status': self.status,
    'region': self.region,
    'region_id': self.region_id,
}
return attr
def multi_send(self, template, emails, _vars=None, evars=None, schedule_time=None, options=None): """ Remotely send an email template to multiple email addresses. http://docs.sailthru.com/api/send @param template: template string @param emails: List with email values or comma separated email string @param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself @param options: optional dictionary to include replyto and/or test keys @param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion """
_vars = _vars or {}
evars = evars or {}
options = options or {}
data = {'template': template,
        'email': ','.join(emails) if isinstance(emails, list) else emails,
        'vars': _vars.copy(),
        'evars': evars.copy(),
        'options': options.copy()}
if schedule_time is not None:
    data['schedule_time'] = schedule_time
return self.api_post('send', data)
def log_response( self, response: Response, trim_log_values: bool = False, **kwargs: Any ) -> None: """ Log a response. Note this is different to log_request, in that it takes a Response object, not a string. Args: response: The Response object to log. Note this is different to log_request which takes a string. trim_log_values: Log an abbreviated version of the response. """
return log_(response.text, response_log, "info", trim=trim_log_values, **kwargs)
def change_number_matches(self, current_match=0, total_matches=0): """Change number of match and total matches."""
if current_match and total_matches:
    matches_string = u"{} {} {}".format(current_match, _(u"of"), total_matches)
    self.number_matches_text.setText(matches_string)
elif total_matches:
    matches_string = u"{} {}".format(total_matches, _(u"matches"))
    self.number_matches_text.setText(matches_string)
else:
    self.number_matches_text.setText(_(u"no matches"))
def volume_create(self, name, size=100, snapshot=None, voltype=None, availability_zone=None): """ Create a block device """
if self.volume_conn is None:
    raise SaltCloudSystemExit('No cinder endpoint available')
nt_ks = self.volume_conn
response = nt_ks.volumes.create(
    size=size,
    display_name=name,
    volume_type=voltype,
    snapshot_id=snapshot,
    availability_zone=availability_zone
)
return self._volume_get(response.id)
def click_info_switch(f): """Decorator to create eager Click info switch option, as described in: http://click.pocoo.org/6/options/#callbacks-and-eager-options. Takes a no-argument function and abstracts the boilerplate required by Click (value checking, exit on done). Example: @click.option('--my-option', is_flag=True, callback=my_option, expose_value=False, is_eager=True) def test(): pass @click_info_switch def my_option() click.echo('some info related to my switch') """
@wraps(f)
def wrapped(ctx, param, value):
    if not value or ctx.resilient_parsing:
        return
    f()
    ctx.exit()
return wrapped
def wrap_results_for_axis(self): """ return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
    try:
        result.index = self.res_columns
    except ValueError:
        pass
    try:
        result.columns = self.res_index
    except ValueError:
        pass
return result
def _check_callback(callback): """ Turns a callback that is potentially a class into a callable object. Args: callback (object): An object that might be a class, method, or function. if the object is a class, this creates an instance of it. Raises: ValueError: If an instance can't be created or it isn't a callable object. TypeError: If the class requires arguments to be instantiated. Returns: callable: A callable object suitable for use as the consumer callback. """
# If the callback is a class, create an instance of it first
if inspect.isclass(callback):
    callback_object = callback()
    if not callable(callback_object):
        raise ValueError(
            "Callback must be a class that implements __call__ or a function."
        )
elif callable(callback):
    callback_object = callback
else:
    raise ValueError(
        "Callback must be a class that implements __call__ or a function."
    )
return callback_object
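A short sketch of both accepted forms, assuming the helper above is importable; the consumer class and function names here are hypothetical:
>>> def on_message(msg):
...     print(msg)
...
>>> _check_callback(on_message) is on_message
True
>>> class Consumer:
...     def __call__(self, msg):
...         print(msg)
...
>>> isinstance(_check_callback(Consumer), Consumer)
True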
def auto_invalidate(self): """ Invalidate the cache if the current time is past the time to live. """
current = datetime.now()
if current > self._invalidated + timedelta(seconds=self._timetolive):
    self.invalidate()
def _construct_retry(method_config, retry_codes, retry_params, retry_names): """Helper for ``construct_settings()``. Args: method_config (dict): A dictionary representing a single ``methods`` entry of the standard API client config file. (See ``construct_settings()`` for information on this yaml.) retry_codes (dict): A dictionary parsed from the ``retry_codes`` entry of the standard API client config file. (See ``construct_settings()`` for information on this yaml.) retry_params (dict): A dictionary parsed from the ``retry_params`` entry of the standard API client config file. (See ``construct_settings()`` for information on this yaml.) retry_names (dict): A dictionary mapping the string names used in the standard API client config file to API response status codes. Returns: Optional[RetryOptions]: The retry options, if applicable. """
if method_config is None:
    return None
codes = None
if retry_codes and 'retry_codes_name' in method_config:
    codes_name = method_config['retry_codes_name']
    if codes_name in retry_codes and retry_codes[codes_name]:
        codes = [retry_names[name] for name in retry_codes[codes_name]]
    else:
        codes = []
backoff_settings = None
if retry_params and 'retry_params_name' in method_config:
    params_name = method_config['retry_params_name']
    if params_name and params_name in retry_params:
        backoff_settings = gax.BackoffSettings(**retry_params[params_name])
return gax.RetryOptions(
    backoff_settings=backoff_settings,
    retry_codes=codes,
)
def create(self, request, *args, **kwargs): """ A new customer can only be created: - by users with staff privilege (is_staff=True); - by organization owners if OWNER_CAN_MANAGE_CUSTOMER is set to True; Example of a valid request: .. code-block:: http POST /api/customers/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "name": "Customer A", "native_name": "Customer A", "abbreviation": "CA", "contact_details": "Luhamaa 28, 10128 Tallinn", } """
return super(CustomerViewSet, self).create(request, *args, **kwargs)
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, sort=None, copy=True): """ Concatenate pandas objects along a particular axis with optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number. Parameters ---------- objs : a sequence or mapping of Series, DataFrame, or Panel objects If a dict is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis (or axes). join_axes : list of Index objects Specific indexes to use for the other n - 1 axes instead of performing inner/outer set logic. ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information. Note the index values on the other axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a MultiIndex. Otherwise they will be inferred from the keys. names : list, default None Names for the levels in the resulting hierarchical index. verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation. sort : bool, default None Sort non-concatenation axis if it is not already aligned when `join` is 'outer'. The current default of sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to silence the warning and sort. Explicitly pass ``sort=False`` to silence the warning and not sort. This has no effect when ``join='inner'``, which already preserves the order of the non-concatenation axis. .. versionadded:: 0.23.0 copy : bool, default True If False, do not copy data unnecessarily. Returns ------- object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along the columns (axis=1), a ``DataFrame`` is returned. See Also -------- Series.append : Concatenate Series. DataFrame.append : Concatenate DataFrames. DataFrame.join : Join DataFrames using indexes. DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- The keys, levels, and names arguments are all optional. A walkthrough of how this method fits in with other tools for combining pandas objects can be found `here <http://pandas.pydata.org/pandas-docs/stable/merging.html>`__. Examples -------- Combine two ``Series``. >>> s1 = pd.Series(['a', 'b']) >>> s2 = pd.Series(['c', 'd']) >>> pd.concat([s1, s2]) 0 a 1 b 0 c 1 d dtype: object Clear the existing index and reset it in the result by setting the ``ignore_index`` option to ``True``. 
>>> pd.concat([s1, s2], ignore_index=True) 0 a 1 b 2 c 3 d dtype: object Add a hierarchical index at the outermost level of the data with the ``keys`` option. >>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c 1 d dtype: object Label the index keys you create with the ``names`` option. >>> pd.concat([s1, s2], keys=['s1', 's2'], ... names=['Series name', 'Row ID']) Series name Row ID s1 0 a 1 b s2 0 c 1 d dtype: object Combine two ``DataFrame`` objects with identical columns. >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], ... columns=['letter', 'number']) >>> df1 letter number 0 a 1 1 b 2 >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], ... columns=['letter', 'number']) >>> df2 letter number 0 c 3 1 d 4 >>> pd.concat([df1, df2]) letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects with overlapping columns and return everything. Columns outside the intersection will be filled with ``NaN`` values. >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], ... columns=['letter', 'number', 'animal']) >>> df3 letter number animal 0 c 3 cat 1 d 4 dog >>> pd.concat([df1, df3], sort=False) letter number animal 0 a 1 NaN 1 b 2 NaN 0 c 3 cat 1 d 4 dog Combine ``DataFrame`` objects with overlapping columns and return only those that are shared by passing ``inner`` to the ``join`` keyword argument. >>> pd.concat([df1, df3], join="inner") letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects horizontally along the x axis by passing in ``axis=1``. >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], ... columns=['animal', 'name']) >>> pd.concat([df1, df4], axis=1) letter number animal name 0 a 1 bird polly 1 b 2 monkey george Prevent the result from including duplicate index values with the ``verify_integrity`` option. >>> df5 = pd.DataFrame([1], index=['a']) >>> df5 0 a 1 >>> df6 = pd.DataFrame([2], index=['a']) >>> df6 0 a 2 >>> pd.concat([df5, df6], verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a'] """
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
                   ignore_index=ignore_index, join=join,
                   keys=keys, levels=levels, names=names,
                   verify_integrity=verify_integrity,
                   copy=copy, sort=sort)
return op.get_result()
def parse_json_structure(string_item): """ Given a raw representation of a json structure, returns the parsed corresponding data structure (``JsonRpcRequest`` or ``JsonRpcRequestBatch``) :param string_item: :return: """
if not isinstance(string_item, str):
    raise TypeError("Expected str but got {} instead".format(type(string_item).__name__))
try:
    item = json.loads(string_item)
except json.JSONDecodeError:
    raise JsonRpcParseError()
if isinstance(item, dict):
    return JsonRpcRequest.from_dict(item)
elif isinstance(item, list):
    if len(item) == 0:
        raise JsonRpcInvalidRequestError()
    request_batch = JsonRpcRequestBatch([])
    for d in item:
        try:
            # handles the case of valid batch but with invalid requests.
            if not isinstance(d, dict):
                raise JsonRpcInvalidRequestError()
            # is dict, all fine
            parsed_entry = JsonRpcRequest.from_dict(d)
        except JsonRpcInvalidRequestError:
            parsed_entry = GenericResponse.INVALID_REQUEST
        request_batch.add_item(parsed_entry)
    return request_batch
def _setbitpos(self, pos): """Move to an absolute bit position in the bitstream."""
if pos < 0:
    raise ValueError("Bit position cannot be negative.")
if pos > self.len:
    raise ValueError("Cannot seek past the end of the data.")
self._pos = pos
def _get_headers(self, route: str, annotation: ResourceAnnotation) -> Dict: """Gets headers for the provided route. :param route: The route to get example values for. :type route: werkzeug.routing.Rule for a flask api. :param annotation: Schema annotation for the method to be requested. :type annotation: doctor.resource.ResourceAnnotation :returns: A dict containing headers. """
headers = self.headers.copy()
defined_header_values = self.defined_header_values.get(
    (annotation.http_method.lower(), str(route)))
if defined_header_values is not None:
    if defined_header_values['update']:
        headers.update(defined_header_values['values'])
    else:
        headers = defined_header_values['values']
return headers
def parseAndSave(option, urlOrPaths, outDir=None, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar, responseMimeType='application/json', metaExtension='_meta.json', services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta'}): """ Parse the objects and write extracted metadata and/or text in JSON format to matching filename with an extension of '_meta.json'. :param option: :param urlOrPaths: :param outDir: :param serverEndpoint: :param verbose: :param tikaServerJar: :param responseMimeType: :param metaExtension: :param services: :return: """
metaPaths = []
paths = getPaths(urlOrPaths)
for path in paths:
    if outDir is None:
        metaPath = path + metaExtension
    else:
        metaPath = os.path.join(outDir, os.path.split(path)[1] + metaExtension)
    log.info('Writing %s' % metaPath)
    with open(metaPath, 'w', 'utf-8') as f:
        f.write(parse1(option, path, serverEndpoint, verbose, tikaServerJar,
                       responseMimeType, services)[1] + u"\n")
    metaPaths.append(metaPath)
return metaPaths
def __make_message(self, topic, content): """ Prepares the message content """
return {"uid": str(uuid.uuid4()).replace('-', '').upper(), "topic": topic, "content": content}
def download_from_s3(self,bucket_name,s3_key, output_filename='temp_zappa_settings.py'): """ Download a file from S3 :param bucket_name: Name of the S3 bucket (string) :param s3_key: Name of the file hosted on S3 (string) :param output_filename: Name of the file the download operation will create (string) :return: False or the value of output_filename """
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
try:
    s3.meta.client.head_object(Bucket=bucket_name, Key=s3_key)
except botocore.exceptions.ClientError:
    return False
print(u'Downloading the settings file ({0}) from S3'.format(s3_key))
new_file = bucket.download_file(s3_key, output_filename)
return output_filename
def check_cgroup_availability_in_thread(options): """ Run check_cgroup_availability() in a separate thread to detect the following problem: If "cgexec --sticky" is used to tell cgrulesengd to not interfere with our child processes, the sticky flag unfortunately works only for processes spawned by the main thread, not those spawned by other threads (and this will happen if "benchexec -N" is used). """
thread = _CheckCgroupsThread(options)
thread.start()
thread.join()
if thread.error:
    raise thread.error
def add_listener(self, listener): """Add the given listener to the wrapped client. The listener will be wrapped, so that it will be called in the reactor thread. This way, it can safely use Twisted APIs. """
internal_listener = partial(self._call_in_reactor_thread, listener) self._internal_listeners[listener] = internal_listener return self._client.add_listener(internal_listener)
def reference_transcripts_for_variant( variant, transcript_id_whitelist=None, only_coding_changes=True): """ For a given variant, find all the transcripts which overlap the variant and for which it has a predictable effect on the amino acid sequence of the protein. """
predicted_effects = predicted_effects_for_variant( variant=variant, transcript_id_whitelist=transcript_id_whitelist, only_coding_changes=only_coding_changes) return [effect.transcript for effect in predicted_effects]
def add_to_public_members(self, public_member): """ :calls: `PUT /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_ :param public_member: :class:`github.NamedUser.NamedUser` :rtype: None """
assert isinstance(public_member, github.NamedUser.NamedUser), public_member headers, data = self._requester.requestJsonAndCheck( "PUT", self.url + "/public_members/" + public_member._identity )
def get_assessment_basic_authoring_session_for_bank(self, bank_id, proxy):
    """Gets the ``OsidSession`` associated with the assessment authoring service for the given bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of a bank
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.AssessmentBasicAuthoringSession) - an
            ``AssessmentBasicAuthoringSession``
    raise:  NotFound - ``bank_id`` not found
    raise:  NullArgument - ``bank_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_assessment_basic_authoring()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_basic_authoring()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
if not self.supports_assessment_basic_authoring(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.AssessmentBasicAuthoringSession(bank_id, proxy, self._runtime)
def aggregate(self, other=None): """collect the furthest failure from self and other."""
if not self.status: return self if not other: return self if not other.status: return other return Value(True, other.index, self.value + other.value, None)
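A sketch of the same "keep the most informative failure" logic with a minimal stand-in Value class (hypothetical, not the library's own type): a failed result always wins over a successful one, and when both succeed the values are concatenated.

class Value:
    # Minimal stand-in: status=False marks a failure at position `index`.
    def __init__(self, status, index, value, expected):
        self.status, self.index, self.value, self.expected = status, index, value, expected

    def aggregate(self, other=None):
        if not self.status:        # self already failed: keep it
            return self
        if not other:              # nothing to combine with
            return self
        if not other.status:       # other failed: propagate its failure
            return other
        # Both succeeded: concatenate values, keep the later index.
        return Value(True, other.index, self.value + other.value, None)

ok = Value(True, 3, ['a'], None)
fail = Value(False, 7, [], 'digit')
result = ok.aggregate(fail)
print(result.status, result.index, result.expected)   # False 7 digit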
def on_vm_process_priority_change(self, priority):
    """Triggered when the process priority of the associated virtual machine has changed.

    in priority of type :class:`VMProcPriority`
        The priority which was set.

    raises :class:`VBoxErrorInvalidVmState`
        Session state prevents operation.

    raises :class:`VBoxErrorInvalidObjectState`
        Session type prevents operation.

    raises :class:`VBoxErrorVmError`
        Error from underlying level. See additional error info.

    """
if not isinstance(priority, VMProcPriority): raise TypeError("priority can only be an instance of type VMProcPriority") self._call("onVMProcessPriorityChange", in_p=[priority])
def phisheye_term_list(self, include_inactive=False, **kwargs): """Provides a list of terms that are set up for this account. This call is not charged against your API usage limit. NOTE: The terms must be configured in the PhishEye web interface: https://research.domaintools.com/phisheye. There is no API call to set up the terms. """
return self._results('phisheye_term_list', '/v1/phisheye/term-list', include_inactive=include_inactive, items_path=('terms', ), **kwargs)
def get_jobs(self, full=None, limit=None, skip=None, start=None, end=None, output_format=None): """List jobs belonging to a specific user."""
method = 'GET' endpoint = '/rest/v1/{}/jobs'.format(self.client.sauce_username) data = {} if full is not None: data['full'] = full if limit is not None: data['limit'] = limit if skip is not None: data['skip'] = skip if start is not None: data['from'] = start if end is not None: data['to'] = end if output_format is not None: data['format'] = output_format if data: endpoint = '?'.join([endpoint, urlencode(data)]) return self.client.request(method, endpoint)
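The endpoint is built by URL-encoding only the filters that were actually supplied. A quick stand-alone check of that construction (the username is a placeholder):

from urllib.parse import urlencode

endpoint = '/rest/v1/{}/jobs'.format('alice')
data = {'limit': 10, 'from': 1609459200}       # only the supplied filters
if data:
    endpoint = '?'.join([endpoint, urlencode(data)])
print(endpoint)   # /rest/v1/alice/jobs?limit=10&from=1609459200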
def cols_to_numeric(df, col_list, dest=False):
    """
    Coerces a list of columns to numeric

    Parameters:

    df - DataFrame
        DataFrame to operate on
    col_list - list of strings
        names of columns to coerce
    dest - bool, default False
        Whether to apply the result to the DataFrame or return it.
        True is apply, False is return.
    """
if not dest:
    return _pd.DataFrame({col_name: col_to_numeric(df, col_name)
                          for col_name in col_list})
for col_name in col_list:
    col_to_numeric(df, col_name, dest)
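col_to_numeric is not shown here; a comparable effect can be sketched with pandas' own to_numeric, on the assumption that the helper coerces unparsable values to NaN:

import pandas as pd

df = pd.DataFrame({'a': ['1', '2', 'x'], 'b': ['3.5', '4', '5']})
converted = pd.DataFrame({col: pd.to_numeric(df[col], errors='coerce')
                          for col in ['a', 'b']})
print(converted.dtypes)   # both columns become float64; 'x' becomes NaN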
def set_scene_name(self, scene_id, name): """rename a scene by scene ID"""
if scene_id not in self.state.scenes:  # does that scene_id exist?
    err_msg = "Requested to rename scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
    logging.info(err_msg)
    return (False, 0, err_msg)
self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(name=name)  # TODO: is there a better solution?
sequence_number = self.zmq_publisher.publish_scene_name(scene_id, name)
logging.debug("Renamed scene {sceneNum}".format(sceneNum=scene_id))
return (True, sequence_number, "OK")
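The _replace call above is the standard way to "change" one field of an immutable namedtuple: it builds a new tuple rather than mutating in place, which is what the TODO is asking about. A minimal illustration with a hypothetical Scene tuple:

from collections import namedtuple

Scene = namedtuple('Scene', ['name', 'animation'])
scenes = {1: Scene(name='old name', animation='fade')}

# namedtuples are immutable, so _replace returns a new instance with one field swapped.
scenes[1] = scenes[1]._replace(name='new name')
print(scenes[1])   # Scene(name='new name', animation='fade')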
def remove(self, interval): """ Returns self after removing the interval and balancing. If interval is not present, raise ValueError. """
# since this is a list, called methods can set this to [1], # making it true done = [] return self.remove_interval_helper(interval, done, should_raise_error=True)
def valueAt(self, axes, point): """ Returns the values for each axis at the given point within this renderer's axis rectangle. :param axes | [<XChartAxis>, ..] point | <QPointF> :return {<str> axis name: <variant> value} """
rect = self._buildData.get('axis_rect') if not rect: return dict([(axis.name(), None) for axis in axes]) try: x_perc = (point.x() - rect.left()) / (rect.right() - rect.left()) except ZeroDivisionError: x_perc = 0.0 try: y_perc = (rect.bottom() - point.y()) / (rect.bottom() - rect.top()) except ZeroDivisionError: y_perc = 0.0 out = {} for axis in axes: if axis.orientation() == Qt.Vertical: out[axis.name()] = axis.valueAt(y_perc) else: out[axis.name()] = axis.valueAt(x_perc) return out
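The point is first normalised to a 0-1 fraction of the axis rectangle, with the vertical axis inverted because screen coordinates grow downward, and each axis then maps that fraction back to a value. The same arithmetic with plain numbers, no Qt involved (the rectangle coordinates are made up):

# Axis rectangle: left=10, right=110, top=20, bottom=220 (screen coordinates).
left, right, top, bottom = 10.0, 110.0, 20.0, 220.0
px, py = 60.0, 70.0                      # the queried point

x_perc = (px - left) / (right - left)    # 0.5: halfway along the horizontal axis
y_perc = (bottom - py) / (bottom - top)  # 0.75: y grows downward, so invert
print(x_perc, y_perc)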
def normalize_pred_string(predstr): """ Normalize the predicate string *predstr* to a conventional form. This makes predicate strings more consistent by removing quotes and the `_rel` suffix, and by lowercasing them. Examples: >>> normalize_pred_string('"_dog_n_1_rel"') '_dog_n_1' >>> normalize_pred_string('_dog_n_1') '_dog_n_1' """
tokens = [t for t in split_pred_string(predstr)[:3] if t is not None] if predstr.lstrip('\'"')[:1] == '_': tokens = [''] + tokens return '_'.join(tokens).lower()
def get_field_callback(self, field, event):
    # type: (str, str) -> Optional[Tuple[Callable, bool]]
    """
    Retrieves the registered method for the given event. Returns None
    if not found

    :param field: Name of the dependency field
    :param event: A component life cycle event
    :return: A 2-tuple containing the callback associated to the given event
             and a flag indicating whether the callback must be called in
             valid state only
    """
try: return self.factory_context.field_callbacks[field][event] except KeyError: return None
def _GetResourceTimestamps(self, pefile_object): """Retrieves timestamps from resource directory entries, if available. Args: pefile_object (pefile.PE): pefile object. Returns: list[int]: resource timestamps. """
timestamps = [] if not hasattr(pefile_object, 'DIRECTORY_ENTRY_RESOURCE'): return timestamps for entrydata in pefile_object.DIRECTORY_ENTRY_RESOURCE.entries: directory = entrydata.directory timestamp = getattr(directory, 'TimeDateStamp', 0) if timestamp: timestamps.append(timestamp) return timestamps
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: TaskQueueStatisticsContext for this TaskQueueStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext """
if self._context is None: self._context = TaskQueueStatisticsContext( self._version, workspace_sid=self._solution['workspace_sid'], task_queue_sid=self._solution['task_queue_sid'], ) return self._context
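Both _proxy implementations in this section follow the same lazy-initialisation idiom: the context is built on first access and cached on the instance afterwards. A generic, library-agnostic sketch of that idiom (names are illustrative only):

class LazyContext:
    def __init__(self, solution):
        self._solution = solution
        self._context = None

    @property
    def context(self):
        # Build the expensive context object only once, on first use.
        if self._context is None:
            self._context = {'built_from': dict(self._solution)}
        return self._context

inst = LazyContext({'workspace_sid': 'WSxxx'})
assert inst.context is inst.context   # the same cached object on every access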