# coding: utf-8
# Copyright 2015 Eezee-It
import json
import logging
from hashlib import sha256
import urlparse
from odoo import models, fields, api
from odoo.tools.float_utils import float_compare
from odoo.tools.translate import _
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.addons.payment_sips.controllers.main import SipsController
_logger = logging.getLogger(__name__)
CURRENCY_CODES = {
'EUR': '978',
'USD': '840',
'CHF': '756',
'GBP': '826',
'CAD': '124',
'JPY': '392',
'MXN': '484',
'TRY': '949',
'AUD': '036',
'NZD': '554',
'NOK': '578',
'BRL': '986',
'ARS': '032',
'KHR': '116',
'TWD': '901',
}
class AcquirerSips(models.Model):
_inherit = 'payment.acquirer'
provider = fields.Selection(selection_add=[('sips', 'Sips')])
    sips_merchant_id = fields.Char('SIPS Merchant ID', required_if_provider='sips', groups='base.group_user')
sips_secret = fields.Char('SIPS Secret', size=64, required_if_provider='sips', groups='base.group_user')
def _get_sips_urls(self, environment):
""" Worldline SIPS URLS """
url = {
'prod': 'https://payment-webinit.sips-atos.com/paymentInit',
'test': 'https://payment-webinit.simu.sips-atos.com/paymentInit', }
return {'sips_form_url': url.get(environment, url['test']), }
def _sips_generate_shasign(self, values):
""" Generate the shasign for incoming or outgoing communications.
:param dict values: transaction values
:return string: shasign
"""
if self.provider != 'sips':
raise ValidationError(_('Incorrect payment acquirer provider'))
data = values['Data']
        # Test key provided by Worldline
key = u'002001000000001_KEY1'
if self.environment == 'prod':
key = getattr(self, 'sips_secret')
shasign = sha256(data + key)
return shasign.hexdigest()
@api.multi
def sips_form_generate_values(self, values):
self.ensure_one()
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
currency = self.env['res.currency'].sudo().browse(values['currency_id'])
currency_code = CURRENCY_CODES.get(currency.name, False)
if not currency_code:
            raise ValidationError(_('Currency not supported by Worldline'))
amount = int(values['amount'] * 100)
if self.environment == 'prod':
# For production environment, key version 2 is required
merchant_id = getattr(self, 'sips_merchant_id')
key_version = '2'
else:
            # Test key provided by Atos Worldline works only with version 1
merchant_id = '002001000000001'
key_version = '1'
sips_tx_values = dict(values)
sips_tx_values.update({
'Data': u'amount=%s|' % amount +
u'currencyCode=%s|' % currency_code +
u'merchantId=%s|' % merchant_id +
u'normalReturnUrl=%s|' % urlparse.urljoin(base_url, SipsController._return_url) +
u'automaticResponseUrl=%s|' % urlparse.urljoin(base_url, SipsController._return_url) +
u'transactionReference=%s|' % values['reference'] +
u'statementReference=%s|' % values['reference'] +
u'keyVersion=%s' % key_version,
'InterfaceVersion': 'HP_2.3',
})
return_context = {}
if sips_tx_values.get('return_url'):
return_context[u'return_url'] = u'%s' % sips_tx_values.pop('return_url')
return_context[u'reference'] = u'%s' % sips_tx_values['reference']
sips_tx_values['Data'] += u'|returnContext=%s' % (json.dumps(return_context))
shasign = self._sips_generate_shasign(sips_tx_values)
sips_tx_values['Seal'] = shasign
return sips_tx_values
@api.multi
def sips_get_form_action_url(self):
self.ensure_one()
return self._get_sips_urls(self.environment)['sips_form_url']
class TxSips(models.Model):
_inherit = 'payment.transaction'
_sips_valid_tx_status = ['00']
_sips_wait_tx_status = ['90', '99']
_sips_refused_tx_status = ['05', '14', '34', '54', '75', '97']
_sips_error_tx_status = ['03', '12', '24', '25', '30', '40', '51', '63', '94']
_sips_pending_tx_status = ['60']
_sips_cancel_tx_status = ['17']
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _sips_data_to_object(self, data):
res = {}
for element in data.split('|'):
element_split = element.split('=')
res[element_split[0]] = element_split[1]
return res
@api.model
def _sips_form_get_tx_from_data(self, data):
""" Given a data dict coming from sips, verify it and find the related
transaction record. """
data = self._sips_data_to_object(data.get('Data'))
reference = data.get('transactionReference')
if not reference:
custom = json.loads(data.pop('returnContext', False) or '{}')
reference = custom.get('reference')
payment_tx = self.search([('reference', '=', reference)])
if not payment_tx or len(payment_tx) > 1:
error_msg = _('Sips: received data for reference %s') % reference
if not payment_tx:
error_msg += _('; no order found')
else:
                error_msg += _('; multiple orders found')
_logger.error(error_msg)
raise ValidationError(error_msg)
return payment_tx
@api.multi
def _sips_form_get_invalid_parameters(self, data):
invalid_parameters = []
data = self._sips_data_to_object(data.get('Data'))
# TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
if self.acquirer_reference and data.get('transactionReference') != self.acquirer_reference:
invalid_parameters.append(('transactionReference', data.get('transactionReference'), self.acquirer_reference))
# check what is bought
if float_compare(float(data.get('amount', '0.0')) / 100, self.amount, 2) != 0:
invalid_parameters.append(('amount', data.get('amount'), '%.2f' % self.amount))
if self.partner_reference and data.get('customerId') != self.partner_reference:
invalid_parameters.append(('customerId', data.get('customerId'), self.partner_reference))
return invalid_parameters
@api.multi
def _sips_form_validate(self, data):
data = self._sips_data_to_object(data.get('Data'))
status = data.get('responseCode')
data = {
'acquirer_reference': data.get('transactionReference'),
'partner_reference': data.get('customerId'),
'date_validate': data.get('transactionDateTime',
fields.Datetime.now())
}
res = False
if status in self._sips_valid_tx_status:
msg = 'Payment for tx ref: %s, got response [%s], set as done.' % \
(self.reference, status)
_logger.info(msg)
data.update(state='done', state_message=msg)
res = True
elif status in self._sips_error_tx_status:
msg = 'Payment for tx ref: %s, got response [%s], set as ' \
'error.' % (self.reference, status)
data.update(state='error', state_message=msg)
elif status in self._sips_wait_tx_status:
msg = 'Received wait status for payment ref: %s, got response ' \
'[%s], set as error.' % (self.reference, status)
data.update(state='error', state_message=msg)
elif status in self._sips_refused_tx_status:
msg = 'Received refused status for payment ref: %s, got response' \
' [%s], set as error.' % (self.reference, status)
data.update(state='error', state_message=msg)
elif status in self._sips_pending_tx_status:
msg = 'Payment ref: %s, got response [%s] set as pending.' \
% (self.reference, status)
data.update(state='pending', state_message=msg)
elif status in self._sips_cancel_tx_status:
msg = 'Received notification for payment ref: %s, got response ' \
'[%s], set as cancel.' % (self.reference, status)
data.update(state='cancel', state_message=msg)
else:
msg = 'Received unrecognized status for payment ref: %s, got ' \
'response [%s], set as error.' % (self.reference, status)
data.update(state='error', state_message=msg)
_logger.info(msg)
self.write(data)
return res
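# Standalone sketch (not part of the original module): the SIPS Seal is the
# SHA-256 hex digest of the Data string concatenated with the secret key, as in
# _sips_generate_shasign() above. The Data string and key below reuse the
# Worldline test values hard-coded above; real transactions would use the
# configured merchant id and secret.
def _example_sips_seal():
    data = u'amount=100|currencyCode=978|merchantId=002001000000001|keyVersion=1'
    key = u'002001000000001_KEY1'
    return sha256(data + key).hexdigest()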
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Generalized Pareto distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# Dependency imports
import hypothesis as hp
import hypothesis.strategies as hps
import numpy as np
from scipy import stats as sp_stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
# Pylint doesn't understand hps.composite.
# pylint: disable=no-value-for-parameter
@hps.composite
def generalized_paretos(draw, batch_shape=None):
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
constraints = dict(
loc=tfp_hps.identity_fn,
scale=tfp_hps.softplus_plus_eps(),
concentration=lambda x: tf.math.tanh(x) * 0.24) # <.25==safe for variance
params = draw(
tfp_hps.broadcasting_params(
batch_shape,
params_event_ndims=dict(loc=0, scale=0, concentration=0),
constraint_fn_for=constraints.get))
dist = tfd.GeneralizedPareto(validate_args=draw(hps.booleans()), **params)
if dist.batch_shape != batch_shape:
raise AssertionError('batch_shape mismatch: expect {} but got {}'.format(
batch_shape, dist))
return dist
@test_util.test_all_tf_execution_regimes
class GeneralizedParetoTest(test_util.TestCase):
@hp.given(generalized_paretos())
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testShape(self, dist):
# batch_shape == dist.batch_shape asserted in generalized_paretos()
self.assertEqual(dist.batch_shape, self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([]), dist.event_shape)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testLogPDF(self, dist):
xs = self.evaluate(dist.sample())
logp = dist.log_prob(xs)
self.assertEqual(dist.batch_shape, logp.shape)
p = dist.prob(xs)
self.assertEqual(dist.batch_shape, p.shape)
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
expected_logp = sp_stats.genpareto(conc, loc=loc, scale=scale).logpdf(xs)
actual_logp = self.evaluate(logp)
self.assertAllClose(expected_logp, actual_logp, rtol=1e-5)
self.assertAllClose(np.exp(expected_logp), self.evaluate(p), rtol=1e-5)
def testLogPDFBoundary(self):
# When loc = concentration = 0, we have an exponential distribution. Check
# that at 0 we have finite log prob.
scale = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)
dist = tfd.GeneralizedPareto(loc=0, scale=scale, concentration=0)
log_pdf = dist.log_prob(0.)
self.assertAllClose(-np.log(scale), self.evaluate(log_pdf), rtol=1e-5)
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testCDF(self, dist):
xs = self.evaluate(dist.sample())
cdf = dist.cdf(xs)
self.assertEqual(dist.batch_shape, cdf.shape)
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
expected_cdf = sp_stats.genpareto(conc, loc=loc, scale=scale).cdf(xs)
self.assertAllClose(expected_cdf, self.evaluate(cdf), rtol=5e-5)
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testMean(self, dist):
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
self.assertEqual(dist.batch_shape, dist.mean().shape)
if np.abs(conc) < 1e-5 and conc != 0:
return # scipy does badly at small nonzero concentrations.
expected = sp_stats.genpareto(conc, loc=loc, scale=scale).mean()
actual = self.evaluate(dist.mean())
self.assertAllClose(expected, actual, rtol=5e-4)
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testVariance(self, dist):
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
self.assertEqual(dist.batch_shape, dist.variance().shape)
expected = sp_stats.genpareto(conc, loc=loc, scale=scale).var()
if np.abs(conc) < 1e-4 and conc != 0:
return # scipy does badly at small nonzero concentrations.
if expected <= 0:
return # scipy sometimes returns nonsense zero or negative variances.
actual = self.evaluate(dist.variance())
print('var', loc, scale, conc, expected, actual, file=sys.stderr)
self.assertAllClose(expected, actual, rtol=.01)
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testEntropy(self, dist):
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
self.assertEqual(dist.batch_shape, dist.entropy().shape)
expected = sp_stats.genpareto.entropy(conc, loc=loc, scale=scale)
actual = self.evaluate(dist.entropy())
self.assertAllClose(expected, actual)
def testSample(self):
loc = np.float32(-7.5)
scale = np.float32(3.5)
conc = np.float32(0.07)
n = 100000
dist = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc)
samples = dist.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), samples.shape)
self.assertEqual((n,), sample_values.shape)
self.assertTrue(self._kstest(loc, scale, conc, sample_values))
self.assertAllClose(
sp_stats.genpareto.mean(conc, loc=loc, scale=scale),
sample_values.mean(),
rtol=.005)
self.assertAllClose(
sp_stats.genpareto.var(conc, loc=loc, scale=scale),
sample_values.var(),
rtol=.01)
def testFullyReparameterized(self):
loc = tf.constant(4.0)
scale = tf.constant(3.0)
conc = tf.constant(2.0)
_, grads = tfp.math.value_and_gradient(
lambda *args: tfd.GeneralizedPareto(*args).sample(100),
[loc, scale, conc])
self.assertLen(grads, 3)
self.assertAllNotNone(grads)
def testSampleKolmogorovSmirnovMultiDimensional(self):
loc = np.linspace(-10, 10, 3).reshape(3, 1, 1)
scale = np.linspace(1e-6, 7, 5).reshape(5, 1)
conc = np.linspace(-1.3, 1.3, 7)
dist = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc)
n = 10000
samples = dist.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 3, 5, 7), samples.shape)
self.assertEqual((n, 3, 5, 7), sample_values.shape)
fails = 0
trials = 0
for li, l in enumerate(loc.reshape(-1)):
for si, s in enumerate(scale.reshape(-1)):
for ci, c in enumerate(conc.reshape(-1)):
samps = sample_values[:, li, si, ci]
trials += 1
fails += 0 if self._kstest(l, s, c, samps) else 1
self.assertLess(fails, trials * 0.01)
def _kstest(self, loc, scale, conc, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = sp_stats.kstest(samples,
sp_stats.genpareto(conc, loc=loc, scale=scale).cdf)
# Return True when the test passes.
return ks < 0.02
def testPdfOfSampleMultiDims(self):
dist = tfd.GeneralizedPareto(
loc=0, scale=[[2.], [3.]], concentration=[-.37, .11])
num = 50000
samples = dist.sample(num, seed=test_util.test_seed())
pdfs = dist.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual((num, 2, 2), samples.shape)
self.assertEqual((num, 2, 2), pdfs.shape)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testNonPositiveInitializationParamsRaises(self):
scale = tf.constant(0.0, name='scale')
with self.assertRaisesOpError('Argument `scale` must be positive.'):
dist = tfd.GeneralizedPareto(
loc=0, scale=scale, concentration=1, validate_args=True)
self.evaluate(dist.mean())
def testGradientThroughConcentration(self):
concentration = tf.Variable(3.)
d = tfd.GeneralizedPareto(loc=0, scale=1, concentration=concentration)
with tf.GradientTape() as tape:
loss = -d.log_prob([1., 2., 4.])
grad = tape.gradient(loss, d.trainable_variables)
self.assertLen(grad, 1)
self.assertAllNotNone(grad)
def testAssertsPositiveScale(self):
scale = tf.Variable([1., 2., -3.])
self.evaluate(scale.initializer)
with self.assertRaisesOpError('Argument `scale` must be positive.'):
d = tfd.GeneralizedPareto(
loc=0, scale=scale, concentration=1, validate_args=True)
self.evaluate(d.sample())
def testAssertsPositiveScaleAfterMutation(self):
scale = tf.Variable([1., 2., 3.])
self.evaluate(scale.initializer)
d = tfd.GeneralizedPareto(
loc=0, scale=scale, concentration=0.25, validate_args=True)
self.evaluate(d.mean())
with self.assertRaisesOpError('Argument `scale` must be positive.'):
with tf.control_dependencies([scale.assign([1., 2., -3.])]):
self.evaluate(d.sample())
def testGradientThroughLocScale(self):
loc = tf.Variable(1.)
scale = tf.Variable(2.5)
d = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=.15)
with tf.GradientTape() as tape:
loss = -d.log_prob([1., 2., 4.])
grads = tape.gradient(loss, d.trainable_variables)
self.assertLen(grads, 2)
self.assertAllNotNone(grads)
if __name__ == '__main__':
tf.test.main()
|
#
# vect3dotfun.py
# Dot product of two 3-d vectors using function in Python 3.7
#
# Sparisoma Viridi | https://github.com/dudung
#
# 20210110
# 2001 Start creating this example.
# 2002 Test it and ok.
#
# Define dot function with two arguments
def dot(a, b):
p = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
return p
# Define two vectors using lists
r1 = [1, 2, 3]
r2 = [2, 2, 9]
# Calculate dot product of two vectors
p = dot(r1, r2)
# Display result
print("r1 = ", r1, sep="")
print("r2 = ", r2, sep="")
print("p = r1 \xb7 r2 = ", p, sep="")
|
from typing import Any, Dict, Union, Optional
from dataclasses import asdict, dataclass
Headers = Optional[Dict[str, Union[str, bool, int]]]
@dataclass
class APIGatewayProxyResult:
"""
Key names are expected and given by AWS APIGateway specifications and must not be changed
"""
statusCode: int
body: Union[str, Dict[str, Any]]
headers: Headers = None
multiValueHeaders: Headers = None
isBase64Encoded: Optional[bool] = None
def asdict(self):
return {k: v for k, v in asdict(self).items() if v is not None}
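# Hedged usage sketch (not part of the original module): build a minimal 200
# response and serialize only the fields that were actually set; the payload
# below is illustrative.
if __name__ == "__main__":
    result = APIGatewayProxyResult(
        statusCode=200,
        body='{"ok": true}',
        headers={"Content-Type": "application/json"},
    )
    # multiValueHeaders and isBase64Encoded stay None, so asdict() drops them.
    print(result.asdict())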
|
class Status:
OK = "OK"
ERROR = "ERROR"
class Response(dict):
def __init__(self, status, data):
super().__init__()
self["status"] = status
self["data"] = data
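# Hedged usage sketch: Response is a plain dict with fixed keys, so it can be
# serialized directly; the payload below is illustrative.
if __name__ == "__main__":
    import json
    print(json.dumps(Response(Status.OK, {"items": []})))
    # -> {"status": "OK", "data": {"items": []}}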
|
# Copyright (c) 2022 Andreas Törnkvist | MIT License
import math
class worldfile:
def __init__(self, filename):
        with open(filename) as wFile:
            w = [line.rstrip() for line in wFile.readlines()]
self.A = float(w[0])
self.D = float(w[1])
self.B = float(w[2])
self.E = float(w[3])
self.C = float(w[4])
self.F = float(w[5])
Xv = math.atan(self.D/self.A)
Yv = math.atan(self.B/self.E)
self.Xx = (math.cos(Xv) ** 2) / self.A
self.Xy = (math.cos(Xv) * math.sin(Xv)) / self.A
self.Yy = (math.cos(Yv) ** 2) / self.E
self.Yx = (math.cos(Yv) * math.sin(Yv)) / self.E
def coordToPx(self, lon, lat):
Dx = lon - self.C
Dy = lat - self.F
Px = (Dx * self.Xx) + (Dy * self.Yx)
Py = (Dx * self.Xy) + (Dy * self.Yy)
return(Px, Py)
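# Hedged usage sketch (file name and values are made up): a world file holds six
# lines: x pixel size A, rotation terms D and B, y pixel size E (negative for
# north-up rasters), and the C/F coordinates of the upper-left pixel.
if __name__ == "__main__":
    import tempfile
    sample = "\n".join(["0.0001", "0.0", "0.0", "-0.0001", "18.05", "59.35"])
    with tempfile.NamedTemporaryFile("w", suffix=".pgw", delete=False) as tmp:
        tmp.write(sample)
    wf = worldfile(tmp.name)
    print(wf.coordToPx(18.06, 59.34))  # roughly (100.0, 100.0)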
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""A parser for Relay's text format."""
from __future__ import absolute_import
import sys
from ast import literal_eval
from collections import deque
import tvm
from . import module
from .base import Span, SourceName
from . import expr
from . import ty
from . import op
PYTHON_VERSION = sys.version_info.major
try:
from .grammar.py3.RelayVisitor import RelayVisitor
from .grammar.py3.RelayParser import RelayParser
from .grammar.py3.RelayLexer import RelayLexer
except ImportError:
raise Exception("Couldn't find ANTLR parser. Try building with USE_ANTLR=ON.")
try:
from antlr4 import InputStream, CommonTokenStream
from antlr4.error.ErrorListener import ErrorListener
except ImportError:
    raise Exception("Couldn't find ANTLR runtime. " +
                    "Try running `pip{version} install antlr4-python{version}-runtime`."
                    .format(version=PYTHON_VERSION))
sys.setrecursionlimit(10000)
class ParseError(Exception):
"""Exception type for parse errors."""
def __init__(self, message):
# type: (str) -> None
super(ParseError, self).__init__()
self.message = message
def __repr__(self):
return "ParseError({})".format(self.message)
def __str__(self):
return repr(self)
class OpWrapper:
"""Overload the __call__ for op."""
pass
class ExprOp(OpWrapper):
"""Call an expr. The default, but does not handle attrs well."""
def __init__(self, operator):
self.operator = operator
def __call__(self, args, attrs, type_args):
try:
return expr.Call(self.operator, args, attrs, type_args)
except Exception:
            raise Exception("Operator {} is not registered. Its attributes are {}"
                            .format(self.operator, attrs))
class FuncOp(OpWrapper):
"""Convert the attrs, call the python function with the attrs passed in as keyword arguments.
    TVM should provide this in the future, as this is pretty similar to what op.get is providing.
"""
def __init__(self, operator):
self.operator = operator
def convert(self, v):
if isinstance(v, tuple):
return tuple([self.convert(x) for x in v])
if isinstance(v, expr.Constant):
return v.data.asnumpy().item()
if isinstance(v, str):
return v
raise Exception(v)
def __call__(self, args, attrs, type_args):
if attrs is None:
attrs = {}
x = self.operator(*args, **{k: self.convert(v) for k, v in attrs.items()})
if isinstance(x, expr.TupleWrapper):
x = x.astuple()
return x
BINARY_OPS = {
RelayParser.MUL: op.multiply,
RelayParser.DIV: op.divide,
RelayParser.ADD: op.add,
RelayParser.SUB: op.subtract,
RelayParser.LT: op.less,
RelayParser.GT: op.greater,
RelayParser.LE: op.less_equal,
RelayParser.GE: op.greater_equal,
RelayParser.EQ: op.equal,
RelayParser.NE: op.not_equal,
}
FUNC_OPS = {
"nn.conv2d": op.nn.conv2d,
"nn.batch_norm": op.nn.batch_norm,
"nn.dense": op.nn.dense,
"nn.bias_add": op.nn.bias_add,
"nn.max_pool2d": op.nn.max_pool2d,
"nn.global_max_pool2d": op.nn.global_max_pool2d,
"nn.avg_pool2d": op.nn.avg_pool2d,
"nn.global_avg_pool2d": op.nn.global_avg_pool2d,
"nn.softmax": op.nn.softmax,
"reshape": op.reshape,
"nn.conv2d_transpose": op.nn.conv2d_transpose,
"concatenate": op.concatenate,
"nn.dropout": op.nn.dropout_raw,
"zeros": op.zeros,
"split": op.split,
"cast": op.cast
}
TYPE_PREFIXES = [
"int",
"uint",
"float",
"bool",
]
T = ty.TypeVar("T")
# Scope = Deque[Tuple[str, T]]
# Scopes = Deque[Scope[T]]
def lookup(scopes, name):
# type: (Scopes[T], str) -> Optional[T]
"""Look up `name` in `scopes`."""
for scope in scopes:
for key, val in scope:
if key == name:
return val
return None
def spanify(f):
"""A decorator which attaches span information
to the value returned by calling `f`.
Intended for use with the below AST visiting
methods. The idea is that after we do the work
of constructing the AST we attach Span information.
"""
def _wrapper(*args, **kwargs):
# Assumes 0th arg is self and gets source_name from object.
sn = args[0].source_name
# Assumes 1st arg is an ANTLR parser context.
ctx = args[1]
ast = f(*args, **kwargs)
line, col = ctx.getSourceInterval()
sp = Span(sn, line, col)
if isinstance(ast, tvm.relay.expr.TupleWrapper):
ast = ast.astuple()
ast.set_span(sp)
return ast
return _wrapper
# TODO(@jmp): Use https://stackoverflow.com/q/13889941
# to figure out how to get ANTLR4 to be more unhappy about syntax errors
class ParseTreeToRelayIR(RelayVisitor):
"""Parse Relay text format into Relay IR."""
def __init__(self, source_name):
# type: (str) -> None
self.source_name = source_name
self.module = module.Module({}) # type: module.Module
# Adding an empty scope allows naked lets without pain.
self.var_scopes = deque([deque()]) # type: Scopes[expr.Var]
self.global_var_scope = deque() # type: Scope[expr.GlobalVar]
self.type_param_scopes = deque([deque()]) # type: Scopes[ty.TypeVar]
self.graph_expr = [] # type: List[expr.Expr]
super(ParseTreeToRelayIR, self).__init__()
def enter_var_scope(self):
# type: () -> None
"""Enter a new Var scope so it can be popped off later."""
self.var_scopes.appendleft(deque())
def exit_var_scope(self):
# type: () -> Scope[expr.Var]
"""Pop off the current Var scope and return it."""
return self.var_scopes.popleft()
def mk_var(self, name, type_):
# type: (str, ty.Type) -> expr.Var
"""Create a new Var and add it to the Var scope."""
var = expr.Var(name, type_)
self.var_scopes[0].appendleft((name, var))
return var
def mk_global_var(self, name):
# type: (str) -> expr.GlobalVar
"""Create a new GlobalVar and add it to the GlobalVar scope."""
var = expr.GlobalVar(name)
self.global_var_scope.append((name, var))
return var
def enter_type_param_scope(self):
# type: () -> None
"""Enter a new TypeVar scope so it can be popped off later."""
self.type_param_scopes.appendleft(deque())
def exit_type_param_scope(self):
# type: () -> Scope[ty.TypeVar]
"""Pop off the current TypeVar scope and return it."""
return self.type_param_scopes.popleft()
def mk_typ(self, name, kind):
# (str, ty.Kind) -> ty.TypeVar
"""Create a new TypeVar and add it to the TypeVar scope."""
typ = ty.TypeVar(name, kind)
self.type_param_scopes[0].appendleft((name, typ))
return typ
def visitProjection(self, ctx):
return expr.TupleGetItem(self.visit(ctx.expr()), self.visit(ctx.NAT()))
def visitTerminal(self, node):
# type: (TerminalNode) -> Union[expr.Expr, int, float]
"""Visit lexer tokens that aren't ignored or visited by other functions."""
node_type = node.getSymbol().type
node_text = node.getText()
name = node_text[1:]
# variables
if node_type == RelayLexer.GLOBAL_VAR:
return lookup(deque([self.global_var_scope]), node_text[1:])
if node_type == RelayLexer.LOCAL_VAR:
# Remove the leading '%' and lookup the name.
var = lookup(self.var_scopes, name)
if var is None:
raise ParseError("Couldn't resolve `{}`.".format(name))
return var
if node_type == RelayLexer.GRAPH_VAR:
try:
return self.graph_expr[int(name)]
except IndexError:
raise ParseError("Couldn't resolve `{}`".format(name))
# data types
if node_type == RelayLexer.NAT:
return int(node_text)
if node_type == RelayLexer.FLOAT:
return float(node_text[:-1])
if node_type == RelayLexer.BOOL_LIT:
if node_text == "True":
return True
if node_text == "False":
return False
raise ParseError("Unrecognized BOOL_LIT: `{}`".format(node_text))
if node_type == RelayLexer.QUOTED_STRING:
return literal_eval(node_text)
raise ParseError("todo: `{}`".format(node_text))
    def visit_list(self, ctx_list):
        # type: (List[ParserRuleContext]) -> List[Any]
        """Visit a list of contexts."""
assert isinstance(ctx_list, list)
return [self.visit(ctx) for ctx in ctx_list]
def getType_(self, ctx):
# type: (Optional[RelayParser.Type_Context]) -> Optional[ty.Type]
"""Return a (possibly None) Relay type."""
if ctx is None:
return None
return self.visit(ctx)
    def visitProg(self, ctx):
        # type: (RelayParser.ProgContext) -> Union[expr.Expr, module.Module]
        self.meta = None
        if ctx.METADATA():
            header, data = str(ctx.METADATA()).split('\n', 1)
            assert header == "METADATA:"
            self.meta = tvm.load_json(data)
        if ctx.defn():
            self.visit_list(ctx.defn())
            return self.module
        if ctx.expr():
            return self.visit(ctx.expr())
        return self.module
# Exprs
def visitOpIdent(self, ctx):
# type: (RelayParser.OpIdentContext) -> op.Op
op_name = ctx.CNAME().getText()
if op_name in FUNC_OPS:
return FuncOp(FUNC_OPS[op_name])
return ExprOp(op.get(op_name))
# pass through
def visitParen(self, ctx):
# type: (RelayParser.ParenContext) -> expr.Expr
return self.visit(ctx.expr())
# pass through
def visitBody(self, ctx):
# type: (RelayParser.BodyContext) -> expr.Expr
return self.visit(ctx.expr())
def visitScalarFloat(self, ctx):
# type: (RelayParser.ScalarFloatContext) -> expr.Constant
return expr.const(self.visit(ctx.FLOAT()))
def visitScalarInt(self, ctx):
# type: (RelayParser.ScalarIntContext) -> expr.Constant
return expr.const(self.visit(ctx.NAT()))
def visitScalarBool(self, ctx):
# type: (RelayParser.ScalarBoolContext) -> expr.Constant
return expr.const(self.visit(ctx.BOOL_LIT()))
def visitNeg(self, ctx):
# type: (RelayParser.NegContext) -> Union[expr.Constant, expr.Call]
val = self.visit(ctx.expr())
if isinstance(val, expr.Constant) and val.data.asnumpy().ndim == 0:
# fold Neg in for scalars
return expr.const(-val.data.asnumpy().item())
return op.negative(val)
def visitTuple(self, ctx):
# type: (RelayParser.TupleContext) -> expr.Tuple
tup = self.visit_list(ctx.expr())
return expr.Tuple(tup)
def visitLet(self, ctx):
# type: (RelayParser.SeqContext) -> expr.Let
"""Desugar various sequence constructs to Relay Let nodes."""
if ctx.var() is None:
# anonymous identity
ident = "_"
type_ = None
var = self.mk_var(ident, type_)
else:
var = self.visitVar(ctx.var())
self.enter_var_scope()
value = self.visit(ctx.expr(0))
self.exit_var_scope()
body = self.visit(ctx.expr(1))
return expr.Let(var, value, body)
def visitBinOp(self, ctx):
# type: (RelayParser.BinOpContext) -> expr.Call
"""Desugar binary operators."""
arg0, arg1 = self.visit_list(ctx.expr())
relay_op = BINARY_OPS.get(ctx.op.type)
if relay_op is None:
raise ParseError("Unimplemented binary op.")
return relay_op(arg0, arg1)
@spanify
def visitVar(self, ctx):
# type: (RelayParser.VarContext) -> expr.Var
"""Visit a single variable."""
ident = ctx.LOCAL_VAR()
if ident is None:
raise ParseError("Only local ids may be used in vars.")
type_ = self.getType_(ctx.type_())
return self.mk_var(ident.getText()[1:], type_)
def visitVarList(self, ctx):
# type: (RelayParser.VarListContext) -> List[expr.Var]
return self.visit_list(ctx.var())
# TODO: support a larger class of values than just Relay exprs
def visitAttr(self, ctx):
# type: (RelayParser.AttrContext) -> Tuple[str, expr.Expr]
return (ctx.CNAME().getText(), self.visit(ctx.expr()))
def visitArgNoAttr(self, ctx):
return (self.visit_list(ctx.varList().var()), None)
def visitAttrSeq(self, ctx):
# type: (RelayParser.AttrListContext) -> Dict[str, expr.Expr]
return dict(self.visit_list(ctx.attr()))
def visitArgWithAttr(self, ctx):
return (self.visit_list(ctx.var()), self.visitAttrSeq(ctx.attrSeq()))
def visitArgList(self,
ctx # type: RelayParser.ArgListContext
):
# type: (...) -> Tuple[Optional[List[expr.Var]], Optional[Dict[str, expr.Expr]]]
var_list = self.visit(ctx.varList()) if ctx.varList() else None
attr_list = self.visit(ctx.attrList()) if ctx.attrList() else None
return (var_list, attr_list)
def visitMeta(self, ctx):
type_key = str(ctx.CNAME())
index = int(self.visit(ctx.NAT()))
return self.meta[type_key][index]
def mk_func(self, ctx):
# type: (Union[RelayParser.FuncContext, RelayParser.DefnContext]) -> expr.Function
"""Construct a function from either a Func or Defn."""
# Enter var scope early to put params in scope.
self.enter_var_scope()
# Capture type params in params.
self.enter_type_param_scope()
type_params = ctx.typeParamList()
if type_params is not None:
type_params = type_params.ident()
assert type_params
for ty_param in type_params:
name = ty_param.getText()
self.mk_typ(name, ty.Kind.Type)
var_list, attr_list = self.visit(ctx.argList())
if var_list is None:
var_list = []
ret_type = self.getType_(ctx.type_())
body = self.visit(ctx.body())
# NB(@jroesch): you must stay in the type parameter scope until
# after you exit the body, you can reference the type parameters
# of your parent scopes.
type_params = list(self.exit_type_param_scope())
if type_params:
_, type_params = zip(*type_params)
self.exit_var_scope()
attrs = tvm.make.node("DictAttrs", **attr_list) if attr_list is not None else None
return expr.Function(var_list, body, ret_type, type_params, attrs)
@spanify
def visitFunc(self, ctx):
# type: (RelayParser.FuncContext) -> expr.Function
return self.mk_func(ctx)
# TODO: how to set spans for definitions?
# @spanify
def visitDefn(self, ctx):
# type: (RelayParser.DefnContext) -> None
ident = ctx.ident().GLOBAL_VAR()
if ident is None:
raise ParseError("Only global ids may be used in `def`s.")
ident_name = ident.getText()[1:]
ident = self.mk_global_var(ident_name)
self.module[ident] = self.mk_func(ctx)
def visitCallNoAttr(self, ctx):
return (self.visit_list(ctx.exprList().expr()), None)
def visitCallWithAttr(self, ctx):
return (self.visit_list(ctx.expr()), self.visit(ctx.attrSeq()))
def call(self, func, args, attrs, type_args):
if isinstance(func, OpWrapper):
return func(args, attrs, type_args)
return expr.Call(func, args, attrs, type_args)
@spanify
def visitCall(self, ctx):
# type: (RelayParser.CallContext) -> expr.Call
func = self.visit(ctx.expr())
args, attrs = self.visit(ctx.callList())
return self.call(func, args, attrs, [])
@spanify
def visitIfElse(self, ctx):
# type: (RelayParser.IfElseContext) -> expr.If
"""Construct a Relay If node. Creates a new scope for each branch."""
cond = self.visit(ctx.expr())
self.enter_var_scope()
true_branch = self.visit(ctx.body(0))
self.exit_var_scope()
self.enter_var_scope()
false_branch = self.visit(ctx.body(1))
self.exit_var_scope()
return expr.If(cond, true_branch, false_branch)
@spanify
def visitGraph(self, ctx):
# type: (RelayParser.GraphContext) -> expr.Expr
"""Visit a graph variable assignment."""
graph_nid = int(ctx.GRAPH_VAR().getText()[1:])
self.enter_var_scope()
value = self.visit(ctx.expr(0))
self.exit_var_scope()
if graph_nid != len(self.graph_expr):
raise ParseError(
"Expected new graph variable to be `%{}`,".format(len(self.graph_expr)) + \
"but got `%{}`".format(graph_nid))
self.graph_expr.append(value)
kont = self.visit(ctx.expr(1))
return kont
# Types
# pylint: disable=unused-argument
def visitIncompleteType(self, ctx):
# type (RelayParser.IncompleteTypeContext) -> None:
return None
def visitTypeIdent(self, ctx):
# type: (RelayParser.TypeIdentContext) -> Union[ty.TensorType, str]
        """Handle a type identifier."""
type_ident = ctx.CNAME().getText()
# Look through all type prefixes for a match
for type_prefix in TYPE_PREFIXES:
if type_ident.startswith(type_prefix):
return ty.scalar_type(type_ident)
type_param = lookup(self.type_param_scopes, type_ident)
if type_param is not None:
return type_param
raise ParseError("Unknown builtin type: {}".format(type_ident))
# def visitCallType(self, ctx):
# # type: (RelayParser.CallTypeContext) -> Union[expr.Expr, ty.TensorType]
# ident_type = ctx.identType().CNAME().getText()
# args = self.visit_list(ctx.type_())
# if not args:
# raise ParseError("Type-level functions must have arguments!")
# func_type = TYPE_FUNCS.get(ident_type)(args)
# if func_type is None:
# raise ParseError("Unknown type-level function: `{}`".format(ident_type))
# else:
# return func_type
def visitParensShape(self, ctx):
# type: (RelayParser.ParensShapeContext) -> int
return self.visit(ctx.shape())
def visitShapeList(self, ctx):
# type: (RelayParser.ShapeListContext) -> List[int]
return self.visit_list(ctx.shape())
def visitTensor(self, ctx):
return tuple(self.visit_list(ctx.expr()))
def visitTensorType(self, ctx):
# type: (RelayParser.TensorTypeContext) -> ty.TensorType
"""Create a simple tensor type. No generics."""
shape = self.visit(ctx.shapeList())
dtype = self.visit(ctx.type_())
if not isinstance(dtype, ty.TensorType):
raise ParseError("Expected dtype to be a Relay base type.")
dtype = dtype.dtype
return ty.TensorType(shape, dtype)
def visitTupleType(self, ctx):
# type: (RelayParser.TupleTypeContext) -> ty.TupleType
return ty.TupleType(self.visit_list(ctx.type_()))
def visitFuncType(self, ctx):
# type: (RelayParser.FuncTypeContext) -> ty.FuncType
types = self.visit_list(ctx.type_())
arg_types = types[:-1]
ret_type = types[-1]
return ty.FuncType(arg_types, ret_type, [], None)
def make_parser(data):
    # type: (str) -> RelayParser
    """Construct a RelayParser from a given data stream."""
input_stream = InputStream(data)
lexer = RelayLexer(input_stream)
lexer.addErrorListener(StrictErrorListener(data))
token_stream = CommonTokenStream(lexer)
p = RelayParser(token_stream)
p.addErrorListener(StrictErrorListener(data))
return p
__source_name_counter__ = 0
class StrictErrorListener(ErrorListener):
    """This ErrorListener fails eagerly on every error and reports the offending program."""
def __init__(self, text):
self.text = text
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
raise Exception("Syntax Error in:\n" + self.text)
def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
raise Exception("Ambiguity Error in:\n" + self.text)
def reportAttemptingFullContext(self,
recognizer,
dfa,
startIndex,
stopIndex,
conflictingAlts,
configs):
raise Exception("Attempting Full Context in:\n" + self.text)
def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
raise Exception("Context Sensitivity in:\n" + self.text)
def fromtext(data, source_name=None):
# type: (str, str) -> Union[expr.Expr, module.Module]
"""Parse a Relay program."""
if data == "":
raise ParseError("Cannot parse the empty string.")
global __source_name_counter__
if source_name is None:
source_name = "source_file{0}".format(__source_name_counter__)
if isinstance(source_name, str):
source_name = SourceName(source_name)
tree = make_parser(data).prog()
return ParseTreeToRelayIR(source_name).visit(tree)
|
# Copyright (C) 2015 Stefan C. Mueller
import functools
from twisted.internet import defer
def on_error_close(logger):
"""
Decorator for callback methods that implement `IProtocol`.
Any uncaught exception is logged and the connection is closed
forcefully.
Usage::
        import logging
logger = logging.getLogger(__name__)
class MyProtocol(Protocol):
@on_error_close(logger.error)
def connectionMade():
...
The argument passed to `on_error_close` will be invoked with a
string message.
    The motivation behind this decorator is as follows:
    Due to bugs it sometimes happens that exceptions are thrown out of
    callback methods in protocols. Twisted ignores them; at best they
    are logged. This is always a bug, as errors should be handled in the
    callback and not left to propagate up the call stack. As such, the
    behaviour after this has occurred is typically not well defined and
    unpredictable.
    A well-made protocol implementation can handle unexpected connection
    losses, as they may occur at any time in a real-world environment.
    By closing the connection, there is a certain chance
    that we enter a code path that can recover, or at least gracefully
    clean up.
In my experience, this often means that unit-tests fail with a more
useful error message. Without it, I sometimes get the case that a
unit-test (or even the final application) just blocks forever
with no information on what is going wrong.
"""
def make_wrapper(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
d = defer.maybeDeferred(func, self, *args, **kwargs)
def on_error(err):
                logger("Unhandled failure in %r: %s" % (func, err.getTraceback()))
if hasattr(self, "transport"):
if hasattr(self.transport, "abortConnection"):
self.transport.abortConnection()
elif hasattr(self.transport, "loseConnection"):
self.transport.loseConnection()
d.addErrback(on_error)
return wrapper
return make_wrapper
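# Hedged usage sketch (EchoProtocol and its logger are illustrative names, not
# part of this module): any exception escaping dataReceived is logged through
# logger.error and the connection is closed forcefully.
import logging
from twisted.internet.protocol import Protocol
_example_logger = logging.getLogger(__name__)
class EchoProtocol(Protocol):
    @on_error_close(_example_logger.error)
    def dataReceived(self, data):
        # An exception raised here would normally be swallowed by Twisted;
        # the decorator logs it and aborts the transport instead.
        self.transport.write(data)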
|
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Key(object):
@classmethod
def from_path(cls, *args, **kwds):
raise NotImplementedError("Paths are not currently supported")
def __init__(self, encoded=None, obj=None):
self.name = None
if obj:
self.id = obj.id
self.kind = obj.kind()
else:
self.id = None
self.kind = None
def app(self):
raise NotImplementedError("Applications are not currently supported")
def kind(self):
return self.kind
def id(self):
return self.id
def name(self):
raise NotImplementedError("Key Names are not currently supported")
def id_or_name(self):
return self.id
def has_id_or_name(self):
return self.id is not None
def parent(self):
raise NotImplementedError("Key parents are not currently supported")
def __str__(self):
return self.id_or_name()
|
# Copyright 2013 Lars Butler & individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tokenize
try:
import StringIO
except ImportError:
import io
StringIO = io
INVALID_WKT_FMT = 'Invalid WKT: `%s`'
def dump(obj, dest_file):
"""
Dump GeoJSON-like `dict` to WKT and write it to the `dest_file`.
:param dict obj:
        A GeoJSON-like dictionary. It must have at least the keys 'type' and
        'coordinates'.
:param dest_file:
Open and writable file-like object.
"""
dest_file.write(dumps(obj))
def load(source_file):
"""
Load a GeoJSON `dict` object from a ``source_file`` containing WKT.
:param source_file:
Open and readable file-like object.
:returns:
A GeoJSON `dict` representing the geometry read from the file.
"""
return loads(source_file.read())
def dumps(obj, decimals=16):
"""
Dump a GeoJSON-like `dict` to a WKT string.
"""
geom_type = obj['type']
exporter = _dumps_registry.get(geom_type)
if exporter is None:
_unsupported_geom_type(geom_type)
fmt = '%%.%df' % decimals
return exporter(obj, fmt)
def loads(string):
"""
Construct a GeoJSON `dict` from WKT (`string`).
"""
sio = StringIO.StringIO(string)
# NOTE: This is not the intended purpose of `tokenize`, but it works.
tokens = (x[1] for x in tokenize.generate_tokens(sio.readline))
tokens = _tokenize_wkt(tokens)
geom_type = next(tokens)
importer = _loads_registry.get(geom_type)
if importer is None:
_unsupported_geom_type(geom_type)
return importer(tokens, string)
def _tokenize_wkt(tokens):
"""
Since the tokenizer treats "-" and numeric strings as separate values,
    combine them and yield them as a single token. This utility encapsulates
    parsing of negative numeric values from WKT and can be used generically in
    all parsers.
"""
negative = False
for t in tokens:
if t == '-':
negative = True
continue
else:
if negative:
yield '-%s' % t
else:
yield t
negative = False
def _unsupported_geom_type(geom_type):
raise ValueError("Unsupported geometry type '%s'" % geom_type)
def _dump_point(obj, fmt):
"""
Dump a GeoJSON-like Point object to WKT.
:param dict obj:
A GeoJSON-like `dict` representing a Point.
:param str fmt:
Format string which indicates the number of digits to display after the
decimal point when formatting coordinates.
:returns:
WKT representation of the input GeoJSON Point ``obj``.
"""
coords = obj['coordinates']
pt = 'POINT (%s)' % ' '.join(fmt % c for c in coords)
return pt
def _dump_linestring(obj, fmt):
"""
Dump a GeoJSON-like LineString object to WKT.
Input parameters and return value are the LINESTRING equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
ls = 'LINESTRING (%s)'
ls %= ', '.join(' '.join(fmt % c for c in pt) for pt in coords)
return ls
def _dump_polygon(obj, fmt):
"""
Dump a GeoJSON-like Polygon object to WKT.
Input parameters and return value are the POLYGON equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
poly = 'POLYGON (%s)'
rings = (', '.join(' '.join(fmt % c for c in pt) for pt in ring)
for ring in coords)
rings = ('(%s)' % r for r in rings)
poly %= ', '.join(rings)
return poly
def _dump_multipoint(obj, fmt):
"""
Dump a GeoJSON-like MultiPoint object to WKT.
Input parameters and return value are the MULTIPOINT equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
mp = 'MULTIPOINT (%s)'
points = (' '.join(fmt % c for c in pt) for pt in coords)
# Add parens around each point.
points = ('(%s)' % pt for pt in points)
mp %= ', '.join(points)
return mp
def _dump_multilinestring(obj, fmt):
"""
Dump a GeoJSON-like MultiLineString object to WKT.
Input parameters and return value are the MULTILINESTRING equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
mlls = 'MULTILINESTRING (%s)'
linestrs = ('(%s)' % ', '.join(' '.join(fmt % c for c in pt)
for pt in linestr) for linestr in coords)
mlls %= ', '.join(ls for ls in linestrs)
return mlls
def _dump_multipolygon(obj, fmt):
"""
Dump a GeoJSON-like MultiPolygon object to WKT.
Input parameters and return value are the MULTIPOLYGON equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
mp = 'MULTIPOLYGON (%s)'
polys = (
# join the polygons in the multipolygon
', '.join(
# join the rings in a polygon,
# and wrap in parens
'(%s)' % ', '.join(
# join the points in a ring,
# and wrap in parens
'(%s)' % ', '.join(
# join coordinate values of a vertex
' '.join(fmt % c for c in pt)
for pt in ring)
for ring in poly)
for poly in coords)
)
mp %= polys
return mp
def _dump_geometrycollection(obj, fmt):
"""
Dump a GeoJSON-like GeometryCollection object to WKT.
Input parameters and return value are the GEOMETRYCOLLECTION equivalent to
:func:`_dump_point`.
The WKT conversions for each geometry in the collection are delegated to
their respective functions.
"""
gc = 'GEOMETRYCOLLECTION (%s)'
geoms = obj['geometries']
geoms_wkt = []
for geom in geoms:
geom_type = geom['type']
geoms_wkt.append(_dumps_registry.get(geom_type)(geom, fmt))
gc %= ','.join(geoms_wkt)
return gc
def _load_point(tokens, string):
"""
:param tokens:
        A generator of string tokens for the input WKT, beginning just after the
        geometry type. The geometry type is consumed before we get here. For
example, if :func:`loads` is called with the input 'POINT(0.0 1.0)',
``tokens`` would generate the following values:
.. code-block:: python
['(', '0.0', '1.0', ')']
:param str string:
The original WKT string.
:returns:
A GeoJSON `dict` Point representation of the WKT ``string``.
"""
if not next(tokens) == '(':
raise ValueError(INVALID_WKT_FMT % string)
coords = []
try:
for t in tokens:
if t == ')':
break
else:
coords.append(float(t))
except tokenize.TokenError:
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='Point', coordinates=coords)
def _load_linestring(tokens, string):
"""
    Has similar inputs and return value to :func:`_load_point`, except it
    handles LINESTRING geometry.
:returns:
A GeoJSON `dict` LineString representation of the WKT ``string``.
"""
if not next(tokens) == '(':
raise ValueError(INVALID_WKT_FMT % string)
# a list of lists
# each member list represents a point
coords = []
try:
pt = []
for t in tokens:
if t == ')':
coords.append(pt)
break
elif t == ',':
# it's the end of the point
coords.append(pt)
pt = []
else:
pt.append(float(t))
except tokenize.TokenError:
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='LineString', coordinates=coords)
def _load_polygon(tokens, string):
"""
    Has similar inputs and return value to :func:`_load_point`, except it
    handles POLYGON geometry.
:returns:
A GeoJSON `dict` Polygon representation of the WKT ``string``.
"""
open_parens = next(tokens), next(tokens)
if not open_parens == ('(', '('):
raise ValueError(INVALID_WKT_FMT % string)
# coords contains a list of rings
# each ring contains a list of points
# each point is a list of 2-4 values
coords = []
ring = []
on_ring = True
try:
pt = []
for t in tokens:
if t == ')' and on_ring:
# The ring is finished
ring.append(pt)
coords.append(ring)
on_ring = False
elif t == ')' and not on_ring:
# it's the end of the polygon
break
elif t == '(':
# it's a new ring
ring = []
pt = []
on_ring = True
elif t == ',' and on_ring:
# it's the end of a point
ring.append(pt)
pt = []
elif t == ',' and not on_ring:
# there's another ring.
# do nothing
pass
else:
pt.append(float(t))
except tokenize.TokenError:
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='Polygon', coordinates=coords)
def _load_multipoint(tokens, string):
"""
    Has similar inputs and return value to :func:`_load_point`, except it
    handles MULTIPOINT geometry.
:returns:
A GeoJSON `dict` MultiPoint representation of the WKT ``string``.
"""
open_paren = next(tokens)
if not open_paren == '(':
raise ValueError(INVALID_WKT_FMT % string)
coords = []
pt = []
paren_depth = 1
try:
for t in tokens:
if t == '(':
paren_depth += 1
elif t == ')':
paren_depth -= 1
if paren_depth == 0:
break
elif t == '':
pass
elif t == ',':
# the point is done
coords.append(pt)
pt = []
else:
pt.append(float(t))
except tokenize.TokenError:
raise ValueError(INVALID_WKT_FMT % string)
# Given the way we're parsing, we'll probably have to deal with the last
# point after the loop
if len(pt) > 0:
coords.append(pt)
return dict(type='MultiPoint', coordinates=coords)
def _load_multipolygon(tokens, string):
"""
    Has similar inputs and return value to :func:`_load_point`, except it
    handles MULTIPOLYGON geometry.
:returns:
A GeoJSON `dict` MultiPolygon representation of the WKT ``string``.
"""
open_paren = next(tokens)
if not open_paren == '(':
raise ValueError(INVALID_WKT_FMT % string)
polygons = []
while True:
try:
poly = _load_polygon(tokens, string)
polygons.append(poly['coordinates'])
t = next(tokens)
if t == ')':
# we're done; no more polygons.
break
except StopIteration:
# If we reach this, the WKT is not valid.
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='MultiPolygon', coordinates=polygons)
def _load_multilinestring(tokens, string):
"""
    Has similar inputs and return value to :func:`_load_point`, except it
    handles MULTILINESTRING geometry.
:returns:
A GeoJSON `dict` MultiLineString representation of the WKT ``string``.
"""
open_paren = next(tokens)
if not open_paren == '(':
raise ValueError(INVALID_WKT_FMT % string)
linestrs = []
while True:
try:
linestr = _load_linestring(tokens, string)
linestrs.append(linestr['coordinates'])
t = next(tokens)
if t == ')':
# we're done; no more linestrings.
break
except StopIteration:
# If we reach this, the WKT is not valid.
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='MultiLineString', coordinates=linestrs)
def _load_geometrycollection(tokens, string):
"""
    Has similar inputs and return value to :func:`_load_point`, except it
    handles GEOMETRYCOLLECTIONs.
Delegates parsing to the parsers for the individual geometry types.
:returns:
A GeoJSON `dict` GeometryCollection representation of the WKT
``string``.
"""
open_paren = next(tokens)
if not open_paren == '(':
raise ValueError(INVALID_WKT_FMT % string)
geoms = []
result = dict(type='GeometryCollection', geometries=geoms)
while True:
try:
t = next(tokens)
if t == ')':
break
elif t == ',':
# another geometry still
continue
else:
geom_type = t
load_func = _loads_registry.get(geom_type)
geom = load_func(tokens, string)
geoms.append(geom)
except StopIteration:
raise ValueError(INVALID_WKT_FMT % string)
return result
_dumps_registry = {
'Point': _dump_point,
'LineString': _dump_linestring,
'Polygon': _dump_polygon,
'MultiPoint': _dump_multipoint,
'MultiLineString': _dump_multilinestring,
'MultiPolygon': _dump_multipolygon,
'GeometryCollection': _dump_geometrycollection,
}
_loads_registry = {
'POINT': _load_point,
'LINESTRING': _load_linestring,
'POLYGON': _load_polygon,
'MULTIPOINT': _load_multipoint,
'MULTILINESTRING': _load_multilinestring,
'MULTIPOLYGON': _load_multipolygon,
'GEOMETRYCOLLECTION': _load_geometrycollection,
}
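# Hedged usage sketch: round-trip a GeoJSON-like Point through WKT with the
# dumps()/loads() helpers defined above; the coordinates are illustrative.
if __name__ == '__main__':
    point = {'type': 'Point', 'coordinates': [-105.0, 40.0]}
    wkt = dumps(point, decimals=4)   # 'POINT (-105.0000 40.0000)'
    assert loads(wkt) == {'type': 'Point', 'coordinates': [-105.0, 40.0]}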
|
import binascii
import re
import socket
from abc import ABCMeta
from hashlib import md5
from ipaddress import ip_network, _BaseNetwork
from typing import Iterable, Optional, Tuple, Generator, Dict, Iterator
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from djing.lib.decorators import LazyInitMetaclass
from gw_app.nas_managers import core
from gw_app.nas_managers import structs as i_structs
DEBUG = getattr(settings, 'DEBUG', False)
LIST_USERS_ALLOWED = 'DjingUsersAllowed'
LIST_DEVICES_ALLOWED = 'DjingDevicesAllowed'
class ApiRos(object):
"""Routeros api"""
__sk = None
is_login = False
def __init__(self, ip: str, port: int):
if self.__sk is None:
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sk.connect((ip, port or 8728))
self.__sk = sk
def login(self, username, pwd):
if self.is_login:
return
chal = None
for repl, attrs in self.talk_iter(("/login",)):
chal = binascii.unhexlify(attrs['=ret'])
md = md5()
md.update(b'\x00')
md.update(bytes(pwd, 'utf-8'))
md.update(chal)
for _ in self.talk_iter(("/login", "=name=" + username,
"=response=00" + binascii.hexlify(
md.digest()).decode('utf-8'))):
pass
self.is_login = True
def talk_iter(self, words: Iterable):
if self.write_sentence(words) == 0:
return
while 1:
i = self.read_sentence()
if len(i) == 0:
continue
reply = i[0]
attrs = {}
for w in i[1:]:
j = w.find('=', 1)
if j == -1:
attrs[w] = ''
else:
attrs[w[:j]] = w[j + 1:]
yield (reply, attrs)
if reply == '!done':
return
def write_sentence(self, words: Iterable):
ret = 0
for w in words:
self.write_word(w)
ret += 1
self.write_word('')
return ret
def read_sentence(self):
r = []
while 1:
w = self.read_word()
if w == '':
return r
r.append(w)
def write_word(self, w):
if DEBUG:
print("<<< " + w)
b = bytes(w, "utf-8")
self.write_len(len(b))
self.write_bytes(b)
def read_word(self):
ret = self.read_bytes(self.read_len()).decode('utf-8')
if DEBUG:
print(">>> " + ret)
return ret
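    # The RouterOS API prefixes every word with a variable-length length field:
    # lengths below 0x80 fit in one byte, while longer words set the marker bits
    # 0x80, 0xC0, 0xE0 or 0xF0 to announce 2-, 3-, 4- or 5-byte prefixes.
    # write_len()/read_len() below implement that scheme.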
def write_len(self, l):
if l < 0x80:
self.write_bytes(bytes((l,)))
elif l < 0x4000:
l |= 0x8000
self.write_bytes(bytes(((l >> 8) & 0xff, l & 0xff)))
elif l < 0x200000:
l |= 0xC00000
self.write_bytes(
bytes(((l >> 16) & 0xff, (l >> 8) & 0xff, l & 0xff)))
elif l < 0x10000000:
l |= 0xE0000000
self.write_bytes(bytes(((l >> 24) & 0xff, (l >> 16) & 0xff,
(l >> 8) & 0xff, l & 0xff)))
else:
self.write_bytes(bytes((0xf0, (l >> 24) & 0xff, (l >> 16) & 0xff,
(l >> 8) & 0xff, l & 0xff)))
def read_len(self):
c = self.read_bytes(1)[0]
if (c & 0x80) == 0x00:
pass
elif (c & 0xC0) == 0x80:
c &= ~0xC0
c <<= 8
c += self.read_bytes(1)[0]
elif (c & 0xE0) == 0xC0:
c &= ~0xE0
c <<= 8
c += self.read_bytes(1)[0]
c <<= 8
c += self.read_bytes(1)[0]
elif (c & 0xF0) == 0xE0:
c &= ~0xF0
c <<= 8
c += self.read_bytes(1)[0]
c <<= 8
c += self.read_bytes(1)[0]
c <<= 8
c += self.read_bytes(1)[0]
elif (c & 0xF8) == 0xF0:
c = self.read_bytes(1)[0]
c <<= 8
c += self.read_bytes(1)[0]
c <<= 8
c += self.read_bytes(1)[0]
c <<= 8
c += self.read_bytes(1)[0]
return c
def write_bytes(self, s):
n = 0
while n < len(s):
r = self.__sk.send(s[n:])
if r == 0:
raise core.NasFailedResult("connection closed by remote end")
n += r
def read_bytes(self, length):
ret = b''
while len(ret) < length:
s = self.__sk.recv(length - len(ret))
if len(s) == 0:
raise core.NasFailedResult("connection closed by remote end")
ret += s
return ret
def __del__(self):
if self.__sk is not None:
self.__sk.close()
class MikrotikTransmitter(core.BaseTransmitter, ApiRos,
metaclass=type('_ABC_Lazy_mcs',
(ABCMeta, LazyInitMetaclass), {})):
description = _('Mikrotik NAS')
def __init__(self, login: str, password: str, ip: str, port: int,
enabled: bool, *args, **kwargs):
if not enabled:
raise core.NasFailedResult(_('Gateway disabled'))
try:
core.BaseTransmitter.__init__(
self, login=login,
password=password,
ip=ip, port=port,
*args, **kwargs
)
ApiRos.__init__(self, ip, port)
self.login(username=login, pwd=password)
except ConnectionRefusedError:
raise core.NasNetworkError('Connection to %s is Refused' % ip)
def _exec_cmd(self, cmd: Iterable) -> Dict:
if not isinstance(cmd, (list, tuple)):
raise TypeError
r = dict()
for k, v in self.talk_iter(cmd):
if k == '!done':
break
elif k == '!trap':
raise core.NasFailedResult(v.get('=message'))
r[k] = v or None
return r
def _exec_cmd_iter(self, cmd: Iterable) -> Generator:
if not isinstance(cmd, (list, tuple)):
raise TypeError
for k, v in self.talk_iter(cmd):
if k == '!done':
break
elif k == '!trap':
raise core.NasFailedResult(v.get('=message'))
if v:
yield v
@staticmethod
def _build_shape_obj(info: Dict) -> i_structs.SubnetQueue:
        # Convert the Mikrotik speed suffix to Mbit/s
def parse_speed(text_speed):
text_speed_digit = float(text_speed[:-1] or 0.0)
text_append = text_speed[-1:]
if text_append == 'M':
res = text_speed_digit
elif text_append == 'k':
res = text_speed_digit / 1000
# elif text_append == 'G':
# res = text_speed_digit * 0x400
else:
res = float(re.sub(r'[a-zA-Z]', '', text_speed)) / 1000 ** 2
return res
speed_out, speed_in = info['=max-limit'].split('/')
speed_in = parse_speed(speed_in)
speed_out = parse_speed(speed_out)
try:
target = info.get('=target')
if target is None:
target = info.get('=target-addresses')
name = info.get('=name')
disabled = info.get('=disabled', False)
if disabled is not None:
disabled = True if disabled == 'true' else False
if target and name:
# target may be '192.168.0.3/32,192.168.0.2/32'
net = target.split(',')[0]
if not net:
return
a = i_structs.SubnetQueue(
name=name,
network=net,
max_limit=(speed_in, speed_out),
is_access=not disabled,
queue_id=info.get('=.id')
)
return a
except ValueError as e:
print('ValueError:', e)
#################################################
# QUEUES
#################################################
# Find queue by name
def find_queue(self, name: str) -> Optional[i_structs.SubnetQueue]:
r = self._exec_cmd(('/queue/simple/print', '?name=%s' % name))
if r:
return self._build_shape_obj(r.get('!re'))
def add_queue(self, queue: i_structs.SubnetQueue) -> None:
if not isinstance(queue, i_structs.SubnetQueue):
raise TypeError('queue must be instance of SubnetQueue')
return self._exec_cmd((
'/queue/simple/add',
'=name=%s' % queue.name,
            # FIXME: depending on the Mikrotik device/firmware this is either =target-addresses or =target
'=target=%s' % queue.network,
'=max-limit=%.3fM/%.3fM' % queue.max_limit,
'=queue=Djing_pcq_up/Djing_pcq_down',
'=burst-time=1/5',
#'=total-queue=Djing_pcq_down'
))
def remove_queue(self, queue: i_structs.SubnetQueue) -> None:
if not isinstance(queue, i_structs.SubnetQueue):
raise TypeError
if not queue.queue_id:
queue = self.find_queue(queue.name)
if queue is not None:
if queue.queue_id:
self._exec_cmd((
'/queue/simple/remove',
'=.id=%s' % queue.queue_id
))
def remove_queue_range(self, q_ids: Iterable[str]):
ids = ','.join(q_ids)
if len(ids) > 1:
self._exec_cmd(('/queue/simple/remove', '=numbers=%s' % ids))
def update_queue(self, queue: i_structs.SubnetQueue):
if not isinstance(queue, i_structs.SubnetQueue):
raise TypeError
queue_gw = self.find_queue(queue.name)
if queue_gw is None:
return self.add_queue(queue)
else:
cmd = [
'/queue/simple/set',
'=name=%s' % queue.name,
'=max-limit=%.3fM/%.3fM' % queue.max_limit,
                # FIXME: depending on the Mikrotik firmware version this is
                # either =target-addresses or =target
'=target=%s' % queue.network,
'=queue=Djing_pcq_up/Djing_pcq_down',
'=burst-time=1/1'
]
if queue.queue_id:
cmd.insert(1, '=.id=%s' % queue.queue_id)
r = self._exec_cmd(cmd)
return r
def read_queue_iter(self) -> Generator:
for dat in self._exec_cmd_iter(('/queue/simple/print', '=detail')):
sobj = self._build_shape_obj(dat)
if sobj is not None:
yield sobj
#################################################
# Ip->firewall->address list
#################################################
def add_ip(self, list_name: str, net):
if not issubclass(net.__class__, _BaseNetwork):
raise TypeError
commands = (
'/ip/firewall/address-list/add',
'=list=%s' % list_name,
'=address=%s' % net
)
return self._exec_cmd(commands)
def remove_ip(self, mk_id):
return self._exec_cmd((
'/ip/firewall/address-list/remove',
'=.id=%s' % mk_id
))
def remove_ip_range(self, ip_firewall_ids: Iterable[str]):
return self._exec_cmd((
'/ip/firewall/address-list/remove',
'=numbers=%s' % ','.join(ip_firewall_ids)
))
def find_ip(self, net, list_name: str):
if not issubclass(net.__class__, _BaseNetwork):
raise TypeError
if net.prefixlen == net.max_prefixlen:
ip = net.network_address
else:
ip = net.with_prefixlen
r = self._exec_cmd((
'/ip/firewall/address-list/print', 'where',
'?list=%s' % list_name,
'?address=%s' % ip
))
return r.get('!re')
def read_nets_iter(self, list_name: str) -> Generator:
nets = self._exec_cmd_iter((
'/ip/firewall/address-list/print', 'where',
'?list=%s' % list_name,
'?dynamic=no'
))
for dat in nets:
n = ip_network(dat.get('=address'))
n.queue_id = dat.get('=.id')
yield n
def update_ip(self, net):
if not issubclass(net.__class__, _BaseNetwork):
raise TypeError
res_net_gw = self.find_ip(net, LIST_USERS_ALLOWED)
if not res_net_gw:
self.add_ip(LIST_USERS_ALLOWED, net)
#################################################
# BaseTransmitter implementation
#################################################
def add_user_range(self, queue_list: i_structs.VectorQueue):
for q in queue_list:
self.add_user(q)
def remove_user_range(self, queues: i_structs.VectorQueue):
if not isinstance(queues, (tuple, list, set)):
            raise ValueError('queues must be a concrete sequence (it is iterated twice), a generator will not work')
queue_ids = (q.queue_id for q in queues if q)
self.remove_queue_range(queue_ids)
for q in queues:
if isinstance(q, i_structs.SubnetQueue):
ip_list_entity = self.find_ip(q.network, LIST_USERS_ALLOWED)
if ip_list_entity:
self.remove_ip(ip_list_entity.get('=.id'))
def add_user(self, queue: i_structs.SubnetQueue, *args):
try:
self.add_queue(queue)
except core.NasFailedResult as e:
print('Error:', e)
net = queue.network
if not issubclass(net.__class__, _BaseNetwork):
raise TypeError
try:
self.add_ip(LIST_USERS_ALLOWED, net)
except core.NasFailedResult as e:
print('Error:', e)
def remove_user(self, queue: i_structs.SubnetQueue):
self.remove_queue(queue)
r = self.find_ip(queue.network, LIST_USERS_ALLOWED)
if r:
ip_id = r.get('=.id')
self.remove_ip(ip_id)
def update_user(self, queue: i_structs.SubnetQueue, *args):
if queue.is_access:
self.update_queue(queue)
self.update_ip(queue.network)
else:
self.remove_queue(queue)
res_ips = self.find_ip(queue.network, LIST_USERS_ALLOWED)
if res_ips:
self.remove_ip(res_ips.get('=.id'))
def ping(self, host, count=10) -> Optional[Tuple[int, int]]:
r = self._exec_cmd((
'/ip/arp/print',
'?address=%s' % host
))
if r == {}:
return
interface = r['!re'].get('=interface')
r = self._exec_cmd((
'/ping', '=address=%s' % host, '=arp-ping=yes', '=interval=100ms',
'=count=%d' % count,
'=interface=%s' % interface
))
res = r.get('!re')
if res is not None:
received, sent = int(res.get('=received')), int(res.get('=sent'))
return received, sent
def read_users(self) -> i_structs.VectorQueue:
return self.read_queue_iter()
def sync_nas(self, users_from_db: Iterator):
queues_from_db = (
ab.build_agent_struct() for ab in users_from_db
if ab is not None and ab.is_access()
)
queues_from_db = set(filter(lambda x: x is not None, queues_from_db))
queues_from_gw = self.read_queue_iter()
user_q_for_add, user_q_for_del = core.diff_set(queues_from_db,
set(queues_from_gw))
self.remove_queue_range(
(q.queue_id for q in user_q_for_del)
)
for q in user_q_for_add:
self.add_queue(q)
del user_q_for_add, user_q_for_del
# sync ip addrs list
db_nets = set(net.network for net in queues_from_db)
gw_nets = set(self.read_nets_iter(LIST_USERS_ALLOWED))
nets_add, nets_del = core.diff_set(db_nets, gw_nets)
self.remove_ip_range(
(q.queue_id for q in nets_del)
)
for q in nets_add:
self.add_ip(LIST_USERS_ALLOWED, q)
|
# coding: utf-8
"""
Fabric tasks for deploying the project on servers (production, staging, development)
"""
import os
import sys
from contextlib import contextmanager
from fabric.contrib import django
from fabric.api import local, run, lcd, cd
from fabric.tasks import Task
from fab_settings import env
sys.path.append(os.path.dirname(__file__) + '/../../mysite/')
django.settings_module('mysite.settings')
STAGING_BRANCH = 'master'
BASE_PATH = os.path.dirname(__file__)
STAGING_HOST = 'staging.courselets.org'
def debug(*args, **kwargs):
output = ""
for x in args:
print(x)
output += str(x)
return output
@contextmanager
def debug_cd(path):
print("run on path:{0}".format(path))
yield
class Deploying(Task):
"""
Deploy project on Production
"""
func = local
func_cd = lcd
code_branch = STAGING_BRANCH
@property
def project_path(self):
return os.path.join(BASE_PATH, 'socraticqs2')
@property
def local_settings_path(self):
return os.path.join(self.project_path, '../settings')
def __virtualenv(self):
with self.func_cd(os.path.join(self.project_path, '../')):
self.func('source {}/bin/activate'.format(env.venv_name))
def update_requirements(self):
with self.func_cd(self.project_path):
self.func("sudo pip install -r requirements.txt")
def _get_settings(self, branch='master'):
with self.func_cd(self.local_settings_path):
self.func('git pull origin {0}'.format(branch))
self.func('cp production_conf.py ../socraticqs2/mysite/mysite/settings/production_conf.py')
def __restart_service(self):
self.func('sudo supervisorctl restart gunicorn')
self.func('sudo supervisorctl restart celery')
self.func('sudo service nginx restart')
@property
def __is_new_branch(self):
if self.func == run:
return self.code_branch in self.func('git branch')
else:
return self.code_branch in self.func('git branch', capture=True)
def __update(self):
if self.__is_new_branch:
self.func('git checkout {0} --force'.format(self.code_branch))
self.func('git pull origin {0} --force'.format(self.code_branch))
else:
self.func('git fetch origin')
self.func('git checkout -b {0} origin/{0}'.format(self.code_branch))
self._get_settings()
self.func('find . -name "*.pyc" -print -delete')
self.__virtualenv()
self.update_requirements()
with self.func_cd("mysite"):
self.func('python manage.py collectstatic --noinput')
self.func('python manage.py syncdb --noinput')
self.func('python manage.py fsm_deploy --noinput')
self.__restart_service()
def run(self, running='local', branch='master', suffix=None):
self.code_branch = branch
if running == 'local':
self.func = local
self.func_cd = lcd
self.__update()
elif running == 'remote':
self.func = run
self.func_cd = cd
env.hosts = [STAGING_HOST, ]
global BASE_PATH
BASE_PATH = env.project_root
with self.func_cd(self.project_path):
self.__update()
elif running == 'debug':
print("DEBUG:\n")
self.func = debug
self.func_cd = debug_cd
self.__update()
class Staging(Deploying):
"""Deploy on Staging"""
def _get_settings(self, branch='master'):
"""On dev/staging we don't use production settings"""
with self.func_cd(self.local_settings_path):
self.func('git pull origin {0} --force'.format(branch))
self.func('cp local_conf.py ../dev/socraticqs2/mysite/mysite/settings/local_conf.py')
class Development(Staging):
"""Deploy on Development server
Args:
running - deploy code local or in server(local/run)
branch - git branch name
Example:
fab deploy.dev:running='local', branch='dev'
"""
@property
def project_path(self):
if self.func == local:
return os.path.join(BASE_PATH, '../../../../dev')
else:
return os.path.join(BASE_PATH, 'dev/socraticqs2')
@property
def local_settings_path(self):
if self.func == local:
return os.path.join(self.project_path, '../settings')
else:
return os.path.join(self.project_path, '../../settings')
code_branch = 'dev'
prod = Deploying()
staging = Staging()
dev = Development()
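# --- Usage sketch (added for clarity; assumes this module is exposed to Fabric
# --- as "deploy", as the Development docstring above suggests) ---
# fab deploy.dev:running='local',branch='dev'          # deploy the dev branch locally
# fab deploy.staging:running='remote',branch='master'  # deploy master to staging
# fab deploy.prod:running='remote'                     # production deploy of master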
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_0
from isi_sdk_8_1_0.models.license_license_tier_entitlements_exceeded_alert import LicenseLicenseTierEntitlementsExceededAlert # noqa: E501
from isi_sdk_8_1_0.rest import ApiException
class TestLicenseLicenseTierEntitlementsExceededAlert(unittest.TestCase):
"""LicenseLicenseTierEntitlementsExceededAlert unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testLicenseLicenseTierEntitlementsExceededAlert(self):
"""Test LicenseLicenseTierEntitlementsExceededAlert"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_0.models.license_license_tier_entitlements_exceeded_alert.LicenseLicenseTierEntitlementsExceededAlert() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2020-2022 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from tests.lib.testcase import ConfluenceTestCase
from tests.lib.testcase import setup_builder
import os
class TestConfluenceMetadata(ConfluenceTestCase):
@classmethod
def setUpClass(cls):
super(TestConfluenceMetadata, cls).setUpClass()
cls.dataset = os.path.join(cls.datasets, 'metadata')
def test_confluence_metadata_directive_expected(self):
with self.prepare(self.dataset) as app:
app.build()
builder_metadata = app.builder.metadata
self.assertTrue(builder_metadata)
self.assertTrue('index' in builder_metadata)
doc_labels = builder_metadata['index']
self.assertTrue(doc_labels)
self.assertTrue('labels' in doc_labels)
labels = doc_labels['labels']
self.assertEqual(len(labels), 2)
self.assertTrue('tag-a' in labels)
self.assertTrue('tag-c' in labels)
@setup_builder('html')
def test_html_confluence_metadata_directive_ignore(self):
with self.prepare(self.dataset, relax=True) as app:
# build attempt should not throw an exception/error
app.build()
|
import asyncio
import websockets
import time
import threading
players = 0
class Player:
def __init__(self, id, x = 0, y = 0, speed = 5):
self.id = id
self.x = x
self.y = y
self.dirX = 0
self.dirY = 0
self.speed = speed
print("Player criado com sucesso!")
def setX(self, x):
self.x = x
def setY(self, y):
self.y = y
def getX(self):
return self.x
def getY(self):
return self.y
async def hello(websocket, path):
global players
jogador = Player(players, 500, 500)
async def moveUP():
while 1:
jogador.setY(jogador.getY()-jogador.speed)
websocket.send("move:"+str(jogador.id)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
print("move:"+str(jogador.id)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
time.sleep(1)
async def moveR():
while 1:
jogador.setX(jogador.getX()+jogador.speed)
await websocket.send("move:"+str(jogador.id)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
print("move:"+str(jogador.id)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
time.sleep(1)
def threadEvoque():
global players
loop = asyncio.new_event_loop()
task = loop.create_task(moveUP())
loop.run_until_complete(task)
players += 1
print(players)
def threadEvoque2():
global players
loop = asyncio.new_event_loop()
task2 = loop.create_task(moveR())
loop.run_until_complete(task2)
players += 1
print(players)
while 1:
msg = await websocket.recv()
print(msg)
if(msg == "start"):
players +=1
await websocket.send("spawn:"+str(players)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
print("spawn:"+str(players)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
start_server = websockets.serve(hello, "0.0.0.0", 8888)
print("Iniciando server...")
asyncio.get_event_loop().run_until_complete(start_server)
print("Sever em funcionamento!")
asyncio.get_event_loop().run_forever()
|
__author__ = 'pulphix'
from app import TestApplication
|
# coding: utf-8
from __future__ import unicode_literals
from django.contrib import admin
from .models import ThumbnailOption
from django.contrib.admin.widgets import AdminFileWidget
@admin.register(ThumbnailOption)
class ThumbnailOptionAdmin(admin.ModelAdmin):
fields = ['source', 'alias', 'options']
class ThumbnailOptionMixin(admin.ModelAdmin):
class Media:
pass
def media(self):
pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
try:
import os
import signal
import time
from conf import *
from lib import logger
from lib import mqtt
except Exception as e:
print(f"Import error: {str(e)} line {sys.exc_info()[-1].tb_lineno}, check requirements.txt")
sys.exit(1)
log = logger.Log("SignalTestcase", MI2_SHORTNAME, 10)
print(__name__, MI2_SHORTNAME, 20)
# store default handler of signal.SIGINT
# default_handler = signal.getsignal(signal.SIGINT)
def publish(state:str="Online"):
mqtt_client = mqtt.client()
if mqtt_client and mqtt_client.ready:
mqtt_client.publish_simple("tele/apptest/LWT", state, True)
def handler(signum, frame):
publish('Offline')
log.debug("Ende Application")
exit(0)
def main():
while True:
try:
pass
time.sleep(30)
except Exception as e:
Log.error(f"Error while running the script: {str(e)}, line {sys.exc_info()[-1].tb_lineno}")
if __name__ == "__main__":
log.debug("Start Application")
signal.signal(signal.SIGINT, handler)
publish('Online')
main()
|
from database.adatabase import ADatabase
import pandas as pd
class SEC(ADatabase):
def __init__(self):
super().__init__("sec")
def retrieve_num_data(self,adsh):
try:
db = self.client[self.name]
table = db["nums"]
data = table.find({"adsh":adsh},{"_id":0},show_record_id=False)
return pd.DataFrame(list(data))
except Exception as e:
print(str(e))
def retrieve_filing_data(self,cik):
try:
db = self.client[self.name]
table = db["filings"]
data = table.find({"cik":cik},{"_id":0},show_record_id=False)
return pd.DataFrame(list(data))
except Exception as e:
print(str(e))
def retrieve_adshs(self):
try:
db = self.client[self.name]
table = db["filings"]
data = table.find({},{"_id":0,"adsh":1},show_record_id=False)
return pd.DataFrame(list(data))
except Exception as e:
print(str(e))
|
import game_framework
from pico2d import *
import title_state
name = "StartState"
image = None
logo_time = 0.0
def enter():
global image
image = load_image('kpu_credit.png')
def exit():
global image
del(image)
def update():
global logo_time
if (logo_time > 1.0):
logo_time = 0.8
game_framework.change_state(title_state)
delay(0.01)
logo_time += 0.05
def draw():
global image
clear_canvas()
image.draw(400,300)
update_canvas()
def handle_events():
events = get_events()
pass
def pause(): pass
def resume(): pass
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 20:30:46 2020
@author: Aaronga
"""
# Missing data
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv("Data.csv")
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
# Handling the NaN values
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values="NaN", strategy="mean", axis = 0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3]= imputer.transform(X[:,1:3])
print(X)
|
from sqlalchemy.testing import assert_raises, eq_
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import (
testing, exc, case, select, literal_column, text, and_, Integer, cast,
String, Column, Table, MetaData)
from sqlalchemy.sql import table, column
info_table = None
class CaseTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_class(cls):
metadata = MetaData(testing.db)
global info_table
info_table = Table(
'infos', metadata,
Column('pk', Integer, primary_key=True),
Column('info', String(30)))
info_table.create()
info_table.insert().execute(
{'pk': 1, 'info': 'pk_1_data'},
{'pk': 2, 'info': 'pk_2_data'},
{'pk': 3, 'info': 'pk_3_data'},
{'pk': 4, 'info': 'pk_4_data'},
{'pk': 5, 'info': 'pk_5_data'},
{'pk': 6, 'info': 'pk_6_data'})
@classmethod
def teardown_class(cls):
info_table.drop()
@testing.fails_on('firebird', 'FIXME: unknown')
@testing.requires.subqueries
def test_case(self):
inner = select(
[
case(
[
[info_table.c.pk < 3, 'lessthan3'],
[
and_(info_table.c.pk >= 3, info_table.c.pk < 7),
'gt3']]).label('x'),
info_table.c.pk, info_table.c.info], from_obj=[info_table])
inner_result = inner.execute().fetchall()
# Outputs:
# lessthan3 1 pk_1_data
# lessthan3 2 pk_2_data
# gt3 3 pk_3_data
# gt3 4 pk_4_data
# gt3 5 pk_5_data
# gt3 6 pk_6_data
assert inner_result == [
('lessthan3', 1, 'pk_1_data'),
('lessthan3', 2, 'pk_2_data'),
('gt3', 3, 'pk_3_data'),
('gt3', 4, 'pk_4_data'),
('gt3', 5, 'pk_5_data'),
('gt3', 6, 'pk_6_data')
]
outer = select([inner.alias('q_inner')])
outer_result = outer.execute().fetchall()
assert outer_result == [
('lessthan3', 1, 'pk_1_data'),
('lessthan3', 2, 'pk_2_data'),
('gt3', 3, 'pk_3_data'),
('gt3', 4, 'pk_4_data'),
('gt3', 5, 'pk_5_data'),
('gt3', 6, 'pk_6_data')
]
w_else = select(
[
case(
[
[info_table.c.pk < 3, cast(3, Integer)],
[
and_(
info_table.c.pk >= 3, info_table.c.pk < 6),
6]],
else_=0).label('x'),
info_table.c.pk, info_table.c.info],
from_obj=[info_table])
else_result = w_else.execute().fetchall()
assert else_result == [
(3, 1, 'pk_1_data'),
(3, 2, 'pk_2_data'),
(6, 3, 'pk_3_data'),
(6, 4, 'pk_4_data'),
(6, 5, 'pk_5_data'),
(0, 6, 'pk_6_data')
]
def test_literal_interpretation(self):
t = table('test', column('col1'))
assert_raises(exc.ArgumentError, case, [("x", "y")])
self.assert_compile(
case([("x", "y")], value=t.c.col1),
"CASE test.col1 WHEN :param_1 THEN :param_2 END")
self.assert_compile(
case([(t.c.col1 == 7, "y")], else_="z"),
"CASE WHEN (test.col1 = :col1_1) THEN :param_1 ELSE :param_2 END")
def test_text_doesnt_explode(self):
for s in [
select(
[
case(
[
(
info_table.c.info == 'pk_4_data',
text("'yes'"))],
else_=text("'no'"))
]).order_by(info_table.c.info),
select(
[
case(
[
(
info_table.c.info == 'pk_4_data',
literal_column("'yes'"))],
else_=literal_column("'no'")
)]
).order_by(info_table.c.info),
]:
if testing.against("firebird"):
eq_(s.execute().fetchall(), [
('no ', ), ('no ', ), ('no ', ), ('yes', ),
('no ', ), ('no ', ),
])
else:
eq_(s.execute().fetchall(), [
('no', ), ('no', ), ('no', ), ('yes', ),
('no', ), ('no', ),
])
@testing.fails_on('firebird', 'FIXME: unknown')
def testcase_with_dict(self):
query = select(
[
case(
{
info_table.c.pk < 3: 'lessthan3',
info_table.c.pk >= 3: 'gt3',
}, else_='other'),
info_table.c.pk, info_table.c.info
],
from_obj=[info_table])
assert query.execute().fetchall() == [
('lessthan3', 1, 'pk_1_data'),
('lessthan3', 2, 'pk_2_data'),
('gt3', 3, 'pk_3_data'),
('gt3', 4, 'pk_4_data'),
('gt3', 5, 'pk_5_data'),
('gt3', 6, 'pk_6_data')
]
simple_query = select(
[
case(
{1: 'one', 2: 'two', },
value=info_table.c.pk, else_='other'),
info_table.c.pk
],
whereclause=info_table.c.pk < 4,
from_obj=[info_table])
assert simple_query.execute().fetchall() == [
('one', 1),
('two', 2),
('other', 3),
]
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import apache_beam as beam
from apache_beam.runners.interactive.user_pipeline_tracker import UserPipelineTracker
class UserPipelineTrackerTest(unittest.TestCase):
def test_getting_unknown_pid_returns_none(self):
ut = UserPipelineTracker()
p = beam.Pipeline()
self.assertIsNone(ut.get_pipeline(str(id(p))))
def test_getting_unknown_pipeline_returns_none(self):
ut = UserPipelineTracker()
p = beam.Pipeline()
self.assertIsNone(ut.get_user_pipeline(p))
def test_no_parent_returns_none(self):
ut = UserPipelineTracker()
user = beam.Pipeline()
derived = beam.Pipeline()
orphan = beam.Pipeline()
ut.add_derived_pipeline(user, derived)
self.assertIsNone(ut.get_user_pipeline(orphan))
def test_get_user_pipeline_is_same(self):
ut = UserPipelineTracker()
p = beam.Pipeline()
ut.add_user_pipeline(p)
self.assertIs(ut.get_user_pipeline(p), p)
def test_can_add_derived(self):
ut = UserPipelineTracker()
user = beam.Pipeline()
derived = beam.Pipeline()
ut.add_derived_pipeline(user, derived)
self.assertIs(ut.get_user_pipeline(derived), user)
def test_can_add_multiple_derived(self):
"""Tests that there can be many user pipelines with many derived
pipelines.
"""
ut = UserPipelineTracker()
# Add the first set of user and derived pipelines.
user1 = beam.Pipeline()
derived11 = beam.Pipeline()
derived12 = beam.Pipeline()
ut.add_derived_pipeline(user1, derived11)
ut.add_derived_pipeline(user1, derived12)
# Add the second set of user and derived pipelines.
user2 = beam.Pipeline()
derived21 = beam.Pipeline()
derived22 = beam.Pipeline()
ut.add_derived_pipeline(user2, derived21)
ut.add_derived_pipeline(user2, derived22)
# Assert that the user pipelines are correct.
self.assertIs(ut.get_user_pipeline(derived11), user1)
self.assertIs(ut.get_user_pipeline(derived12), user1)
self.assertIs(ut.get_user_pipeline(derived21), user2)
self.assertIs(ut.get_user_pipeline(derived22), user2)
def test_cannot_have_multiple_parents(self):
ut = UserPipelineTracker()
user1 = beam.Pipeline()
user2 = beam.Pipeline()
derived = beam.Pipeline()
ut.add_derived_pipeline(user1, derived)
with self.assertRaises(AssertionError):
ut.add_derived_pipeline(user2, derived)
self.assertIs(ut.get_user_pipeline(derived), user1)
def test_adding_derived_with_derived_gets_user_pipeline(self):
"""Tests that one can correctly add a derived pipeline from a derived
pipeline and still get the correct user pipeline.
"""
ut = UserPipelineTracker()
user = beam.Pipeline()
derived1 = beam.Pipeline()
derived2 = beam.Pipeline()
    # Add the first derived pipeline to the user pipeline.
ut.add_derived_pipeline(user, derived1)
# Add the second derived pipeline to the first derived pipeline. This should
# get the user pipeline of the first and add the second to it.
ut.add_derived_pipeline(derived1, derived2)
# Asserts that both derived pipelines are under the same user pipeline.
self.assertIs(ut.get_user_pipeline(derived1), user)
self.assertIs(ut.get_user_pipeline(derived2), user)
def test_can_get_pipeline_from_id(self):
"""Tests the pid -> pipeline memoization."""
ut = UserPipelineTracker()
user = beam.Pipeline()
derived = beam.Pipeline()
ut.add_user_pipeline(user)
ut.add_derived_pipeline(user, derived)
self.assertIs(ut.get_pipeline(str(id(user))), user)
self.assertIs(ut.get_pipeline(str(id(derived))), derived)
def test_clear(self):
ut = UserPipelineTracker()
user = beam.Pipeline()
derived = beam.Pipeline()
ut.add_derived_pipeline(user, derived)
self.assertIs(ut.get_user_pipeline(derived), user)
ut.clear()
self.assertIsNone(ut.get_user_pipeline(user))
self.assertIsNone(ut.get_user_pipeline(derived))
def test_can_iterate(self):
ut = UserPipelineTracker()
user1 = beam.Pipeline()
derived11 = beam.Pipeline()
derived12 = beam.Pipeline()
ut.add_derived_pipeline(user1, derived11)
ut.add_derived_pipeline(user1, derived12)
user2 = beam.Pipeline()
derived21 = beam.Pipeline()
derived22 = beam.Pipeline()
ut.add_derived_pipeline(user2, derived21)
ut.add_derived_pipeline(user2, derived22)
user_pipelines = set(p for p in ut)
self.assertSetEqual(set([user1, user2]), user_pipelines)
def test_can_evict_user_pipeline(self):
ut = UserPipelineTracker()
user1 = beam.Pipeline()
derived11 = beam.Pipeline()
derived12 = beam.Pipeline()
ut.add_derived_pipeline(user1, derived11)
ut.add_derived_pipeline(user1, derived12)
user2 = beam.Pipeline()
derived21 = beam.Pipeline()
derived22 = beam.Pipeline()
ut.add_derived_pipeline(user2, derived21)
ut.add_derived_pipeline(user2, derived22)
ut.evict(user1)
self.assertIsNone(ut.get_user_pipeline(user1))
self.assertIsNone(ut.get_user_pipeline(derived11))
self.assertIsNone(ut.get_user_pipeline(derived12))
self.assertIs(user2, ut.get_user_pipeline(derived21))
self.assertIs(user2, ut.get_user_pipeline(derived22))
if __name__ == '__main__':
unittest.main()
|
# VARIABLES
# ASSIGNING A VALUE TO A VARIABLE
var_teste = 1
print(var_teste)
print(type(var_teste))
# MULTIPLE ASSIGNMENT
pessoa1, pessoa2, pessoa3 = 'Jose', 'Joao','Maria'
print(pessoa1)
print(pessoa2)
print(pessoa3)
# CHAINED ASSIGNMENT (SAME VALUE FOR ALL VARIABLES)
pessoa1=pessoa2=pessoa3 = 'Jose'
print(pessoa1)
print(pessoa2)
print(pessoa3)
# OPERATIONS WITH VARIABLES
idade = 32
idade1 = 28
print('SUM', idade + idade1)
print('SUBTRACTION', idade - idade1)
print('MULTIPLICATION', idade * idade1)
print('DIVISION', idade / idade1)
print('POWER', idade ** idade1)
print('INTEGER DIVISION', idade // idade1)
print('DIVISION REMAINDER', idade % idade1)
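# WORKED EXAMPLE (added for clarity): with idade = 32 and idade1 = 28
# 32 / 28  -> 1.1428...  (true division)
# 32 // 28 -> 1          (integer division keeps only the whole part)
# 32 % 28  -> 4          (remainder: 32 - 28 * 1)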
|
#-*- coding:utf-8 -*-
import json
import copy
import requests
from flask import render_template, abort, request, url_for, redirect, g
import time
import datetime
from rrd import app
from rrd.model.screen import DashboardScreen
from rrd.model.graph import DashboardGraph
from rrd import consts
from rrd.utils.graph_urls import generate_graph_urls
from rrd import config
@app.route("/screen", methods=["GET", "POST"])
def dash_screens():
top_screens = DashboardScreen.gets(pid='0')
top_screens = sorted(top_screens, key=lambda x:x.name)
return render_template("screen/index.html", **locals())
@app.route("/screen/<int:sid>/delete")
def dash_screen_delete(sid):
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no such screen")
DashboardScreen.remove(sid)
return redirect("/screen")
@app.route("/screen/<int:sid>/edit", methods=["GET", "POST"])
def dash_screen_edit(sid):
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no such screen")
if request.method == "POST":
screen_name = request.form.get("screen_name")
screen.update(name=screen_name)
return redirect("/screen/%s" %screen.id)
else:
return render_template("screen/edit.html", **locals())
@app.route("/screen/<int:sid>/clone", methods=["GET", "POST"])
def dash_screen_clone(sid):
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no such screen")
if request.method == "POST":
screen_name = request.form.get("screen_name")
with_graph = request.form.get("with_graph")
new_s = DashboardScreen.add(screen.pid, screen_name)
if not new_s:
abort(404, "创建screen失败了")
if with_graph:
old_graphs = DashboardGraph.gets_by_screen_id(sid)
for o in old_graphs:
DashboardGraph.add(o.title, o.hosts, o.counters, new_s.id,
o.timespan, o.graph_type, o.method, o.position)
return redirect("/screen/%s" %new_s.id)
else:
return render_template("screen/clone.html", **locals())
@app.route("/graph/<int:gid>/delete")
def dash_graph_delete(gid):
graph = DashboardGraph.get(gid)
if not graph:
abort(404, "no such graph")
DashboardGraph.remove(gid)
return redirect("/screen/" + graph.screen_id)
@app.route("/screen/<int:sid>")
def dash_screen(sid):
start = request.args.get("start")
end = request.args.get("end")
top_screens = DashboardScreen.gets(pid=0)
top_screens = sorted(top_screens, key=lambda x:x.name)
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no screen")
if str(screen.pid) == '0':
sub_screens = DashboardScreen.gets(pid=sid)
sub_screens = sorted(sub_screens, key=lambda x:x.name)
return render_template("screen/top_screen.html", **locals())
pscreen = DashboardScreen.get(screen.pid)
sub_screens = DashboardScreen.gets(pid=screen.pid)
sub_screens = sorted(sub_screens, key=lambda x:x.name)
graphs = DashboardGraph.gets_by_screen_id(screen.id)
all_graphs = []
for graph in graphs:
all_graphs.extend(generate_graph_urls(graph, start, end) or [])
all_graphs = sorted(all_graphs, key=lambda x:x.position)
return render_template("screen/screen.html", **locals())
@app.route("/screen/embed/<int:sid>")
def dash_screen_embed(sid):
start = request.args.get("start")
end = request.args.get("end")
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no screen")
if screen.pid == '0':
abort(404, "top screen")
graphs = DashboardGraph.gets_by_screen_id(screen.id)
all_graphs = []
for graph in graphs:
all_graphs.extend(generate_graph_urls(graph, start, end) or [])
all_graphs = sorted(all_graphs, key=lambda x:x.position)
return render_template("screen/screen_embed.html", **locals())
@app.route("/screen/add", methods=["GET", "POST"])
def dash_screen_add():
if request.method == "POST":
name = request.form.get("screen_name")
pid = request.form.get("pid", '0')
screen = DashboardScreen.add(pid, name)
return redirect("/screen/%s" % screen.id)
else:
pid = request.args.get("pid", '0')
screen = DashboardScreen.get(pid)
return render_template("screen/add.html", **locals())
@app.route("/screen/<int:sid>/graph", methods=["GET", "POST"])
def dash_graph_add(sid):
all_screens = DashboardScreen.gets()
top_screens = [x for x in all_screens if x.pid == '0']
children = []
for t in top_screens:
children.append([x for x in all_screens if x.pid == t.id])
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no screen")
pscreen = DashboardScreen.get(screen.pid)
if request.method == "POST":
title = request.form.get("title")
hosts = request.form.get("hosts", "").strip()
hosts = hosts and hosts.split("\n") or []
hosts = [x.strip() for x in hosts]
counters = request.form.get("counters", "").strip()
counters = counters and counters.split("\n") or []
counters = [x.strip() for x in counters]
timespan = request.form.get("timespan", 3600)
graph_type = request.form.get("graph_type", 'h')
method = request.form.get("method", '').upper()
position = request.form.get("position", 0)
graph = DashboardGraph.add(title, hosts, counters, sid,
timespan, graph_type, method, position)
return redirect("/screen/%s" % sid)
else:
gid = request.args.get("gid")
graph = gid and DashboardGraph.get(gid)
return render_template("screen/graph_add.html", config=config, **locals())
@app.route("/graph/<int:gid>/edit", methods=["GET", "POST"])
def dash_graph_edit(gid):
error = ""
graph = DashboardGraph.get(gid)
if not graph:
abort(404, "no graph")
all_screens = DashboardScreen.gets()
top_screens = [x for x in all_screens if x.pid == '0']
children = []
for t in top_screens:
children.append([x for x in all_screens if x.pid == t.id])
screen = DashboardScreen.get(graph.screen_id)
if not screen:
abort(404, "no screen")
pscreen = DashboardScreen.get(screen.pid)
if request.method == "POST":
ajax = request.form.get("ajax", "")
screen_id = request.form.get("screen_id")
title = request.form.get("title", "").strip()
hosts = request.form.get("hosts", "").strip()
hosts = hosts and hosts.split("\n") or []
hosts = [x.strip() for x in hosts]
counters = request.form.get("counters", "").strip()
counters = counters and counters.split("\n") or []
counters = [x.strip() for x in counters]
timespan = request.form.get("timespan", 3600)
graph_type = request.form.get("graph_type", 'h')
method = request.form.get("method", '').upper()
position = request.form.get("position", 0)
graph = graph.update(title, hosts, counters, screen_id,
timespan, graph_type, method, position)
error = u"修改成功了"
if not ajax:
return render_template("screen/graph_edit.html", config=config, **locals())
else:
return "ok"
else:
ajax = request.args.get("ajax", "")
return render_template("screen/graph_edit.html", **locals())
@app.route("/graph/multi_edit", methods=["GET", "POST"])
def dash_graph_multi_edit():
ret = {
"ok": False,
"msg": "",
"data": [],
}
if request.method == "POST":
d = request.data
try:
jdata = json.loads(d)
except ValueError:
jdata = None
if not jdata:
return json.dumps({
"ok": False,
"msg": "no_data_post",
})
rows = []
for x in jdata:
rows.append({"id": x["id"], "hosts": x["endpoints"], "counters": x["counters"]})
DashboardGraph.update_multi(rows)
return json.dumps({
"ok": True,
"msg": "",
})
elif request.method == "GET":
sid = request.args.get("sid")
if not sid or not DashboardScreen.get(sid):
ret["msg"] = "no_screen"
return json.dumps(ret)
ret["ok"] = True
graphs = DashboardGraph.gets_by_screen_id(sid)
ret['data'] = [{"id": x.id, "title": x.title, "endpoints":x.hosts, "counters":x.counters} for x in graphs]
return json.dumps(ret)
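# --- Illustrative payload sketch (added for clarity, derived from the handler above;
# --- the ids, endpoints and counters below are made-up examples) ---
# POST /graph/multi_edit expects a JSON list of rows, one per graph to update:
# [
#     {"id": 1, "endpoints": ["host-a", "host-b"], "counters": ["cpu.idle"]},
#     {"id": 2, "endpoints": ["host-c"], "counters": ["load.1min", "load.5min"]}
# ]
# GET /graph/multi_edit?sid=<screen id> returns {"ok": ..., "msg": ..., "data": [...]}
# with one entry of the same shape per graph on that screen.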
|
import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import (ASAPooling,
GraphConv, global_mean_pool,
JumpingKnowledge)
class ASAP(torch.nn.Module):
def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim, num_layers, hidden, ratio=0.8, dropout=0, num_class=0):
super(ASAP, self).__init__()
self.num_class = num_class
self.max_seq_len = max_seq_len
self.node_encoder = node_encoder
self.conv1 = GraphConv(emb_dim, hidden, aggr='mean')
self.convs = torch.nn.ModuleList()
self.pools = torch.nn.ModuleList()
self.convs.extend([
GraphConv(hidden, hidden, aggr='mean')
for i in range(num_layers - 1)
])
self.pools.extend([
ASAPooling(hidden, ratio, dropout=dropout)
for i in range((num_layers) // 2)
])
self.jump = JumpingKnowledge(mode='cat')
self.lin1 = Linear(num_layers * hidden, hidden)
# self.lin2 = Linear(hidden, dataset.num_classes)
if self.num_class > 0: # classification
self.graph_pred_linear = torch.nn.Linear(hidden, self.num_class)
else:
self.graph_pred_linear_list = torch.nn.ModuleList()
for i in range(max_seq_len):
self.graph_pred_linear_list.append(torch.nn.Linear(hidden, num_vocab))
def reset_parameters(self):
self.conv1.reset_parameters()
for conv in self.convs:
conv.reset_parameters()
for pool in self.pools:
pool.reset_parameters()
self.lin1.reset_parameters()
        # lin2 is commented out above; reset the actual prediction heads instead
        if self.num_class > 0:
            self.graph_pred_linear.reset_parameters()
        else:
            for graph_pred_linear in self.graph_pred_linear_list:
                graph_pred_linear.reset_parameters()
def forward(self, data):
x, edge_index, node_depth, batch = data.x, data.edge_index, data.node_depth, data.batch
x = self.node_encoder(x, node_depth.view(-1, ))
edge_weight = None
x = F.relu(self.conv1(x, edge_index))
xs = [global_mean_pool(x, batch)]
for i, conv in enumerate(self.convs):
x = conv(x=x, edge_index=edge_index, edge_weight=edge_weight)
x = F.relu(x)
xs += [global_mean_pool(x, batch)]
if i % 2 == 0 and i < len(self.convs) - 1:
pool = self.pools[i // 2]
x, edge_index, edge_weight, batch, _ = pool(
x=x, edge_index=edge_index, edge_weight=edge_weight,
batch=batch)
x = self.jump(xs)
x = F.relu(self.lin1(x))
x = F.dropout(x, p=0.5, training=self.training)
# x = self.lin2(x)
# return F.log_softmax(x, dim=-1)
if self.num_class > 0:
return self.graph_pred_linear(x)
pred_list = []
for i in range(self.max_seq_len):
pred_list.append(self.graph_pred_linear_list[i](x))
return pred_list
def __repr__(self):
return self.__class__.__name__
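# --- Instantiation sketch (added for clarity; the encoder and all sizes below are
# --- made-up examples, not values from the original project) ---
# node_encoder is expected to map (x, node_depth) to emb_dim features, e.g. an
# embedding module supplied by the surrounding training code.
#
# model = ASAP(num_vocab=5000, max_seq_len=5, node_encoder=my_node_encoder,
#              emb_dim=128, num_layers=4, hidden=128, ratio=0.8,
#              dropout=0.1, num_class=0)
# preds = model(batch)  # with num_class == 0: list of max_seq_len tensors,
#                       # each of shape [num_graphs, num_vocab]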
|
#!/usr/bin/env python
import rospy
from duckietown_msgs.msg import WheelsCmdStamped, FSMState
class WheelsCmdSwitchNode(object):
def __init__(self):
self.node_name = rospy.get_name()
rospy.loginfo("[%s] Initializing " %(self.node_name))
# Read parameters
self.mappings = rospy.get_param("~mappings")
source_topic_dict = rospy.get_param("~source_topics")
self.current_src_name = None
# Construct publisher
self.pub_cmd = rospy.Publisher("~wheels_cmd",WheelsCmdStamped,queue_size=1)
# Construct subscribers
self.sub_fsm_state = rospy.Subscriber(rospy.get_param("~mode_topic"),FSMState,self.cbFSMState)
self.sub_dict = dict()
for src_name, topic_name in source_topic_dict.items():
self.sub_dict[src_name] = rospy.Subscriber(topic_name,WheelsCmdStamped,self.cbWheelsCmd,callback_args=src_name)
def cbFSMState(self,fsm_state_msg):
self.current_src_name = self.mappings.get(fsm_state_msg.state)
if self.current_src_name is None:
rospy.logwarn("[%s] FSMState %s not handled. No msg pass through the switch." %(self.node_name,fsm_state_msg.state))
def cbWheelsCmd(self,msg,src_name):
if src_name == self.current_src_name:
self.pub_cmd.publish(msg)
def on_shutdown(self):
rospy.loginfo("[%s] Shutting down." %(self.node_name))
if __name__ == '__main__':
# Initialize the node with rospy
rospy.init_node('wheels_cmd_switch_node', anonymous=False)
# Create the DaguCar object
node = WheelsCmdSwitchNode()
# Setup proper shutdown behavior
rospy.on_shutdown(node.on_shutdown)
# Keep it spinning to keep the node alive
rospy.spin()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SettleEntity import SettleEntity
class AlipayTradeSettleReceivablesQueryModel(object):
def __init__(self):
self._biz_product = None
self._extend_params = None
self._merchant_info = None
self._out_request_no = None
@property
def biz_product(self):
return self._biz_product
@biz_product.setter
def biz_product(self, value):
self._biz_product = value
@property
def extend_params(self):
return self._extend_params
@extend_params.setter
def extend_params(self, value):
self._extend_params = value
@property
def merchant_info(self):
return self._merchant_info
@merchant_info.setter
def merchant_info(self, value):
if isinstance(value, SettleEntity):
self._merchant_info = value
else:
self._merchant_info = SettleEntity.from_alipay_dict(value)
@property
def out_request_no(self):
return self._out_request_no
@out_request_no.setter
def out_request_no(self, value):
self._out_request_no = value
def to_alipay_dict(self):
params = dict()
if self.biz_product:
if hasattr(self.biz_product, 'to_alipay_dict'):
params['biz_product'] = self.biz_product.to_alipay_dict()
else:
params['biz_product'] = self.biz_product
if self.extend_params:
if hasattr(self.extend_params, 'to_alipay_dict'):
params['extend_params'] = self.extend_params.to_alipay_dict()
else:
params['extend_params'] = self.extend_params
if self.merchant_info:
if hasattr(self.merchant_info, 'to_alipay_dict'):
params['merchant_info'] = self.merchant_info.to_alipay_dict()
else:
params['merchant_info'] = self.merchant_info
if self.out_request_no:
if hasattr(self.out_request_no, 'to_alipay_dict'):
params['out_request_no'] = self.out_request_no.to_alipay_dict()
else:
params['out_request_no'] = self.out_request_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayTradeSettleReceivablesQueryModel()
if 'biz_product' in d:
o.biz_product = d['biz_product']
if 'extend_params' in d:
o.extend_params = d['extend_params']
if 'merchant_info' in d:
o.merchant_info = d['merchant_info']
if 'out_request_no' in d:
o.out_request_no = d['out_request_no']
return o
|
import os
from deta import Deta
from datetime import date, datetime
from fastapi import HTTPException
import urllib
import base64
deta = Deta()
base = deta.Base("drawings")
drive = deta.Drive("drawings")
def get_all(db, query):
blob_gen = db.fetch(query)
blobs = []
for stored_blob in blob_gen:
for blob in stored_blob:
blobs.append(blob)
return blobs
# list all drawings
def get_drawings():
try:
return get_all(base, {})
except:
return None
# save existing drawing
def save(name, file):
encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
b = base.get(encoded_name)
try:
if (b):
base.put({"key": encoded_name, "name": name, "public": b["public"], "lastModified": datetime.utcnow().timestamp()})
return drive.put(name, file)
base.put({"key":encoded_name, "name": name, "public": False, "lastModified": datetime.utcnow().timestamp()})
return drive.put(name, file)
except:
return None
# save
def save_as(name, file, overwrite):
encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
b = base.get(encoded_name)
record = {"key": encoded_name, "name":name, "public": False, 'lastModified': datetime.utcnow().timestamp()}
if (overwrite or not b): # Overwrite allowed or Record Does not Exist
base.put(record)
drive.put(name, file)
return record
else: # Overwrite False and Record Exists
return None
def get_drawing(name):
encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
b = base.get(encoded_name)
d = drive.get(name)
if (b and d):
return d.read()
base.delete(encoded_name)
drive.delete(name)
return None
def get_metadata(name):
encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
b = base.get(encoded_name)
if (b):
return b
return None
def delete_drawing(name):
encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
try:
base.delete(encoded_name)
drive.delete(name)
return name
except:
return None
def modify_public(name, public):
encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
try:
b = base.get(encoded_name)
if (b):
b["public"] = public
return base.put(b)
return None
except:
return None
def get_public_drawing(name):
encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
try:
b = base.get(encoded_name)
if (b and b["public"]):
return drive.get(name)
return None
except:
return None
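# --- Key-encoding sketch (added for clarity) ---
# Drawing names are stored under their URL-safe base64 form so arbitrary names
# become valid Deta Base keys, e.g.:
#   base64.urlsafe_b64encode("abc".encode("utf-8")) -> b'YWJj'
# The original name is kept in the record's "name" field for display.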
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Simulator instruction to save statevector amplitudes and amplitudes squared.
"""
from qiskit.circuit import QuantumCircuit
from qiskit.extensions.exceptions import ExtensionError
from .save_data import SaveSingleData, SaveAverageData, default_qubits
class SaveAmplitudes(SaveSingleData):
"""Save complex statevector amplitudes."""
def __init__(self,
key,
num_qubits,
params,
pershot=False,
conditional=False):
"""Instruction to save complex statevector amplitudes.
Args:
key (str): the key for retrieving saved data from results.
num_qubits (int): the number of qubits for the snapshot type.
            params (list): the basis states to return amplitudes for.
            pershot (bool): if True save a list of amplitude vectors for each
                            shot of the simulation rather than a single
                            amplitude vector [Default: False].
conditional (bool): if True save the amplitudes vector conditional
on the current classical register values
[Default: False].
Raises:
ExtensionError: if params is invalid for the specified number of qubits.
"""
params = _format_amplitude_params(params, num_qubits)
super().__init__("save_amplitudes",
key,
num_qubits,
pershot=pershot,
conditional=conditional,
params=params)
class SaveAmplitudesSquared(SaveAverageData):
"""Save squared statevector amplitudes (probabilities)."""
def __init__(self,
key,
num_qubits,
params,
unnormalized=False,
pershot=False,
conditional=False):
"""Instruction to save squared statevector amplitudes (probabilities).
Args:
key (str): the key for retrieving saved data from results.
num_qubits (int): the number of qubits for the snapshot type.
            params (list): the basis states to return amplitudes for.
            unnormalized (bool): if True save the unnormalized accumulated
                                 probabilities over all shots [Default: False].
            pershot (bool): if True save a list of probability vectors for each
                            shot of the simulation rather than a single
                            probability vector [Default: False].
conditional (bool): if True save the probability vector conditional
on the current classical register values
[Default: False].
Raises:
ExtensionError: if params is invalid for the specified number of qubits.
"""
params = _format_amplitude_params(params, num_qubits)
super().__init__("save_amplitudes_sq",
key,
num_qubits,
unnormalized=unnormalized,
pershot=pershot,
conditional=conditional,
params=params)
def save_amplitudes(self, key, params, pershot=False, conditional=False):
"""Save complex statevector amplitudes.
Args:
key (str): the key for retrieving saved data from results.
params (List[int] or List[str]): the basis states to return amplitudes for.
        pershot (bool): if True save a list of amplitude vectors for each
                        shot of the simulation rather than a single
                        amplitude vector [Default: False].
conditional (bool): if True save the amplitudes vector conditional
on the current classical register values
[Default: False].
Returns:
QuantumCircuit: with attached instruction.
Raises:
ExtensionError: if params is invalid for the specified number of qubits.
"""
qubits = default_qubits(self)
instr = SaveAmplitudes(key, len(qubits), params,
pershot=pershot, conditional=conditional)
return self.append(instr, qubits)
def save_amplitudes_squared(self, key, params,
unnormalized=False,
pershot=False,
conditional=False):
"""Save squared statevector amplitudes (probabilities).
Args:
key (str): the key for retrieving saved data from results.
params (List[int] or List[str]): the basis states to return amplitudes for.
        unnormalized (bool): if True save the unnormalized accumulated
                             probabilities over all shots [Default: False].
        pershot (bool): if True save a list of probability vectors for each
                        shot of the simulation rather than a single
                        probability vector [Default: False].
conditional (bool): if True save the probability vector conditional
on the current classical register values
[Default: False].
Returns:
QuantumCircuit: with attached instruction.
Raises:
ExtensionError: if params is invalid for the specified number of qubits.
"""
qubits = default_qubits(self)
instr = SaveAmplitudesSquared(key, len(qubits), params,
unnormalized=unnormalized,
pershot=pershot,
conditional=conditional)
return self.append(instr, qubits)
def _format_amplitude_params(params, num_qubits=None):
"""Format amplitude params as a interger list."""
if isinstance(params[0], str):
if params[0].find('0x') == 0:
params = [int(i, 16) for i in params]
else:
params = [int(i, 2) for i in params]
if num_qubits and max(params) >= 2 ** num_qubits:
raise ExtensionError(
"Param values contain a state larger than the number of qubits")
return params
QuantumCircuit.save_amplitudes = save_amplitudes
QuantumCircuit.save_amplitudes_squared = save_amplitudes_squared
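# --- Usage sketch (added for clarity; the circuit below is an illustrative example,
# --- and assumes a simulator backend that understands these save instructions) ---
# After importing this module the methods are attached to QuantumCircuit:
#
# qc = QuantumCircuit(2)
# qc.h(0)
# qc.cx(0, 1)
# qc.save_amplitudes("bell_amps", params=["0x0", "0x3"])    # amplitudes of |00> and |11>
# qc.save_amplitudes_squared("bell_probs", params=[0, 3])   # their probabilities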
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Africoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Africoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
#!/usr/bin/env python
import sys
import re
import optparse
from ctypes import *
"""
This script will use the prototypes from "checkdocs.py -s" to concoct
a 1:1 Python wrapper for Allegro.
"""
class _AL_UTF8String:
pass
class Allegro:
def __init__(self):
self.types = {}
self.functions = {}
self.constants = {}
def add_struct(self, name):
x = type(name, (Structure, ), {})
self.types[name] = x
def add_union(self, name):
x = type(name, (Union, ), {})
self.types[name] = x
def get_type(self, ptype):
conversion = {
"bool": c_bool,
"_Bool": c_bool,
"char": c_byte,
"unsignedchar": c_ubyte,
"int": c_int,
"unsigned": c_uint,
"unsignedint": c_uint,
"int16_t": c_int16,
"uint16_t": c_uint16,
"int32_t": c_int32,
"uint32_t": c_uint32,
"int64_t": c_int64,
"uint64_t": c_uint64,
"uintptr_t": c_void_p,
"intptr_t": c_void_p,
"GLuint": c_uint,
"unsignedlong": c_ulong,
"long": c_long,
"size_t": c_size_t,
"off_t": c_int64,
"time_t": c_int64,
"va_list": c_void_p,
"float": c_float,
"double": c_double,
"al_fixed": c_int,
"HWND": c_void_p,
"char*": _AL_UTF8String,
# hack: this probably shouldn't be in the public docs
"postprocess_callback_t": c_void_p,
}
ptype = re.sub(r"\bstruct|union\b", "", ptype)
ptype = re.sub(r"\bconst\b", "", ptype)
ptype = re.sub(r"\bextern\b", "", ptype)
ptype = re.sub(r"\b__inline__\b", "", ptype)
ptype = re.sub(r"\s+", "", ptype)
if ptype.endswith("*"):
if ptype in conversion:
return conversion[ptype]
t = ptype[:-1]
if t in self.types:
return POINTER(self.types[t])
return c_void_p
elif ptype in self.types:
return self.types[ptype]
else:
try:
return conversion[ptype]
except KeyError:
print("Type Error:" + str(ptype))
return None
def parse_funcs(self, funcs):
"""
Go through all documented functions and add their prototypes
as Python functions.
The file should have been generated by Allegro's documentation
generation scripts.
"""
for func in funcs:
name, proto = func.split(":", 1)
if not name.startswith("al_"):
continue
proto = proto.strip()
name = name[:-2]
if proto.startswith("enum"):
continue
if proto.startswith("typedef"):
continue
if "=" in proto:
continue
if proto.startswith("#"):
continue
funcstart = proto.find(name)
funcend = funcstart + len(name)
ret = proto[:funcstart].rstrip()
params = proto[funcend:].strip(" ;")
if params[0] != "(" or params[-1] != ")":
print("Error:")
print(params)
continue
params2 = params[1:-1]
# remove callback argument lists
balance = 0
params = ""
for c in params2:
if c == ")":
balance -= 1
if balance == 0:
params += c
if c == "(":
balance += 1
params = params.split(",")
plist = []
for param in params:
param = re.sub(r"\bconst\b", "", param)
param = param.strip()
if param == "void":
continue
if param == "":
continue
if param == "...":
continue
# treat arrays as a void pointer, for now
if param.endswith("]") or param.endswith("*"):
plist.append(c_void_p)
continue
# treat callbacks as a void pointer, for now
if param.endswith(")"):
plist.append(c_void_p)
continue
mob = re.match("^.*?(\w+)$", param)
if mob:
pnamepos = mob.start(1)
if pnamepos == 0:
# Seems the parameter is not named
pnamepos = len(param)
else:
print(params)
print(proto)
print("")
continue
ptype = param[:pnamepos]
ptype = self.get_type(ptype)
plist.append(ptype)
f = type("", (object, ), {"restype": c_int})
if not ret.endswith("void"):
f.restype = self.get_type(ret)
try:
f.argtypes = plist
except TypeError as e:
print(e)
print(name)
print(plist)
self.functions[name] = f
def parse_protos(self, filename):
protos = []
unions = []
funcs = []
# first pass: create all structs, but without fields
for line in open(filename):
name, proto = line.split(":", 1)
proto = proto.lstrip()
if name.endswith("()"):
funcs.append(line)
continue
# anonymous structs have no name at all
if name and not name.startswith("ALLEGRO_"):
continue
if name == "ALLEGRO_OGL_EXT_API":
continue
if proto.startswith("union") or\
proto.startswith("typedef union"):
self.add_union(name)
unions.append((name, proto))
elif proto.startswith("struct") or\
proto.startswith("typedef struct"):
self.add_struct(name)
protos.append((name, proto))
elif proto.startswith("enum") or\
proto.startswith("typedef enum"):
if name:
self.types[name] = c_int
protos.append(("", proto))
elif proto.startswith("#define"):
if not name.startswith("_") and not name.startswith("GL_"):
i = eval(proto.split(None, 2)[2])
self.constants[name] = i
else:
# actual typedef
mob = re.match("typedef (.*) " + name, proto)
if mob:
t = mob.group(1)
self.types[name] = self.get_type(t.strip())
else:
# Probably a function pointer
self.types[name] = c_void_p
protos += unions
# second pass: fill in fields
for name, proto in protos:
bo = proto.find("{")
if bo == -1:
continue
bc = proto.rfind("}")
braces = proto[bo + 1:bc]
if proto.startswith("enum") or \
proto.startswith("typedef enum"):
fields = braces.split(",")
i = 0
for field in fields:
if "=" in field:
fname, val = field.split("=", 1)
fname = fname.strip()
try:
i = int(eval(val, globals(), self.constants))
except NameError:
i = val
else:
fname = field.strip()
if not fname:
continue
self.constants[fname] = i
try:
i += 1
except TypeError:
pass
continue
balance = 0
fields = [""]
for c in braces:
if c == "{":
balance += 1
if c == "}":
balance -= 1
if c == ";" and balance == 0:
fields.append("")
else:
fields[-1] += c
flist = []
for field in fields:
if not field:
continue
# add function pointer as void pointer
mob = re.match(".*?\(\*(\w+)\)", field)
if mob:
flist.append((mob.group(1), "c_void_p"))
continue
# add any pointer as void pointer
mob = re.match(".*?\*(\w+)$", field)
if mob:
flist.append((mob.group(1), "c_void_p"))
continue
# add an array
mob = re.match("(.*)( \w+)\[(.*?)\]$", field)
if mob:
# this is all a hack
n = 0
ftype = mob.group(1)
if ftype.startswith("struct"):
if ftype == "struct {float axis[3];}":
t = "c_float * 3"
else:
print("Error: Can't parse " + ftype + " yet.")
t = None
else:
n = mob.group(3)
# something in A5 uses a 2d array
if "][" in n:
n = n.replace("][", " * ")
# something uses a division expression
if "/" in n:
n = "(" + n.replace("/", "//") + ")"
t = self.get_type(ftype).__name__ + " * " + n
fname = mob.group(2)
flist.append((fname, t))
continue
vars = field.split(",")
mob = re.match("\s*(.*?)\s+(\w+)\s*$", vars[0])
t = self.get_type(mob.group(1))
vname = mob.group(2)
if t is not None and vname is not None:
flist.append((vname, t.__name__))
for v in vars[1:]:
flist.append((v.strip(), t.__name__))
else:
print("Error: " + str(vars))
try:
self.types[name].my_fields = flist
except AttributeError:
print(name, flist)
self.parse_funcs(funcs)
def main():
p = optparse.OptionParser()
p.add_option("-o", "--output", help="location of generated file")
p.add_option("-p", "--protos", help="A file with all " +
"prototypes to generate Python wrappers for, one per line. "
"Generate it with docs/scripts/checkdocs.py -p")
p.add_option("-t", "--type", help="the library type to " +
"use, e.g. debug")
p.add_option("-v", "--version", help="the library version to " +
"use, e.g. 5.1")
options, args = p.parse_args()
if not options.protos:
p.print_help()
return
al = Allegro()
al.parse_protos(options.protos)
f = open(options.output, "w") if options.output else sys.stdout
release = options.type
version = options.version
f.write(r"""# Generated by generate_python_ctypes.py.
import os, platform, sys
from ctypes import *
from ctypes.util import *
# You must adjust this function to point ctypes to the A5 DLLs you are
# distributing.
_dlls = []
def _add_dll(name):
release = "%(release)s"
if os.name == "nt":
release = "%(release)s-%(version)s"
# Under Windows, DLLs are found in the current directory, so this
# would be an easy way to keep all your DLLs in a sub-folder.
# os.chdir("dlls")
path = find_library(name + release)
if not path:
if os.name == "mac":
path = name + release + ".dylib"
elif os.name == "nt":
path = name + release + ".dll"
elif os.name == "posix":
if platform.mac_ver()[0]:
path = name + release + ".dylib"
else:
path = "lib" + name + release + ".so"
else:
sys.stderr.write("Cannot find library " + name + "\n")
# In most cases, you actually don't want the above and instead
# use the exact filename within your game distribution, possibly
# even within a .zip file.
# if not os.path.exists(path):
# path = "dlls/" + path
try:
# RTLD_GLOBAL is required under OSX for some reason (?)
_dlls.append(CDLL(path, RTLD_GLOBAL))
except OSError:
# No need to fail here, might just be one of the addons.
pass
# os.chdir("..")
_add_dll("allegro")
_add_dll("allegro_acodec")
_add_dll("allegro_audio")
_add_dll("allegro_primitives")
_add_dll("allegro_color")
_add_dll("allegro_font")
_add_dll("allegro_ttf")
_add_dll("allegro_image")
_add_dll("allegro_dialog")
_add_dll("allegro_memfile")
_add_dll("allegro_physfs")
_add_dll("allegro_shader")
_add_dll("allegro_main")
_add_dll("allegro_monolith")
# We don't have information ready which A5 function is in which DLL,
# so we just try them all.
def _dll(func, ret, params):
for dll in _dlls:
try:
f = dll[func]
f.restype = ret
f.argtypes = params
return f
except AttributeError: pass
sys.stderr.write("Cannot find function " + func + "\n")
return lambda *args: None
# In Python3, all Python strings are unicode so we have to convert to
# UTF8 byte strings before passing to Allegro.
if sys.version_info[0] > 2:
class _AL_UTF8String:
def from_param(x):
return x.encode("utf8")
else:
_AL_UTF8String = c_char_p
""" % locals())
postpone = []
for name, val in sorted(al.constants.items()):
try:
if isinstance(val, str):
val = int(eval(val, globals(), al.constants))
f.write(name + " = " + str(val) + "\n")
except:
postpone.append((name, val))
for name, val in postpone:
f.write(name + " = " + val + "\n")
structs = set()
# output everything except structs and unions
for name, x in sorted(al.types.items()):
if not name:
continue
base = x.__bases__[0]
if base != Structure and base != Union:
f.write(name + " = " + x.__name__ + "\n")
else:
structs.add(name)
# order structs and unions by their dependencies
structs_list = []
remaining = set(structs)
while remaining:
for name in sorted(remaining):
ok = True
x = al.types[name]
if hasattr(x, "my_fields"):
for fname, ftype in x.my_fields:
if " " in ftype:
ftype = ftype.split()[0]
if ftype in structs and ftype in remaining:
ok = False
break
if ok:
structs_list.append(name)
remaining.remove(name)
for name in structs_list:
x = al.types[name]
base = x.__bases__[0]
f.write("class " + name + "(" + base.__name__ + "):\n")
if hasattr(x, "my_fields"):
f.write(" _fields_ = [\n")
for fname, ftype in x.my_fields:
f.write(" (\"" + fname + "\", " + ftype + "),\n")
f.write(" ]\n")
else:
f.write(" pass\n")
pt = POINTER(x)
f.write("%s = POINTER(%s)\n" % (pt.__name__, name))
for name, x in sorted(al.functions.items()):
try:
line = name + " = _dll(\"" + name + "\", "
line += x.restype.__name__ + ", "
line += "[" + (", ".join([a.__name__ for a in x.argtypes])) +\
"])\n"
f.write(line)
except AttributeError as e:
print("Ignoring " + name + " because of errors (" + str(e) + ").")
# some stuff the automated parser doesn't pick up
f.write(r"""
ALLEGRO_VERSION_INT = \
((ALLEGRO_VERSION << 24) | (ALLEGRO_SUB_VERSION << 16) | \
(ALLEGRO_WIP_VERSION << 8) | ALLEGRO_RELEASE_NUMBER)
""")
f.write(r"""
# work around bug http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36834
if os.name == "nt":
def al_map_rgba_f(r, g, b, a): return ALLEGRO_COLOR(r, g, b, a)
def al_map_rgb_f(r, g, b): return ALLEGRO_COLOR(r, g, b, 1)
def al_map_rgba(r, g, b, a):
return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, a / 255.0)
def al_map_rgb(r, g, b):
return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, 1)
""")
f.write("""
def al_main(real_main, *args):
def python_callback(argc, argv):
real_main(*args)
return 0
cb = CFUNCTYPE(c_int, c_int, c_void_p)(python_callback)
al_run_main(0, 0, cb);
""")
f.close()
main()
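# Example invocation (a sketch based only on the option help strings above; the
# file names are placeholders, not files shipped with this script):
#   python generate_python_ctypes.py --protos protos.txt --output allegro_ctypes.py \
#       --type debug --version 5.1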
|
"""File for Google Cloud Storage."""
import logging
import os
import urllib.parse
from pathlib import Path
import aiohttp
from aiofile import AIOFile
from gcloud.aio.storage import Storage
from google.cloud import storage
from one_barangay.local_settings import logger
async def async_upload_to_bucket(
filepath: str,
file_obj,
gcs_path: str,
):
"""Upload files to bucket.
Args:
filepath: str: The path to the file to be uploaded.
file_obj: The file object from reading a file
gcs_path: str: The target bucket name and sub-folder in
GCS to upload to. (e.g. documents/photo)
Returns:
The path to the uploaded file.
"""
async with aiohttp.ClientSession() as session:
gcs_storage = Storage(session=session) # skipcq
gcs_filename = filepath.split("/")[-1]
await gcs_storage.upload(gcs_path, gcs_filename, file_obj)
return f"https://storage.googleapis.com/{gcs_path}/{urllib.parse.quote(gcs_filename)}"
async def upload_to_gcs_runner(
filepath: str,
gcs_path: str,
):
"""Call the 'async_upload_to_bucket'.
Args:
filepath: str: The path to the file to be uploaded.
gcs_path: str: The target bucket name and sub-folder in GCS.
Returns:
The path to the uploaded file.
"""
# target_bucket_name = target_bucket_name
# bucket_folder = bucket_folder
try:
async with AIOFile(filepath, mode="rb") as afp:
f = await afp.read()
path = await async_upload_to_bucket(filepath, f, gcs_path)
return path
except FileNotFoundError as e:
logger.exception("File not found. Make sure the file exists. %s", e)
except OSError as e:
logger.exception("File not uploaded. %s", e)
def download_from_gcs(
filename: str,
target_bucket_name: str,
bucket_folder: str,
):
"""Download file from Google Cloud Storage bucket.
Args:
filename: str: The name of file being downloaded.
        target_bucket_name: str: The name of the bucket to download from.
        bucket_folder: str: The folder within the bucket to download from.
Returns:
None.
"""
try:
storage_client = storage.Client(os.getenv("GOOGLE_PROJECT_ID"))
        bucket = storage_client.get_bucket(target_bucket_name)
path = os.path.join(bucket_folder, filename)
base_dir = Path(__file__).resolve().parent.parent # TODO: Change to user location
destination = os.path.join(base_dir, filename)
blob = bucket.blob(path)
blob.download_to_filename(destination)
logging.info("%s downloaded to %s.", filename, destination)
except FileNotFoundError as e:
logger.exception("File not found. Make sure the file exists. %s", e)
except OSError as e:
logger.exception("%s not downloaded. %s", filename, e)
# if __name__ == "__main__":
# Sample Calls to Uploading to GCS
# asyncio.run(
# upload_to_gcs_runner(
# "<your_absolute_filepath>"
# )
# )
# Sample Calls to Downloading from GCS
# download_from_gcs(
# "kath.png",
# str(os.getenv("GS_MEDIA_BUCKET_NAME")),
# str(os.getenv("FILE_BUCKET_FOLDER")),
# )
|
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.04 #
#####################################################
# python exps/LFNA/basic-same.py --srange 1-999 --env_version v1 --hidden_dim 16
# python exps/LFNA/basic-same.py --srange 1-999 --env_version v2 --hidden_dim
#####################################################
import sys, time, copy, torch, random, argparse
from tqdm import tqdm
from copy import deepcopy
from pathlib import Path
lib_dir = (Path(__file__).parent / ".." / ".." / "lib").resolve()
if str(lib_dir) not in sys.path:
sys.path.insert(0, str(lib_dir))
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint
from log_utils import time_string
from log_utils import AverageMeter, convert_secs2time
from utils import split_str2indexes
from procedures.advanced_main import basic_train_fn, basic_eval_fn
from procedures.metric_utils import SaveMetric, MSEMetric, ComposeMetric
from datasets.synthetic_core import get_synthetic_env
from models.xcore import get_model
from lfna_utils import lfna_setup
def subsample(historical_x, historical_y, maxn=10000):
total = historical_x.size(0)
if total <= maxn:
return historical_x, historical_y
else:
indexes = torch.randint(low=0, high=total, size=[maxn])
return historical_x[indexes], historical_y[indexes]
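# Illustrative usage sketch (shapes are hypothetical, not taken from this script):
#   x, y = torch.randn(50000, 3), torch.randn(50000, 1)
#   sx, sy = subsample(x, y)  # 10000 rows drawn with replacement, x/y kept aligned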
def main(args):
logger, env_info, model_kwargs = lfna_setup(args)
# check indexes to be evaluated
to_evaluate_indexes = split_str2indexes(args.srange, env_info["total"], None)
logger.log(
"Evaluate {:}, which has {:} timestamps in total.".format(
args.srange, len(to_evaluate_indexes)
)
)
w_container_per_epoch = dict()
per_timestamp_time, start_time = AverageMeter(), time.time()
for i, idx in enumerate(to_evaluate_indexes):
need_time = "Time Left: {:}".format(
convert_secs2time(
per_timestamp_time.avg * (len(to_evaluate_indexes) - i), True
)
)
logger.log(
"[{:}]".format(time_string())
+ " [{:04d}/{:04d}][{:04d}]".format(i, len(to_evaluate_indexes), idx)
+ " "
+ need_time
)
# train the same data
historical_x = env_info["{:}-x".format(idx)]
historical_y = env_info["{:}-y".format(idx)]
# build model
model = get_model(dict(model_type="simple_mlp"), **model_kwargs)
# build optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True)
criterion = torch.nn.MSELoss()
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=[
int(args.epochs * 0.25),
int(args.epochs * 0.5),
int(args.epochs * 0.75),
],
gamma=0.3,
)
train_metric = MSEMetric()
best_loss, best_param = None, None
for _iepoch in range(args.epochs):
preds = model(historical_x)
optimizer.zero_grad()
loss = criterion(preds, historical_y)
loss.backward()
optimizer.step()
lr_scheduler.step()
# save best
if best_loss is None or best_loss > loss.item():
best_loss = loss.item()
best_param = copy.deepcopy(model.state_dict())
model.load_state_dict(best_param)
with torch.no_grad():
train_metric(preds, historical_y)
train_results = train_metric.get_info()
metric = ComposeMetric(MSEMetric(), SaveMetric())
eval_dataset = torch.utils.data.TensorDataset(
env_info["{:}-x".format(idx)], env_info["{:}-y".format(idx)]
)
eval_loader = torch.utils.data.DataLoader(
eval_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0
)
results = basic_eval_fn(eval_loader, model, metric, logger)
log_str = (
"[{:}]".format(time_string())
+ " [{:04d}/{:04d}]".format(idx, env_info["total"])
+ " train-mse: {:.5f}, eval-mse: {:.5f}".format(
train_results["mse"], results["mse"]
)
)
logger.log(log_str)
save_path = logger.path(None) / "{:04d}-{:04d}.pth".format(
idx, env_info["total"]
)
w_container_per_epoch[idx] = model.get_w_container().no_grad_clone()
save_checkpoint(
{
"model_state_dict": model.state_dict(),
"model": model,
"index": idx,
"timestamp": env_info["{:}-timestamp".format(idx)],
},
save_path,
logger,
)
logger.log("")
per_timestamp_time.update(time.time() - start_time)
start_time = time.time()
save_checkpoint(
{"w_container_per_epoch": w_container_per_epoch},
logger.path(None) / "final-ckp.pth",
logger,
)
logger.log("-" * 200 + "\n")
logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser("Use the data in the past.")
parser.add_argument(
"--save_dir",
type=str,
default="./outputs/lfna-synthetic/use-same-timestamp",
help="The checkpoint directory.",
)
parser.add_argument(
"--env_version",
type=str,
required=True,
help="The synthetic enviornment version.",
)
parser.add_argument(
"--hidden_dim",
type=int,
required=True,
help="The hidden dimension.",
)
parser.add_argument(
"--init_lr",
type=float,
default=0.1,
help="The initial learning rate for the optimizer (default is Adam)",
)
parser.add_argument(
"--batch_size",
type=int,
default=512,
help="The batch size",
)
parser.add_argument(
"--epochs",
type=int,
default=1000,
help="The total number of epochs.",
)
parser.add_argument(
"--srange", type=str, required=True, help="The range of models to be evaluated"
)
parser.add_argument(
"--workers",
type=int,
default=4,
help="The number of data loading workers (default: 4)",
)
# Random Seed
parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")
args = parser.parse_args()
if args.rand_seed is None or args.rand_seed < 0:
args.rand_seed = random.randint(1, 100000)
assert args.save_dir is not None, "The save dir argument can not be None"
args.save_dir = "{:}-{:}-d{:}".format(
args.save_dir, args.env_version, args.hidden_dim
)
main(args)
|
'''
Given a collection of intervals, merge all overlapping intervals.
Example 1:
Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlap, merge them into [1,6].
Example 2:
Input: intervals = [[1,4],[4,5]]
Output: [[1,5]]
Explanation: Intervals [1,4] and [4,5] are considered overlapping.
NOTE: input types have been changed on April 15, 2019. Please reset to default code definition to get new method signature.
Constraints:
intervals[i][0] <= intervals[i][1]
'''
from typing import List
class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
intervals = sorted(intervals, key = lambda x: x[0])
output = []
i = 0
if len(intervals) <= 1:
return intervals
while i < len(intervals) - 1:
tmp = intervals[i]
while tmp[1] >= intervals[i + 1][0]:
tmp[1] = max(tmp[1], intervals[i + 1][1])
i += 1
if i >= len(intervals) - 1:
break
i += 1
output.append(tmp)
if i <= len(intervals) - 1:
output.append(intervals[-1])
return output
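# A small usage sketch (not part of the LeetCode submission); the inputs are the
# docstring examples above and the expected outputs are shown in the comments.
if __name__ == "__main__":
    solution = Solution()
    print(solution.merge([[1, 3], [2, 6], [8, 10], [15, 18]]))  # [[1, 6], [8, 10], [15, 18]]
    print(solution.merge([[1, 4], [4, 5]]))  # [[1, 5]]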
|
from rest_framework import viewsets
from periodic_tasks_api.models import CustomExtendedPeriodicTask
from periodic_tasks_api.serializers import PeriodicTaskSerializer
from periodic_tasks_api.filters import PeriodicTaskFilterSet
class PeriodicTaskView(viewsets.ModelViewSet):
queryset = CustomExtendedPeriodicTask.objects.all()
serializer_class = PeriodicTaskSerializer
filter_backends = [PeriodicTaskFilterSet]
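# A minimal URL-wiring sketch (assumption: this is not part of the repository's
# actual urls module; the route prefix and basename are made up for illustration):
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r"periodic-tasks", PeriodicTaskView, basename="periodic-task")
#   urlpatterns = router.urls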
|
#!/usr/bin/python3
# coding: utf-8
from network.group import Group
import paho.mqtt.client as mqtt
from threading import Thread
import time
from log import logger
import paho.mqtt.subscribe as subscribe
import json
import random
import string
class Switch(Thread):
def __init__(self, broker_ip):
Thread.__init__(self)
self.broker_ip = broker_ip
self.groups = {}
self.drivers = {
"leds" : {},
"sensors": {},
"blinds": {}
}
self.diagnostic = {
"config": {},
"events": {}
}
self.name = "Switch" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12))
def on_disconnect(self, client, userdata, rc):
if rc != 0:
logger.warning("Unexpected client disconnect for %r, will reconnect", self.name)
def run(self):
self.client = mqtt.Client(self.name)
self.client.on_message = self.event_received
self.client.on_disconnect = self.on_disconnect
self.client.connect(self.broker_ip)
self.client.loop_start()
subscribe.callback(self.event_received, "#", hostname=self.broker_ip)
        while self.is_alive():
time.sleep(1)
self.client.loop_stop()
def event_received(self, client, userdata, message):
try:
data = message.payload.decode("utf-8")
logger.debug("received url %r %r", message.topic, str(data))
if message.topic.endswith("/setup/hello"):
data = json.loads(data)
topic_url = data["topic"] + "/setup/config"
config = {}
if data["type"] == "led":
config["iMax"] = 700
self.client.publish("/write/" + topic_url, json.dumps(config))
        except Exception:
logger.exception("Invalid value received")
def create_group(self, leds, sensors, blinds, group_id):
if group_id in self.groups:
return False
group = Group(self.broker_ip, group_id)
self.groups[group_id] = group
for led in leds:
group.add_led(led)
for sensor in sensors:
group.add_sensor(sensor)
for blind in blinds:
group.add_blind(blind)
group.start()
self.diagnostic['events'][time.time()] = "Group " + str(group_id) + "has been created and contains " + json.dumps(group.serialize())
return True
def add_driver_to_group(self, group_id, driver_type, mac):
if group_id not in self.groups:
return False
group = self.groups[group_id]
if driver_type == "led":
led = self.get_led(mac)
if not led:
return False
return group.add_led(led)
elif driver_type == "sensor":
sensor = self.get_sensor(mac)
if not sensor:
return False
return group.add_sensor(sensor)
elif driver_type == "blind":
blind = self.get_blind(mac)
if not blind:
return False
return group.add_blind(blind)
self.diagnostic['events'][time.time()] = "Driver " + driver_type + " : " + mac + "has been been added to " + group_id
return False
def get_group_id(self, group_id):
if group_id in self.groups:
return self.groups[group_id]
return {}
def list_groups(self):
return self.groups.values()
def update_group_rules(self, group_id, rule_id, value):
if group_id not in self.groups:
return False
if rule_id == "brightness":
self.groups[group_id].set_brightness(value)
elif rule_id == "temperature":
self.groups[group_id].set_temperature(value)
elif rule_id == "presence":
self.groups[group_id].set_presence(value)
self.diagnostic['events'][time.time()] = "Rule " + rule_id + " is set to " + str(value) + " for " + str(group_id)
return True
def list_leds(self):
return self.drivers["leds"].values()
def get_led(self, led_id):
if led_id in self.drivers["leds"]:
return self.drivers["leds"][led_id]
return None
def plug_led(self, led):
self.drivers["leds"][led.mac] = led
self.diagnostic['events'][time.time()] = "New led " + led.mac + " has been plugged into the switch"
def unplug_led(self, led):
if led.mac in self.drivers["leds"]:
del self.drivers["leds"][led.mac]
self.diagnostic['events'][time.time()] = "Led " + led.mac + " has been unplugged from the switch"
def list_sensors(self):
return self.drivers["sensors"].values()
def get_sensor(self, sensor_id):
if sensor_id in self.drivers["sensors"]:
return self.drivers["sensors"][sensor_id]
return None
def plug_sensor(self, sensor):
self.drivers["sensors"][sensor.mac] = sensor
self.diagnostic['events'][time.time()] = "New sensor " + sensor.mac + " has been plugged into the switch"
def unplug_sensor(self, sensor):
if sensor.mac in self.drivers["sensors"]:
del self.drivers["sensors"][sensor.mac]
self.diagnostic['events'][time.time()] = "Sensor " + sensor.mac + " has been unplugged from the switch"
def switch_led_mode(self, led_id, auto=True):
if led_id not in self.drivers["leds"]:
return False
led = self.drivers["leds"][led_id]
url = "/write/" + led.base_topic + "/status/auto"
logger.info("Send switch mode to %r for %r", auto, url)
status = "auto"
if not auto:
status = "manual"
self.diagnostic['events'][time.time()] = "Switch led " + led.mac + " into mode " + status
self.client.publish(url, "%s" % auto)
return True
def list_blinds(self):
return self.drivers["blinds"].values()
def get_blind(self, blind_id):
if blind_id in self.drivers["blinds"]:
return self.drivers["blinds"][blind_id]
return None
def plug_blind(self, blind):
self.drivers["blinds"][blind.mac] = blind
self.diagnostic['events'][time.time()] = "New blind " + blind.mac + " has been plugged into the switch"
def unplug_blind(self, blind):
if blind.mac in self.drivers["blinds"]:
del self.drivers["blinds"][blind.mac]
self.diagnostic['events'][time.time()] = "Blind " + blind.mac + " has been unplugged from the switch"
def get_diagnostic(self):
self.diagnostic["config"]["groups"] = [group.serialize() for group in self.groups.values()]
return self.diagnostic
def set_manual_led_brightness(self, led_id, brightness=0):
if led_id not in self.drivers["leds"]:
return False
led = self.drivers["leds"][led_id]
url = "/write/" + led.base_topic + "/base/setpointManual"
logger.info("Send setpoint to %r for %r", brightness, url)
self.diagnostic['events'][time.time()] = "Force led " + led.mac + " brightness " + str(brightness)
logger.info(" back %r", self.client.publish(url, str(brightness)))
return True
def switch_blind_mode(self, blind_id, auto=True):
if blind_id not in self.drivers["blinds"]:
return False
blind = self.drivers["blinds"][blind_id]
url = "/write/" + blind.base_topic + "/status/auto"
logger.info("Send switch mode to %r for %r", auto, url)
status = "auto"
if not auto:
status = "manual"
self.diagnostic['events'][time.time()] = "Switch blind " + blind.mac + " into mode " + status
self.client.publish(url, "%s" % auto)
return True
def set_manual_blind_position(self, blind_id, position, blind_number=0):
if blind_id not in self.drivers["blinds"]:
return False
blind = self.drivers["blinds"][blind_id]
if not blind_number or blind_number == 1:
url = "/write/" + blind.base_topic + "/base/blind1Manual"
logger.info("Send position to %r for %r", position, url)
self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " position " + str(position)
self.client.publish(url, str(position))
if not blind_number or blind_number == 2:
url = "/write/" + blind.base_topic + "/base/blind2Manual"
logger.info("Send position to %r for %r", position, url)
self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " position " + str(position)
self.client.publish(url, str(position))
def set_manual_blind_fin(self, blind_id, fin, blind_number=0):
if blind_id not in self.drivers["blinds"]:
return False
blind = self.drivers["blinds"][blind_id]
if not blind_number or blind_number == 1:
url = "/write/" + blind.base_topic + "/base/fin1Manual"
logger.info("Send position to %r for %r", fin, url)
self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " fin " + str(fin)
self.client.publish(url, str(fin))
if not blind_number or blind_number == 2:
url = "/write/" + blind.base_topic + "/base/fin2Manual"
logger.info("Send position to %r for %r", fin, url)
self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " fin " + str(fin)
self.client.publish(url, str(fin))
def switch_group_mode(self, group_id, auto=True):
if group_id not in self.groups:
return False
group = self.groups[group_id]
url = "/write/" + group.base_topic + "/status/auto"
logger.info("Send switch mode to %r for %r", auto, url)
status = "auto"
if not auto:
status = "manual"
self.diagnostic['events'][time.time()] = "Switch group " + str(group.group_id) + " into mode " + str(status)
self.client.publish(url, "%s" % auto)
return True
def set_group_setpoint(self, group_id, setpoint):
if group_id not in self.groups:
return False
group = self.groups[group_id]
url = "/write/" + group.base_topic + "/config/setpoint"
logger.info("Send setpoint value to %r for %r", setpoint, url)
self.diagnostic['events'][time.time()] = "Send setpoint " + str(setpoint) + " to group " + str(group.group_id)
self.client.publish(url, str(setpoint))
return True
def set_group_blind_position(self, group_id, position):
if group_id not in self.groups:
return False
group = self.groups[group_id]
url = "/write/" + group.base_topic + "/config/blindPosition"
logger.info("Send setpoint value to %r for %r", position, url)
self.diagnostic['events'][time.time()] = "Send blind position " + str(position) + " to group " + str(group.group_id)
self.client.publish(url, str(position))
return True
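# A minimal usage sketch (assumptions: a reachable MQTT broker at 192.168.0.10
# and driver objects exposing a `mac` attribute; none of these are defined here):
#   switch = Switch("192.168.0.10")
#   switch.start()
#   switch.plug_led(led)                            # `led` is a hypothetical driver
#   switch.create_group([led], [], [], group_id=1)
#   switch.update_group_rules(1, "brightness", 80)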
|
# coding=utf-8
########################################################################################################################
### Do not forget to adjust the following variables to your own plugin.
# The plugin's identifier, has to be unique
plugin_identifier = "bedlevelvisualizer"
# The plugin's python package, should be "octoprint_<plugin identifier>", has to be unique
plugin_package = "octoprint_bedlevelvisualizer"
# The plugin's human readable name. Can be overwritten within OctoPrint's internal data via __plugin_name__ in the
# plugin module
plugin_name = "Bed Visualizer"
# The plugin's version. Can be overwritten within OctoPrint's internal data via __plugin_version__ in the plugin module
plugin_version = "0.1.15"
# The plugin's description. Can be overwritten within OctoPrint's internal data via __plugin_description__ in the plugin
# module
plugin_description = """Displays 3D mesh of bed topography report."""
# The plugin's author. Can be overwritten within OctoPrint's internal data via __plugin_author__ in the plugin module
plugin_author = "jneilliii"
# The plugin's author's mail address.
plugin_author_email = "jneilliii+octoprint@gmail.com"
# The plugin's homepage URL. Can be overwritten within OctoPrint's internal data via __plugin_url__ in the plugin module
plugin_url = "https://github.com/jneilliii/OctoPrint-BedLevelVisualizer"
# The plugin's license. Can be overwritten within OctoPrint's internal data via __plugin_license__ in the plugin module
plugin_license = "AGPLv3"
# Any additional requirements besides OctoPrint should be listed here
plugin_requires = ["numpy>=1.16.0,<=1.19.2"]
### --------------------------------------------------------------------------------------------------------------------
### More advanced options that you usually shouldn't have to touch follow after this point
### --------------------------------------------------------------------------------------------------------------------
# Additional package data to install for this plugin. The subfolders "templates", "static" and "translations" will
# already be installed automatically if they exist.
plugin_additional_data = []
# Any additional python packages you need to install with your plugin that are not contained in <plugin_package>.*
plugin_additional_packages = []
# Any python packages within <plugin_package>.* you do NOT want to install with your plugin
plugin_ignored_packages = []
# Additional parameters for the call to setuptools.setup. If your plugin wants to register additional entry points,
# define dependency links or other things like that, this is the place to go. Will be merged recursively with the
# default setup parameters as provided by octoprint_setuptools.create_plugin_setup_parameters using
# octoprint.util.dict_merge.
#
# Example:
# plugin_requires = ["someDependency==dev"]
# additional_setup_parameters = {"dependency_links": ["https://github.com/someUser/someRepo/archive/master.zip#egg=someDependency-dev"]}
additional_setup_parameters = {}
########################################################################################################################
from setuptools import setup
try:
import octoprint_setuptools
except:
print("Could not import OctoPrint's setuptools, are you sure you are running that under "
"the same python installation that OctoPrint is installed under?")
import sys
sys.exit(-1)
setup_parameters = octoprint_setuptools.create_plugin_setup_parameters(
identifier=plugin_identifier,
package=plugin_package,
name=plugin_name,
version=plugin_version,
description=plugin_description,
author=plugin_author,
mail=plugin_author_email,
url=plugin_url,
license=plugin_license,
requires=plugin_requires,
    additional_packages=plugin_additional_packages,
ignored_packages=plugin_ignored_packages,
additional_data=plugin_additional_data
)
if len(additional_setup_parameters):
from octoprint.util import dict_merge
setup_parameters = dict_merge(setup_parameters, additional_setup_parameters)
setup(**setup_parameters)
|
#!/usr/bin/env python3
# Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING!
"""Implements the client for test."""
# pylint: skip-file
# pydocstyle: add-ignore=D105,D107,D401
import contextlib
import json
from typing import Any, BinaryIO, Dict, List, MutableMapping, Optional
import requests
import requests.auth
class RemoteCaller:
"""Executes the remote calls to the server."""
def __init__(self, url_prefix: str, auth: Optional[requests.auth.AuthBase] = None) -> None:
self.url_prefix = url_prefix
self.auth = auth
def test_me(
self,
query_some_parameter: str,
path_some_parameter: str) -> bytes:
"""
Is a test endpoint.
:param query_some_parameter:
:param path_some_parameter:
:return: a confirmation
"""
url = "".join([
self.url_prefix,
'/products/',
str(path_some_parameter)])
params = {} # type: Dict[str, str]
params['some_parameter'] = query_some_parameter
resp = requests.request(
method='get',
url=url,
params=params,
auth=self.auth)
with contextlib.closing(resp):
resp.raise_for_status()
return resp.content
# Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING!
|
# Copyright 2021 Research Institute of Systems Planning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import cached_property
from logging import getLogger
from typing import Dict, List, Optional, Sequence, Tuple, Union
from caret_analyze.value_objects.message_context import MessageContext, MessageContextType
from .lttng import Lttng
from .value_objects import PublisherValueLttng, SubscriptionCallbackValueLttng
from ...common import Columns, Util
from ...exceptions import (InvalidArgumentError,
UnsupportedNodeRecordsError,
UnsupportedTypeError)
from ...infra.interface import RuntimeDataProvider
from ...infra.lttng.column_names import COLUMN_NAME
from ...record import (merge, merge_sequencial, RecordsFactory, RecordsInterface)
from ...value_objects import (CallbackChain, CallbackStructValue,
CommunicationStructValue, InheritUniqueStamp,
NodePathStructValue, PublisherStructValue, Qos,
SubscriptionCallbackStructValue,
SubscriptionStructValue,
Tilde,
TimerCallbackStructValue,
UseLatestMessage,
VariablePassingStructValue)
logger = getLogger(__name__)
class RecordsProviderLttng(RuntimeDataProvider):
"""
Records are processed and measurement results are calculated.
In addition to merging, filtering and other operations are performed here.
"""
def __init__(
self,
lttng: Lttng
) -> None:
self._lttng = lttng
self._source = FilteredRecordsSource(lttng)
self._helper = RecordsProviderLttngHelper(lttng)
def communication_records(
self,
comm_val: CommunicationStructValue
) -> RecordsInterface:
"""
Provide communication records.
Parameters
----------
        comm_val : CommunicationStructValue
            Target communication value.
Returns
-------
RecordsInterface
Columns [inter process communication case]:
- [topic_name]/rclcpp_publish_timestamp
- [topic_name]/rcl_publish_timestamp
- [topic_name]/dds_publish_timestamp
- [callback_name]/callback_start_timestamp
Columns [intra process communication case]:
- [topic_name]/rclcpp_intra_publish_timestamp
- [topic_name]/message_timestamp
- [callback_name]/callback_start_timestamp
"""
assert comm_val.subscribe_callback_name is not None
if self.is_intra_process_communication(comm_val):
return self._compose_intra_proc_comm_records(comm_val)
return self._compose_inter_proc_comm_records(comm_val)
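    # A usage sketch for this provider (hedged: the trace path is a placeholder and
    # the construction details of Lttng and CommunicationStructValue are assumptions):
    #   lttng = Lttng('/path/to/trace')                      # hypothetical trace directory
    #   provider = RecordsProviderLttng(lttng)
    #   records = provider.communication_records(comm_val)   # comm_val from the architecture
    #   print(records.columns)                               # prefixed column names as documented above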
def node_records(
self,
node_path_val: NodePathStructValue,
) -> RecordsInterface:
if node_path_val.message_context is None:
# dummy record
msg = 'message context is None. return dummy record. '
msg += f'node_name: {node_path_val.node_name}'
logger.info(msg)
return RecordsFactory.create_instance()
if node_path_val.message_context_type == MessageContextType.CALLBACK_CHAIN:
return NodeRecordsCallbackChain(self, node_path_val).to_records()
if node_path_val.message_context_type == MessageContextType.INHERIT_UNIQUE_STAMP:
return NodeRecordsInheritUniqueTimestamp(self, node_path_val).to_records()
if node_path_val.message_context_type == MessageContextType.USE_LATEST_MESSAGE:
return NodeRecordsUseLatestMessage(self, node_path_val).to_records()
if node_path_val.message_context_type == MessageContextType.TILDE:
return NodeRecordsTilde(self, node_path_val).to_records()
raise UnsupportedNodeRecordsError(
'Unknown message context. '
f'message_context = {node_path_val.message_context.context_type.type_name}'
)
def callback_records(
self,
callback: CallbackStructValue
) -> RecordsInterface:
"""
Return callback duration records.
Parameters
----------
        callback : CallbackStructValue
target callback value.
Returns
-------
RecordsInterface
Columns
- [callback_name]/callback_start_timestamp
- [callback_name]/callback_end_timestamp
"""
callback_objects = self._helper.get_callback_objects(callback)
callback_records = self._source.callback_records(*callback_objects)
columns = [
COLUMN_NAME.CALLBACK_START_TIMESTAMP,
COLUMN_NAME.CALLBACK_END_TIMESTAMP
]
self._format(callback_records, columns)
self._rename_column(callback_records, callback.callback_name, None)
return callback_records
def subscribe_records(
self,
subscription: SubscriptionStructValue
) -> RecordsInterface:
"""
Provide subscription records.
Parameters
----------
        subscription : SubscriptionStructValue
Target subscription value.
Returns
-------
RecordsInterface
Columns
- [callback_name]/callback_start_timestamp
- [topic_name]/message_timestamp
- [topic_name]/source_timestamp
Raises
------
InvalidArgumentError
"""
callback = subscription.callback
assert callback is not None
tilde_subscription = self._helper.get_tilde_subscription(callback)
if tilde_subscription is None:
return self._subscribe_records(subscription)
return self._subscribe_records_with_tilde(subscription)
def _subscribe_records(
self,
subscription: SubscriptionStructValue
) -> RecordsInterface:
"""
Provide subscription records.
Parameters
----------
        subscription : SubscriptionStructValue
Target subscription value.
Returns
-------
RecordsInterface
Columns
- [callback_name]/callback_start_timestamp
- [topic_name]/message_timestamp
- [topic_name]/source_timestamp
Raises
------
InvalidArgumentError
"""
callback = subscription.callback
if callback is None:
raise InvalidArgumentError(
'callback_value is None. '
f'node_name: {subscription.node_name}'
f'callback_name: {subscription.callback_name}'
f'topic_name: {subscription.topic_name}'
)
callback_objects = self._helper.get_subscription_callback_objects(callback)
sub_records = self._source.sub_records(*callback_objects)
columns = [
COLUMN_NAME.CALLBACK_START_TIMESTAMP,
COLUMN_NAME.MESSAGE_TIMESTAMP,
COLUMN_NAME.SOURCE_TIMESTAMP,
]
self._format(sub_records, columns)
self._rename_column(
sub_records,
callback.callback_name,
subscription.topic_name
)
return sub_records
def _subscribe_records_with_tilde(
self,
subscription: SubscriptionStructValue
) -> RecordsInterface:
"""
Provide subscription records.
Parameters
----------
        subscription : SubscriptionStructValue
Target subscription value.
Returns
-------
RecordsInterface
Columns
- [callback_name]/callback_start_timestamp
- [topic_name]/message_timestamp
- [topic_name]/source_timestamp
- [topic_name]/tilde_subscribe_timestamp
- [topic_name]/tilde_message_id
Raises
------
InvalidArgumentError
"""
callback = subscription.callback
if callback is None:
raise InvalidArgumentError(
'callback_value is None. '
f'node_name: {subscription.node_name}'
f'callback_name: {subscription.callback_name}'
f'topic_name: {subscription.topic_name}'
)
callback_objects = self._helper.get_subscription_callback_objects(callback)
sub_records = self._source.sub_records(*callback_objects)
tilde_subscription = self._helper.get_tilde_subscription(callback)
if tilde_subscription is not None:
tilde_records = self._source.tilde_subscribe_records(tilde_subscription)
sub_records = merge_sequencial(
left_records=sub_records,
right_records=tilde_records,
left_stamp_key=COLUMN_NAME.CALLBACK_START_TIMESTAMP,
right_stamp_key=COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP,
join_left_key=None,
join_right_key=None,
how='left',
columns=Columns(sub_records.columns + tilde_records.columns).as_list(),
progress_label='binding: tilde_records',
)
columns = [
COLUMN_NAME.CALLBACK_START_TIMESTAMP,
COLUMN_NAME.MESSAGE_TIMESTAMP,
COLUMN_NAME.SOURCE_TIMESTAMP,
COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP,
COLUMN_NAME.TILDE_MESSAGE_ID,
]
self._format(sub_records, columns)
self._rename_column(
sub_records,
callback.callback_name,
subscription.topic_name
)
return sub_records
def _publish_records(
self,
publisher: PublisherStructValue
) -> RecordsInterface:
"""
Return publish records.
Parameters
----------
        publisher : PublisherStructValue
target publisher
Returns
-------
RecordsInterface
Columns
- [topic_name]/rclcpp_publish_timestamp
- [topic_name]/rclcpp_intra_publish_timestamp
- [topic_name]/rclcpp_inter_publish_timestamp
- [topic_name]/rcl_publish_timestamp
- [topic_name]/dds_write_timestamp
- [topic_name]/message_timestamp
- [topic_name]/source_timestamp
"""
publisher_handles = self._helper.get_publisher_handles(publisher)
pub_records = self._source.publish_records(publisher_handles)
columns = [
COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCL_PUBLISH_TIMESTAMP,
COLUMN_NAME.DDS_WRITE_TIMESTAMP,
COLUMN_NAME.MESSAGE_TIMESTAMP,
COLUMN_NAME.SOURCE_TIMESTAMP,
]
self._format(pub_records, columns)
self._rename_column(pub_records, None, publisher.topic_name)
return pub_records
def publish_records(
self,
publisher: PublisherStructValue
) -> RecordsInterface:
"""
Return publish records.
Parameters
----------
        publisher : PublisherStructValue
target publisher
Returns
-------
RecordsInterface
Columns
- [topic_name]/rclcpp_publish_timestamp
- [topic_name]/rclcpp_intra_publish_timestamp
- [topic_name]/rclcpp_inter_publish_timestamp
- [topic_name]/rcl_publish_timestamp
- [topic_name]/dds_write_timestamp
- [topic_name]/message_timestamp
- [topic_name]/source_timestamp
---
- [topic_name]/tilde_publish_timestamp
- [topic_name]/tilde_message_id
"""
tilde_publishers = self._helper.get_tilde_publishers(publisher)
if len(tilde_publishers) == 0:
return self._publish_records(publisher)
return self._publish_records_with_tilde(publisher)
def _publish_records_with_tilde(
self,
publisher: PublisherStructValue
) -> RecordsInterface:
"""
Return publish records.
Parameters
----------
        publisher : PublisherStructValue
target publisher
Returns
-------
RecordsInterface
Columns
- [topic_name]/rclcpp_publish_timestamp
- [topic_name]/rclcpp_intra_publish_timestamp
- [topic_name]/rclcpp_inter_publish_timestamp
- [topic_name]/rcl_publish_timestamp
- [topic_name]/dds_write_timestamp
- [topic_name]/message_timestamp
- [topic_name]/source_timestamp
- [topic_name]/tilde_publish_timestamp
- [topic_name]/tilde_message_id
"""
publisher_handles = self._helper.get_publisher_handles(publisher)
pub_records = self._source.publish_records(publisher_handles)
tilde_publishers = self._helper.get_tilde_publishers(publisher)
tilde_records = self._source.tilde_publish_records(tilde_publishers)
pub_records = merge_sequencial(
left_records=tilde_records,
right_records=pub_records,
left_stamp_key='tilde_publish_timestamp',
right_stamp_key='rclcpp_publish_timestamp',
join_left_key=None,
join_right_key=None,
columns=Columns(tilde_records.columns + pub_records.columns).as_list(),
how='right',
progress_label='binding: tilde_records',
)
columns = [
COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCL_PUBLISH_TIMESTAMP,
COLUMN_NAME.DDS_WRITE_TIMESTAMP,
COLUMN_NAME.MESSAGE_TIMESTAMP,
COLUMN_NAME.SOURCE_TIMESTAMP,
COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP,
COLUMN_NAME.TILDE_MESSAGE_ID,
]
self._format(pub_records, columns)
self._rename_column(pub_records, None, publisher.topic_name)
return pub_records
def tilde_records(
self,
subscription: SubscriptionStructValue,
publisher: PublisherStructValue
) -> RecordsInterface:
assert subscription.callback is not None
publisher_addrs = self._helper.get_tilde_publishers(publisher)
subscription_addr = self._helper.get_tilde_subscription(subscription.callback)
assert len(publisher_addrs) > 0
assert subscription_addr is not None
pub_records = self._source.tilde_publish_records(publisher_addrs)
sub_records = self._source.tilde_subscribe_records(subscription_addr)
records = merge(
left_records=sub_records,
right_records=pub_records,
join_left_key=COLUMN_NAME.TILDE_MESSAGE_ID,
join_right_key=COLUMN_NAME.TILDE_MESSAGE_ID,
columns=Columns(sub_records.columns + pub_records.columns).as_list(),
how='left',
progress_label='binding: tilde pub and sub records'
)
columns = [
COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP,
COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP
]
self._format(records, columns)
self._rename_column(records, subscription.callback_name, subscription.topic_name)
return records
def get_rmw_implementation(self) -> str:
return self._lttng.get_rmw_impl()
def get_qos(
self,
pub_sub: Union[PublisherStructValue, SubscriptionStructValue]
) -> Qos:
if isinstance(pub_sub, SubscriptionStructValue):
sub_cb = pub_sub.callback
if sub_cb is None:
                raise InvalidArgumentError('Failed to get callback information. '
                                           'subscription.callback is None')
sub_cb_lttng = self._helper.get_lttng_subscription(sub_cb)
return self._lttng.get_subscription_qos(sub_cb_lttng)
pubs_lttng = self._helper.get_lttng_publishers(pub_sub)
if len(pubs_lttng) == 0:
raise InvalidArgumentError('No publisher matching the criteria was found.')
if len(pubs_lttng) > 1:
logger.warning(
                'Multiple publishers matching your criteria were found. '
                'The QoS of the first matching publisher will be returned.')
return self._lttng.get_publisher_qos(pubs_lttng[0])
def variable_passing_records(
self,
variable_passing_info: VariablePassingStructValue
) -> RecordsInterface:
"""
Return variable passing records.
Parameters
----------
        variable_passing_info : VariablePassingStructValue
target variable passing info.
Returns
-------
RecordsInterface
Columns
- [callback_name]/callback_end_timestamp
- [callback_name]/callback_start_timestamp
"""
read_records: RecordsInterface = self.callback_records(
variable_passing_info.callback_read)
write_records: RecordsInterface = self.callback_records(
variable_passing_info.callback_write)
read_records.drop_columns([read_records.columns[-1]]) # callback end
write_records.drop_columns([write_records.columns[0]]) # callback_start
columns = [
write_records.columns[0],
read_records.columns[0],
]
merged_records = merge_sequencial(
left_records=write_records,
right_records=read_records,
left_stamp_key=columns[0],
right_stamp_key=columns[1],
join_left_key=None,
join_right_key=None,
columns=columns,
how='left_use_latest',
progress_label='binding: callback_end and callback_start'
)
merged_records.sort(columns[0])
self._format(merged_records, columns)
return merged_records
def is_intra_process_communication(
self,
communication_value: CommunicationStructValue
) -> Optional[bool]:
intra_record = self._compose_intra_proc_comm_records(communication_value)
return len(intra_record) > 0
def _compose_intra_proc_comm_records(
self,
comm_info: CommunicationStructValue,
) -> RecordsInterface:
"""
Compose intra process communication records.
Parameters
----------
        comm_info : CommunicationStructValue
Target communication info.
Returns
-------
RecordsInterface
Columns
- [topic_name]/rclcpp_publish_timestamp
- [callback_name]/callback_start_timestamp
"""
publisher = comm_info.publisher
subscription_cb = comm_info.subscribe_callback
assert subscription_cb is not None
assert isinstance(subscription_cb, SubscriptionCallbackStructValue)
publisher_handles = self._helper.get_publisher_handles(publisher)
callback_object_intra = self._helper.get_subscription_callback_object_intra(
subscription_cb)
records = self._source.intra_comm_records(publisher_handles, callback_object_intra)
columns = [
COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP,
COLUMN_NAME.CALLBACK_START_TIMESTAMP,
]
self._format(records, columns)
records.rename_columns({
COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP
})
self._rename_column(records, comm_info.subscribe_callback_name, comm_info.topic_name)
return records
def _compose_inter_proc_comm_records(
self,
comm_value: CommunicationStructValue
) -> RecordsInterface:
"""
        Compose inter process communication records.
Parameters
----------
comm_value : CommunicationStructValue
target communication value.
Returns
-------
RecordsInterface
Columns
- [topic_name]/rclcpp_publish_timestamp
- [topic_name]/rcl_publish_timestamp
- [topic_name]/dds_write_timestamp
            - [callback_name]/callback_start_timestamp
"""
publisher = comm_value.publisher
subscription_cb = comm_value.subscribe_callback
assert subscription_cb is not None
assert isinstance(subscription_cb, SubscriptionCallbackStructValue)
publisher_handles = self._helper.get_publisher_handles(publisher)
callback_object = self._helper.get_subscription_callback_object_inter(subscription_cb)
records = self._source.inter_comm_records(publisher_handles, callback_object)
columns = [
COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCL_PUBLISH_TIMESTAMP,
COLUMN_NAME.DDS_WRITE_TIMESTAMP,
COLUMN_NAME.CALLBACK_START_TIMESTAMP
]
self._format(records, columns)
records.rename_columns({
COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP
})
self._rename_column(records, comm_value.subscribe_callback_name, comm_value.topic_name)
return records
@staticmethod
def _format(records: RecordsInterface, columns: List[str]):
drop = list(set(records.columns) - set(columns))
records.drop_columns(drop)
records.reindex(columns)
@staticmethod
def _rename_column(
records: RecordsInterface,
callback_name: Optional[str],
topic_name: Optional[str]
) -> None:
rename_dict = {}
if COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP] = \
f'{topic_name}/{COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP}'
if COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP] = \
f'{topic_name}/{COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP}'
if COLUMN_NAME.CALLBACK_START_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.CALLBACK_START_TIMESTAMP] = \
f'{callback_name}/{COLUMN_NAME.CALLBACK_START_TIMESTAMP}'
if COLUMN_NAME.CALLBACK_END_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.CALLBACK_END_TIMESTAMP] = \
f'{callback_name}/{COLUMN_NAME.CALLBACK_END_TIMESTAMP}'
if COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP] = \
f'{topic_name}/{COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP}'
if COLUMN_NAME.RCL_PUBLISH_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.RCL_PUBLISH_TIMESTAMP] = \
f'{topic_name}/{COLUMN_NAME.RCL_PUBLISH_TIMESTAMP}'
if COLUMN_NAME.DDS_WRITE_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.DDS_WRITE_TIMESTAMP] = \
f'{topic_name}/{COLUMN_NAME.DDS_WRITE_TIMESTAMP}'
if COLUMN_NAME.MESSAGE_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.MESSAGE_TIMESTAMP] = \
f'{topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}'
if COLUMN_NAME.SOURCE_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.SOURCE_TIMESTAMP] = \
f'{topic_name}/{COLUMN_NAME.SOURCE_TIMESTAMP}'
if COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP] = \
f'{topic_name}/{COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP}'
if COLUMN_NAME.TILDE_MESSAGE_ID in records.columns:
rename_dict[COLUMN_NAME.TILDE_MESSAGE_ID] = \
f'{topic_name}/{COLUMN_NAME.TILDE_MESSAGE_ID}'
if COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP in records.columns:
rename_dict[COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP] = \
f'{topic_name}/{COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP}'
records.rename_columns(rename_dict)
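        # For illustration (hypothetical names): with callback_name='timer_cb' and
        # topic_name='/chatter', 'callback_start_timestamp' is renamed to
        # 'timer_cb/callback_start_timestamp' and 'rclcpp_publish_timestamp' to
        # '/chatter/rclcpp_publish_timestamp'.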
class RecordsProviderLttngHelper:
def __init__(
self,
lttng: Lttng
) -> None:
from .bridge import LttngBridge
self._bridge = LttngBridge(lttng)
def get_callback_objects(
self,
callback: CallbackStructValue
) -> Tuple[int, Optional[int]]:
if isinstance(callback, TimerCallbackStructValue):
return self.get_timer_callback_object(callback), None
if isinstance(callback, SubscriptionCallbackStructValue):
obj = self.get_subscription_callback_object_inter(callback)
obj_intra = self.get_subscription_callback_object_intra(callback)
if obj_intra is not None:
return obj, obj_intra
return obj, None
msg = 'Failed to get callback object. '
msg += f'{callback.callback_type.type_name} is not supported.'
raise UnsupportedTypeError(msg)
def get_timer_callback_object(
self,
callback: TimerCallbackStructValue
) -> int:
callback_lttng = self._bridge.get_timer_callback(callback)
return callback_lttng.callback_object
def get_subscription_callback_objects(
self,
callback: SubscriptionCallbackStructValue
) -> Tuple[int, Optional[int]]:
return self.get_callback_objects(callback)
def get_subscription_callback_object_inter(
self,
callback: SubscriptionCallbackStructValue
) -> int:
callback_lttng = self._bridge.get_subscription_callback(callback)
return callback_lttng.callback_object
def get_subscription_callback_object_intra(
self,
callback: SubscriptionCallbackStructValue
) -> Optional[int]:
callback_lttng = self._bridge.get_subscription_callback(callback)
return callback_lttng.callback_object_intra
def get_tilde_subscription(
self,
callback: SubscriptionCallbackStructValue
) -> Optional[int]:
callback_lttng = self._bridge.get_subscription_callback(callback)
return callback_lttng.tilde_subscription
def get_publisher_handles(
self,
publisher: PublisherStructValue
) -> List[int]:
publisher_lttng = self._bridge.get_publishers(publisher)
return [pub_info.publisher_handle
for pub_info
in publisher_lttng]
def get_tilde_publishers(
self,
publisher_info: PublisherStructValue
) -> List[int]:
publisher_lttng = self._bridge.get_publishers(publisher_info)
publisher = [pub_info.tilde_publisher
for pub_info
in publisher_lttng
if pub_info.tilde_publisher is not None]
return publisher
def get_lttng_publishers(
self,
publisher: PublisherStructValue
) -> List[PublisherValueLttng]:
return self._bridge.get_publishers(publisher)
def get_lttng_subscription(
self,
callback: SubscriptionCallbackStructValue
) -> SubscriptionCallbackValueLttng:
return self._bridge.get_subscription_callback(callback)
class NodeRecordsCallbackChain:
def __init__(
self,
provider: RecordsProviderLttng,
node_path: NodePathStructValue,
) -> None:
self._provider = provider
self._validate(node_path)
self._val = node_path
def to_records(self):
chain_info = self._val.child
if isinstance(chain_info[0], CallbackStructValue):
cb_info = chain_info[0]
records = self._provider.callback_records(cb_info)
else:
var_pass_info = chain_info[0]
records = self._provider.variable_passing_records(var_pass_info)
for chain_element in chain_info[1:]:
if isinstance(chain_element, CallbackStructValue):
records_ = self._provider.callback_records(chain_element)
join_key = records_.columns[0]
records = merge(
left_records=records,
right_records=records_,
join_left_key=join_key,
join_right_key=join_key,
                    columns=Columns(records.columns + records_.columns).as_list(),
how='left',
progress_label='binding: callback_start and callback end'
)
continue
if isinstance(chain_element, VariablePassingStructValue):
records_ = self._provider.variable_passing_records(chain_element)
# self._rename_var_pass_records(records_, chain_element)
join_key = records_.columns[0]
records = merge(
left_records=records,
right_records=records_,
join_left_key=join_key,
join_right_key=join_key,
columns=Columns(records.columns + records_.columns).as_list(),
how='left',
progress_label='binding: callback_end and callback start'
)
continue
last_element = chain_info[-1]
if isinstance(last_element, CallbackStructValue) \
and self._val.publisher is not None:
last_callback_end_name = Util.filter_items(
lambda x: COLUMN_NAME.CALLBACK_END_TIMESTAMP in x, records.columns)[-1]
records.drop_columns([last_callback_end_name])
last_callback_start_name = Util.filter_items(
lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, records.columns)[-1]
publish_records = self._provider.publish_records(self._val.publisher)
publish_column = publish_records.columns[0]
columns = records.columns + [publish_column]
records = merge_sequencial(
left_records=records,
right_records=publish_records,
join_left_key=None,
join_right_key=None,
left_stamp_key=last_callback_start_name,
right_stamp_key=publish_column,
columns=Columns(records.columns + publish_records.columns).as_list(),
how='left',
progress_label='binding: callback_start and publish',
)
records.drop_columns(list(set(records.columns) - set(columns)))
records.reindex(columns)
return records
@staticmethod
def _validate(
node_path: NodePathStructValue,
) -> None:
        if node_path.callbacks is None:
            raise UnsupportedNodeRecordsError('callback values is None.')
if not isinstance(node_path.message_context, CallbackChain):
msg = 'node_path.message context is not CallbackChain'
raise UnsupportedNodeRecordsError(msg)
head_callback = node_path.callbacks[0]
tail_callback = node_path.callbacks[-1]
if node_path.publish_topic_name is not None and \
tail_callback.publish_topic_names is not None and \
len(tail_callback.publish_topic_names) != 0 and \
node_path.publish_topic_name not in tail_callback.publish_topic_names:
raise UnsupportedNodeRecordsError('')
if node_path.subscribe_topic_name is not None and \
node_path.subscribe_topic_name != head_callback.subscribe_topic_name:
raise UnsupportedNodeRecordsError('')
class NodeRecordsInheritUniqueTimestamp:
def __init__(
self,
provider: RecordsProviderLttng,
node_path: NodePathStructValue,
) -> None:
if node_path.message_context is None:
raise UnsupportedNodeRecordsError('node_path.message context is None')
if not isinstance(node_path.message_context, InheritUniqueStamp):
msg = 'node_path.message context is not InheritUniqueStamp'
raise UnsupportedNodeRecordsError(msg)
self._provider = provider
self._context: InheritUniqueStamp = node_path.message_context
self._validate(node_path, self._context)
self._node_path = node_path
def to_records(self):
sub_records = self._provider.subscribe_records(self._node_path.subscription)
pub_records = self._provider.publish_records(self._node_path.publisher)
columns = [
sub_records.columns[0],
pub_records.columns[0],
]
join_left_key = f'{self._node_path.subscribe_topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}'
join_right_key = f'{self._node_path.publish_topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}'
pub_sub_records = merge_sequencial(
left_records=sub_records,
right_records=pub_records,
left_stamp_key=sub_records.columns[0],
right_stamp_key=pub_records.columns[0],
join_left_key=join_left_key,
join_right_key=join_right_key,
columns=Columns(sub_records.columns + pub_records.columns).as_list(),
how='left_use_latest',
progress_label='binding: inherit unique timestamp',
)
drop_columns = list(set(pub_sub_records.columns) - set(columns))
pub_sub_records.drop_columns(drop_columns)
pub_sub_records.reindex(columns)
return pub_sub_records
@staticmethod
def _validate(
node_path: NodePathStructValue,
context: InheritUniqueStamp,
) -> None:
def is_valid() -> bool:
if context.publisher_topic_name != node_path.publish_topic_name:
return False
if context.subscription_topic_name != node_path.subscribe_topic_name:
return False
return True
if is_valid():
return None
msg = f'InheritUniqueStamp cannot build records. \n{node_path} \n{context}'
raise UnsupportedNodeRecordsError(msg)
class NodeRecordsUseLatestMessage:
def __init__(
self,
provider: RecordsProviderLttng,
node_path: NodePathStructValue,
) -> None:
if node_path.message_context is None:
raise UnsupportedNodeRecordsError('node_path.message context is None')
if not isinstance(node_path.message_context, UseLatestMessage):
raise UnsupportedNodeRecordsError('node_path.message context is not UseLatestMessage')
self._provider = provider
self._context: UseLatestMessage = node_path.message_context
self._validate(node_path, self._context)
self._node_path = node_path
def to_records(self):
sub_records = self._provider.subscribe_records(self._node_path.subscription)
pub_records = self._provider.publish_records(self._node_path.publisher)
columns = [
sub_records.columns[0],
f'{self._node_path.publish_topic_name}/rclcpp_publish_timestamp',
]
pub_sub_records = merge_sequencial(
left_records=sub_records,
right_records=pub_records,
left_stamp_key=sub_records.columns[0],
right_stamp_key=pub_records.columns[0],
join_left_key=None,
join_right_key=None,
columns=Columns(sub_records.columns + pub_records.columns).as_list(),
how='left_use_latest',
progress_label='binding use_latest_message.'
)
drop_columns = list(set(pub_sub_records.columns) - set(columns))
pub_sub_records.drop_columns(drop_columns)
pub_sub_records.reindex(columns)
return pub_sub_records
@staticmethod
def _validate(
node_path: NodePathStructValue,
context: UseLatestMessage,
) -> None:
def is_valid() -> bool:
if context.publisher_topic_name != node_path.publish_topic_name:
return False
if context.subscription_topic_name != node_path.subscribe_topic_name:
return False
return True
if is_valid():
return None
msg = f'UseLatest cannot build records. \n{node_path} \n{context}'
raise UnsupportedNodeRecordsError(msg)
class NodeRecordsTilde:
def __init__(
self,
provider: RecordsProviderLttng,
node_path: NodePathStructValue,
) -> None:
        if node_path.message_context is None:
            raise UnsupportedNodeRecordsError('node_path.message_context is None')
        if not isinstance(node_path.message_context, Tilde):
            raise UnsupportedNodeRecordsError('node_path.message_context is not Tilde')
self._provider = provider
self._context: MessageContext = node_path.message_context
self._validate(node_path, self._context)
self._node_path = node_path
def to_records(self):
tilde_records = self._provider.tilde_records(
self._node_path.subscription, self._node_path.publisher)
sub_records = self._provider.subscribe_records(self._node_path.subscription)
pub_records = self._provider.publish_records(self._node_path.publisher)
left_stamp_key = Util.find_one(
lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, sub_records.columns)
right_stamp_key = Util.find_one(
lambda x: COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP in x, sub_records.columns)
records = merge_sequencial(
left_records=sub_records,
right_records=tilde_records,
left_stamp_key=left_stamp_key,
right_stamp_key=right_stamp_key,
join_left_key=None,
join_right_key=None,
columns=Columns(sub_records.columns + tilde_records.columns).as_list(),
how='left',
progress_label='binding tilde subscribe records.'
)
left_stamp_key = Util.find_one(
lambda x: COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP in x, records.columns)
right_stamp_key = Util.find_one(
lambda x: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in x, pub_records.columns)
records = merge_sequencial(
left_records=records,
right_records=pub_records,
left_stamp_key=left_stamp_key,
right_stamp_key=right_stamp_key,
join_left_key=None,
join_right_key=None,
columns=Columns(records.columns + pub_records.columns).as_list(),
how='left',
progress_label='binding tilde publish records.'
)
columns = [
Util.find_one(lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, records.columns),
Util.find_one(lambda x: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in x, records.columns),
]
drop_columns = list(set(records.columns) - set(columns))
records.drop_columns(drop_columns)
records.reindex(columns)
return records
@staticmethod
def _validate(
node_path: NodePathStructValue,
context: MessageContext,
) -> None:
def is_valid() -> bool:
if not isinstance(context, Tilde):
return False
if context.publisher_topic_name != node_path.publish_topic_name:
return False
if context.subscription_topic_name != node_path.subscribe_topic_name:
return False
return True
if is_valid():
return None
        msg = f'Tilde cannot build records. \n{node_path} \n{context}'
raise UnsupportedNodeRecordsError(msg)
class FilteredRecordsSource:
def __init__(self, lttng: Lttng):
self._lttng = lttng
def tilde_subscribe_records(
self,
tilde_subscription: int
) -> RecordsInterface:
"""
Compose filtered tilde subscribe records.
Parameters
----------
tilde_subscription : int
Returns
-------
RecordsInterface
Equivalent to the following process.
records = lttng.compose_tilde_subscribe_records()
records.filter_if(
lambda x: x.get('tilde_subscription') == tilde_subscription
)
            records.drop_columns(['tilde_subscription'])
"""
sub_records = RecordsFactory.create_instance(
None,
[
COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP,
COLUMN_NAME.TILDE_SUBSCRIPTION,
COLUMN_NAME.TILDE_MESSAGE_ID
]
)
if tilde_subscription is not None and \
tilde_subscription in self._grouped_tilde_sub_records:
sub_records_ = self._grouped_tilde_sub_records[tilde_subscription].clone()
sub_records.concat(sub_records_)
sub_records.drop_columns([COLUMN_NAME.TILDE_SUBSCRIPTION])
return sub_records
def sub_records(
self,
inter_callback_object: int,
intra_callback_object: Optional[int]
) -> RecordsInterface:
"""
Compose filtered subscribe records.
Parameters
----------
inter_callback_object : int
intra_callback_object : Optional[int]
Returns
-------
RecordsInterface
Equivalent to the following process.
records = lttng.compose_subscribe_records()
records.filter_if(
lambda x: x.get('callback_object') in [
inter_callback_object, intra_callback_object
]
)
"""
sub_records = RecordsFactory.create_instance(
None,
[
COLUMN_NAME.CALLBACK_START_TIMESTAMP,
COLUMN_NAME.MESSAGE_TIMESTAMP,
COLUMN_NAME.SOURCE_TIMESTAMP,
]
)
records = self._grouped_sub_records
if inter_callback_object in records:
sub_records.concat(records[inter_callback_object].clone())
if intra_callback_object is not None and intra_callback_object in records:
intra_sub_records = records[intra_callback_object].clone()
sub_records.concat(intra_sub_records)
sub_records.sort(COLUMN_NAME.CALLBACK_START_TIMESTAMP)
return sub_records
def inter_comm_records(
self,
publisher_handles: List[int],
callback_object: int
) -> RecordsInterface:
"""
Compose filtered inter communication records.
Parameters
----------
publisher_handles : List[int]
callback_object : int
Returns
-------
RecordsInterface
Equivalent to the following process.
records = lttng.compose_inter_proc_comm_records()
records.filter_if(
lambda x: x.get('callback_object') == callback_object and
x.get('publisher_handle') in publisher_handles
)
"""
records = RecordsFactory.create_instance(
None,
[
COLUMN_NAME.CALLBACK_OBJECT,
COLUMN_NAME.CALLBACK_START_TIMESTAMP,
COLUMN_NAME.PUBLISHER_HANDLE,
COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCL_PUBLISH_TIMESTAMP,
COLUMN_NAME.DDS_WRITE_TIMESTAMP
]
)
for publisher_handle in publisher_handles:
key = (callback_object, publisher_handle)
if key in self._grouped_inter_comm_records:
comm_records = self._grouped_inter_comm_records[key].clone()
records.concat(comm_records)
records.sort(COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP)
return records
def intra_comm_records(
self,
publisher_handles: List[int],
intra_callback_object: Optional[int]
) -> RecordsInterface:
"""
Compose filtered intra communication records.
Parameters
----------
        publisher_handles : List[int]
        intra_callback_object : Optional[int]
Returns
-------
RecordsInterface
Equivalent to the following process.
records = lttng.compose_intra_proc_comm_records()
records.filter_if(
                lambda x: x.get('callback_object') == intra_callback_object and
x.get('publisher_handle') in publisher_handles
)
"""
records = RecordsFactory.create_instance(
None,
[
COLUMN_NAME.CALLBACK_OBJECT,
COLUMN_NAME.CALLBACK_START_TIMESTAMP,
COLUMN_NAME.PUBLISHER_HANDLE,
COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP,
COLUMN_NAME.MESSAGE_TIMESTAMP
]
)
if intra_callback_object is not None:
for publisher_handle in publisher_handles:
key = (intra_callback_object, publisher_handle)
if key in self._grouped_intra_comm_records:
records_ = self._grouped_intra_comm_records[key].clone()
records.concat(records_)
records.sort(COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP)
return records
def publish_records(
self,
publisher_handles: List[int],
) -> RecordsInterface:
"""
Compose publish records.
Parameters
----------
publisher_handles : List[int]
Returns
-------
RecordsInterface
Equivalent to the following process.
records = lttng.compose_publish_records()
records.filter_if(
lambda x: x.get('publisher_handle') in publisher_handles
            )
"""
records = self._grouped_publish_records
pub_records = RecordsFactory.create_instance(
None,
[
COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP,
COLUMN_NAME.RCL_PUBLISH_TIMESTAMP,
COLUMN_NAME.DDS_WRITE_TIMESTAMP,
COLUMN_NAME.MESSAGE_TIMESTAMP,
COLUMN_NAME.SOURCE_TIMESTAMP,
COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP,
COLUMN_NAME.TILDE_MESSAGE_ID,
]
)
for publisher_handle in publisher_handles:
if publisher_handle in records:
inter_pub_records = records[publisher_handle].clone()
pub_records.concat(inter_pub_records)
return pub_records
def tilde_publish_records(
self,
tilde_publishers: Sequence[int]
) -> RecordsInterface:
"""
Compose tilde publish records.
Parameters
----------
tilde_publishers : Sequence[int]
Returns
-------
RecordsInterface
Equivalent to the following process.
records = lttng.compose_tilde_publish_records()
records.filter_if(
lambda x: x.get('tilde_publisher') in tilde_publishers
)
"""
tilde_grouped_records = self._grouped_tilde_pub_records
tilde_records = RecordsFactory.create_instance(
None,
[
COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP,
COLUMN_NAME.TILDE_PUBLISHER,
COLUMN_NAME.TILDE_MESSAGE_ID,
COLUMN_NAME.TILDE_SUBSCRIPTION,
])
for tilde_publisher in tilde_publishers:
if tilde_publisher in tilde_grouped_records:
tilde_records_ = tilde_grouped_records[tilde_publisher].clone()
tilde_records.concat(tilde_records_)
tilde_records.drop_columns([COLUMN_NAME.TILDE_PUBLISHER])
return tilde_records
def _expand_key_tuple(
self,
group: Dict[Tuple[int, ...], RecordsInterface]
) -> Dict[int, RecordsInterface]:
group_: Dict[int, RecordsInterface] = {}
for key in group.keys():
assert len(key) == 1
group_[key[0]] = group[key]
return group_
def callback_records(
self,
inter_callback_object: int,
intra_callback_object: Optional[int]
) -> RecordsInterface:
"""
Compose callback records.
Parameters
----------
inter_callback_object : int
intra_callback_object : Optional[int]
Returns
-------
RecordsInterface
Equivalent to the following process.
records = lttng.compose_callback_records()
records.filter_if(
                lambda x: x.get('callback_object') in [inter_callback_object, intra_callback_object]
)
"""
records = self._grouped_callback_records
callback_records = RecordsFactory.create_instance(
None,
[COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.CALLBACK_END_TIMESTAMP]
)
if inter_callback_object in records:
inter_callback_records = records[inter_callback_object].clone()
callback_records.concat(inter_callback_records)
if intra_callback_object is not None and intra_callback_object in records:
intra_callback_records = records[intra_callback_object].clone()
callback_records.concat(intra_callback_records)
callback_records.sort(COLUMN_NAME.CALLBACK_START_TIMESTAMP)
return callback_records
@cached_property
def _grouped_callback_records(self) -> Dict[int, RecordsInterface]:
records = self._lttng.compose_callback_records()
group = records.groupby([COLUMN_NAME.CALLBACK_OBJECT])
return self._expand_key_tuple(group)
@cached_property
def _grouped_inter_comm_records(self) -> Dict[Tuple[int, ...], RecordsInterface]:
records = self._lttng.compose_inter_proc_comm_records()
return records.groupby([COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.PUBLISHER_HANDLE])
@cached_property
def _grouped_intra_comm_records(self) -> Dict[Tuple[int, ...], RecordsInterface]:
records = self._lttng.compose_intra_proc_comm_records()
return records.groupby([COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.PUBLISHER_HANDLE])
@cached_property
def _grouped_publish_records(self) -> Dict[int, RecordsInterface]:
records = self._lttng.compose_publish_records()
group = records.groupby([COLUMN_NAME.PUBLISHER_HANDLE])
return self._expand_key_tuple(group)
@cached_property
def _grouped_sub_records(self) -> Dict[int, RecordsInterface]:
records = self._lttng.compose_subscribe_records()
group = records.groupby([COLUMN_NAME.CALLBACK_OBJECT])
return self._expand_key_tuple(group)
@cached_property
def _grouped_tilde_pub_records(self) -> Dict[int, RecordsInterface]:
records = self._lttng.compose_tilde_publish_records()
group = records.groupby([COLUMN_NAME.TILDE_PUBLISHER])
return self._expand_key_tuple(group)
@cached_property
def _grouped_tilde_sub_records(self) -> Dict[int, RecordsInterface]:
records = self._lttng.compose_tilde_subscribe_records()
group = records.groupby([COLUMN_NAME.TILDE_SUBSCRIPTION])
return self._expand_key_tuple(group)
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import yaml
class HexahueMap():
def __init__(self, space_color):
pink = (255, 0, 255)
red = (255, 0, 0)
green = (0, 255, 0)
yellow = (255, 255, 0)
blue = (0, 0, 255)
sky = (0, 255, 255)
white = (255, 255, 255)
gray = (128, 128, 128)
black = (0, 0, 0)
self.hmap = {}
self.hmap[(pink, red, green, yellow, blue, sky)] = 'A'
self.hmap[(red, pink, green, yellow, blue, sky)] = 'B'
self.hmap[(red, green, pink, yellow, blue, sky)] = 'C'
self.hmap[(red, green, yellow, pink, blue, sky)] = 'D'
self.hmap[(red, green, yellow, blue, pink, sky)] = 'E'
self.hmap[(red, green, yellow, blue, sky, pink)] = 'F'
self.hmap[(green, red, yellow, blue, sky, pink)] = 'G'
self.hmap[(green, yellow, red, blue, sky, pink)] = 'H'
self.hmap[(green, yellow, blue, red, sky, pink)] = 'I'
self.hmap[(green, yellow, blue, sky, red, pink)] = 'J'
self.hmap[(green, yellow, blue, sky, pink, red)] = 'K'
self.hmap[(yellow, green, blue, sky, pink, red)] = 'L'
self.hmap[(yellow, blue, green, sky, pink, red)] = 'M'
self.hmap[(yellow, blue, sky, green, pink, red)] = 'N'
self.hmap[(yellow, blue, sky, pink, green, red)] = 'O'
self.hmap[(yellow, blue, sky, pink, red, green)] = 'P'
self.hmap[(blue, yellow, sky, pink, red, green)] = 'Q'
self.hmap[(blue, sky, yellow, pink, red, green)] = 'R'
self.hmap[(blue, sky, pink, yellow, red, green)] = 'S'
self.hmap[(blue, sky, pink, red, yellow, green)] = 'T'
self.hmap[(blue, sky, pink, red, green, yellow)] = 'U'
self.hmap[(sky, blue, pink, red, green, yellow)] = 'V'
self.hmap[(sky, pink, blue, red, green, yellow)] = 'W'
self.hmap[(sky, pink, red, blue, green, yellow)] = 'X'
self.hmap[(sky, pink, red, green, blue, yellow)] = 'Y'
self.hmap[(sky, pink, red, green, yellow, blue)] = 'Z'
self.hmap[(black, white, white, black, black, white)] = '.'
self.hmap[(white, black, black, white, white, black)] = ','
if space_color == 'black':
self.hmap[(black, black, black, black, black, black)] = ' '
elif space_color == 'white':
self.hmap[(white, white, white, white, white, white)] = ' '
elif space_color == 'all':
self.hmap[(black, black, black, black, black, black)] = ' '
self.hmap[(white, white, white, white, white, white)] = ' '
else:
raise Exception('[Error] invalid space setting: ' + space_color)
self.hmap[(black, gray, white, black, gray, white)] = '0'
self.hmap[(gray, black, white, black, gray, white)] = '1'
self.hmap[(gray, white, black, black, gray, white)] = '2'
self.hmap[(gray, white, black, gray, black, white)] = '3'
self.hmap[(gray, white, black, gray, white, black)] = '4'
self.hmap[(white, gray, black, gray, white, black)] = '5'
self.hmap[(white, black, gray, gray, white, black)] = '6'
self.hmap[(white, black, gray, white, gray, black)] = '7'
self.hmap[(white, black, gray, white, black, gray)] = '8'
self.hmap[(black, white, gray, white, black, gray)] = '9'
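# Minimal decoding sketch: a Hexahue glyph is a 2x3 grid of colours, and assuming the
# six colours are read row by row, left to right, a decoder can simply look the tuple
# up in `hmap`. The tuple below is the one mapped to 'A' above.
if __name__ == '__main__':
    hmap = HexahueMap('all').hmap
    glyph = ((255, 0, 255), (255, 0, 0), (0, 255, 0),
             (255, 255, 0), (0, 0, 255), (0, 255, 255))
    print(hmap.get(glyph, '?'))  # -> 'A'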
|
# Task 7
for n in range(1, 101):
    print(n, "Я не буду їсти палички Бобо на уроці")  # "I will not eat Bobo sticks in class"
|
"""
SoftLayer.ordering
~~~~~~~~~~~~~~~~~~
Ordering Manager
:license: MIT, see LICENSE for more details.
"""
class OrderingManager(object):
"""Manages hardware devices.
:param SoftLayer.API.Client client: an API client instance
"""
def __init__(self, client):
self.client = client
def get_packages_of_type(self, package_types, mask=None):
"""Get packages that match a certain type.
Each ordering package has a type, so return all packages that match
the types we are looking for
:param list package_types: List of strings representing the package
type keynames we are interested in.
:param string mask: Mask to specify the properties we want to retrieve
"""
package_service = self.client['Product_Package']
_filter = {
'type': {
'keyName': {
'operation': 'in',
'options': [
{'name': 'data',
'value': package_types}
],
},
},
}
packages = package_service.getAllObjects(mask=mask, filter=_filter)
packages = self.filter_outlet_packages(packages)
return packages
@staticmethod
def filter_outlet_packages(packages):
"""Remove packages designated as OUTLET.
Those type of packages must be handled in a different way,
and they are not supported at the moment.
:param packages: Dictionary of packages. Name and description keys
must be present in each of them.
"""
non_outlet_packages = []
for package in packages:
if all(['OUTLET' not in package.get('description', '').upper(),
'OUTLET' not in package.get('name', '').upper()]):
non_outlet_packages.append(package)
return non_outlet_packages
@staticmethod
def get_only_active_packages(packages):
"""Return only active packages.
If a package is active, it is eligible for ordering
This will inspect the 'isActive' property on the provided packages
:param packages Dictionary of packages, isActive key must be present
"""
active_packages = []
for package in packages:
if package['isActive']:
active_packages.append(package)
return active_packages
def get_package_by_type(self, package_type, mask=None):
"""Get a single package of a given type.
Syntactic sugar to retrieve a single package of a given type.
If multiple packages share the given type, this will return the first
one returned by the API.
If no packages are found, returns None
:param package_type string representing the package type key name
we are interested in
"""
packages = self.get_packages_of_type([package_type], mask)
if len(packages) == 0:
return None
else:
return packages.pop()
def get_package_id_by_type(self, package_type):
"""Return the package ID of a Product Package with a given type.
:param package_type string representing the package type key name
we are interested in
:raises ValueError when no package of the given type is found
"""
mask = "mask[id, name, description, isActive, type[keyName]]"
package = self.get_package_by_type(package_type, mask)
if package:
return package['id']
else:
raise ValueError("No package found for type: " + package_type)
def get_quotes(self):
"""Retrieve a list of quotes.
:return a list of SoftLayer_Billing_Order_Quote
"""
quotes = self.client['Account'].getActiveQuotes()
return quotes
def get_quote_details(self, quote_id):
"""Retrieve quote details.
:param quote_id ID number of target quote
"""
quote = self.client['Billing_Order_Quote'].getObject(id=quote_id)
return quote
def get_order_container(self, quote_id):
"""Generate an order container from a quote object.
:param quote_id ID number of target quote
"""
quote = self.client['Billing_Order_Quote']
container = quote.getRecalculatedOrderContainer(id=quote_id)
return container['orderContainers'][0]
def generate_order_template(self, quote_id, extra, quantity=1):
"""Generate a complete order template.
:param int quote_id: ID of target quote
:param list extra: List of dictionaries that have extra details about
the order such as hostname or domain names for
virtual servers or hardware nodes
:param int quantity: Number of ~things~ to order
"""
container = self.get_order_container(quote_id)
container['quantity'] = quantity
# NOTE(kmcdonald): This will only work with virtualGuests and hardware.
# There has to be a better way, since this is based on
# an existing quote that supposedly knows about this
# detail
if container['packageId'] == 46:
product_type = 'virtualGuests'
else:
product_type = 'hardware'
if len(extra) != quantity:
raise ValueError("You must specify extra for each server in the "
"quote")
container[product_type] = []
for extra_details in extra:
container[product_type].append(extra_details)
container['presetId'] = None
return container
def verify_quote(self, quote_id, extra, quantity=1):
"""Verifies that a quote order is valid.
:param int quote_id: ID for the target quote
        :param list extra: list of dictionaries with extra details (hostname,
                           domain, etc.) for each server in the quote
:param int quantity: Quantity to override default
"""
container = self.generate_order_template(quote_id, extra,
quantity=quantity)
return self.client['Product_Order'].verifyOrder(container)
def order_quote(self, quote_id, extra, quantity=1):
"""Places an order using a quote
:param int quote_id: ID for the target quote
        :param list extra: list of dictionaries with extra details (hostname,
                           domain, etc.) for each server in the order
:param int quantity: Quantity to override default
"""
container = self.generate_order_template(quote_id, extra,
quantity=quantity)
return self.client['Product_Order'].placeOrder(container)
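# Hedged usage sketch (assumes a SoftLayer client configured elsewhere; the package
# type key name and the printed fields are illustrative placeholders):
#
#   import SoftLayer
#   client = SoftLayer.create_client_from_env()
#   ordering = OrderingManager(client)
#   package_id = ordering.get_package_id_by_type('BARE_METAL_CPU')
#   for quote in ordering.get_quotes():
#       print(quote['id'], quote.get('name'))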
|
# BOJ 14501
import sys
si = sys.stdin.readline
t = [0] * 17   # t[i]: days needed for the consultation scheduled on day i
dp = [0] * 17  # dp[i]: pay for day i's consultation, later reused as the DP table
n = int(si())
for i in range(1, n + 1):
m, o = map(int, si().split())
t[i] = m
dp[i] = o
def solve(n):
    # dp[i] ends up holding the best total pay obtainable from day i onward:
    # skip day i if its consultation runs past day n, otherwise take the better
    # of skipping it (dp[i + 1]) and doing it (pay + dp[i + t[i]]).
    ans = 0
    for i in range(n, 0, -1):
        if i + t[i] > n + 1:
            dp[i] = dp[i + 1]
        else:
            dp[i] = max(dp[i + 1], dp[i] + dp[i + t[i]])
        ans = max(ans, dp[i])
    return ans
print(solve(n))
|
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from torchvision.transforms import InterpolationMode
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
def initialize(self, opt):
pass
def get_transform(opt):
transform_list = []
if opt.resize_or_crop == 'resize_and_crop':
osize = [opt.loadSize, opt.loadSize]
transform_list.append(transforms.Resize(osize, InterpolationMode.BICUBIC))
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'crop':
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'scale_width':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.fineSize)))
elif opt.resize_or_crop == 'scale_width_and_crop':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.loadSize)))
transform_list.append(transforms.RandomCrop(opt.fineSize))
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.RandomHorizontalFlip())
transform_list += [transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def __scale_width(img, target_width):
ow, oh = img.size
if (ow == target_width):
return img
w = target_width
h = int(target_width * oh / ow)
return img.resize((w, h), Image.BICUBIC)
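# Hedged usage sketch (the opt fields below mirror the attributes get_transform()
# reads; the values and the image path are placeholders):
#
#   from types import SimpleNamespace
#   opt = SimpleNamespace(resize_or_crop='resize_and_crop', loadSize=286,
#                         fineSize=256, isTrain=True, no_flip=False)
#   transform = get_transform(opt)
#   tensor = transform(Image.open('example.jpg').convert('RGB'))  # 3 x 256 x 256 in [-1, 1]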
|
"""
Created Oct 19, 2017
@author: Spencer Vatrt-Watts (github.com/Spenca)
"""
from __future__ import unicode_literals
from django.apps import AppConfig
class TenxConfig(AppConfig):
name = 'tenx'
|
import pyperclip
import math
class Affine_Cipher:
def __init__(self):
self.SYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890 !?.'
def check_key(self, key):
keyA = key // len(self.SYMBOLS)
keyB = key % len(self.SYMBOLS)
# Weak Key Checks
if keyA == 1:
print('Cipher is weak if key A is 1. Choose a different key.')
return False
if keyB == 0:
print('Cipher is weak if key B is 0. Choose a different key.')
return False
if keyA < 0 or keyB < 0 or keyB > len(self.SYMBOLS) - 1:
print('Key A must be greater than 0 and Key B must be between 0 and {}.'.format(len(self.SYMBOLS) - 1))
return False
if math.gcd(keyA, len(self.SYMBOLS)) != 1:
print("Key A {} and the symbol set size {} are not relatively prime. Choose a different key.".format(keyA, len(self.SYMBOLS)))
return False
return True
def mod_inv(self, a, m):
if math.gcd(a, m) != 1:
return False
u1, u2, u3 = 1, 0, a
v1, v2, v3 = 0, 1, m
while v3 != 0:
q = u3 // v3
v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
return u1 % m
def encrypt(self, plain_text, key):
keyA = key // len(self.SYMBOLS)
keyB = key % len(self.SYMBOLS)
cipher_text = []
for char in plain_text:
if char in self.SYMBOLS:
index = self.SYMBOLS.find(char)
cipher_text.append(self.SYMBOLS[(index * keyA + keyB) % len(self.SYMBOLS)])
else:
cipher_text.append(char)
return "".join(cipher_text)
def decrypt(self, cipher_text, key):
keyA = key // len(self.SYMBOLS)
keyB = key % len(self.SYMBOLS)
mod_inverse = self.mod_inv(keyA, len(self.SYMBOLS))
        if mod_inverse is False:
            print('Key A and the symbol set size are not relatively prime; cannot decrypt.')
            return None
plain_text = []
for char in cipher_text:
if char in self.SYMBOLS:
index = self.SYMBOLS.find(char)
plain_text.append(self.SYMBOLS[(index - keyB) * mod_inverse % len(self.SYMBOLS)])
else:
plain_text.append(char)
return "".join(plain_text)
def brute_force_decrypt(self, cipher_text):
for key in range(len(self.SYMBOLS) ** 2):
keyA = key // len(self.SYMBOLS)
if math.gcd(keyA, len(self.SYMBOLS)) != 1:
continue
decrypted_text = self.decrypt(cipher_text, key)
print("Key = {}, Plain text = {}".format(key, decrypted_text))
return None
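# Minimal round-trip sketch (keyA=5 and keyB=7 are arbitrary values that pass
# check_key() for the 66-character symbol set above):
#
#   cipher = Affine_Cipher()
#   key = 5 * len(cipher.SYMBOLS) + 7      # encodes keyA=5, keyB=7
#   cipher_text = cipher.encrypt('Hello World!', key)
#   assert cipher.decrypt(cipher_text, key) == 'Hello World!'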
def ask_user():
print("Select an option:")
print("1. To continue")
print("2. To exit")
option = input()
return option
if __name__ == "__main__":
affine_cipher = Affine_Cipher()
while True:
try:
print("Select an option:")
print("1. Encrypt a message")
print("2. Decrypt a message")
option = input()
if option == '1':
print("Enter plain text to be encrypted: ")
plain_text = input()
print("Enter a number (key) for encryption: ")
key = int(input())
while affine_cipher.check_key(key) == False:
print("Enter the new key for encryption: ")
key = int(input())
cipher_text = affine_cipher.encrypt(plain_text, key)
print("Cipher text =", cipher_text)
pyperclip.copy(cipher_text)
pyperclip.paste()
print("The cipher text has been copied to your clipboard" + "\n")
option = ask_user()
if option == '1':
continue
elif option == '2':
break
else:
print("Incorrect input.")
print("Exiting program")
break
elif option == '2':
print("Enter cipher text to be decrypted: ")
cipher_text = input()
print("Enter key for decryption: ")
print("If you do not know the key and would like to brute force the combinations, enter the word - crack")
key = input()
if key == 'crack':
affine_cipher.brute_force_decrypt(cipher_text)
else:
key = int(key)
plain_text = affine_cipher.decrypt(cipher_text, key)
print("Plain text =", plain_text)
pyperclip.copy(plain_text)
pyperclip.paste()
print("The plain text has been copied to your clipboard" + "\n")
option = ask_user()
if option == '1':
continue
elif option == '2':
print("Exiting program")
break
else:
print("Incorrect input.")
print("Exiting program")
break
else:
print("Incorrect input.")
option = ask_user()
if option == '1':
continue
elif option == '2':
print("Exiting program")
break
else:
print("Incorrect input.")
print("Exiting program")
break
        except Exception as e:
            print("Error:", e)
            option = ask_user()
if option == '1':
continue
elif option == '2':
print("Exiting program")
break
else:
print("Incorrect input.")
print("Exiting program")
break
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('stocks', '0003_auto_20151129_1623'),
]
operations = [
migrations.AlterField(
model_name='floor',
name='floorPlayer',
field=models.ForeignKey(to='stocks.Player', related_name='FloorPlayer'),
),
migrations.AlterField(
model_name='stock',
name='last_updated',
field=models.DateTimeField(default=datetime.datetime(2015, 11, 29, 22, 5, 30, 24205, tzinfo=utc)),
),
]
|
# Name: Breno Maurício de Freitas Viana
# NUSP: 11920060
# Course Code: SCC5830
# Year/Semester: 2021/1
# Assignment 5: Image Descriptors
import math
import numpy as np
import imageio
from scipy import ndimage
np.seterr(divide='ignore', invalid='ignore')
LEVELS = 256
# ----- (1) Read Parameters
# Get the location of the object image `f`
f = input().rstrip()
# Get the location of the large image `g`
g = input().rstrip()
# Get the quantisation parameter `b`
b = int(input())
# --- Load images
# Object image `f`
f = imageio.imread(f)
# Large image `g`
g = imageio.imread(g)
# ----- (2) Preprocessing and Quantisation
def luminance(img):
"""
    Take an RGB image and return a grayscale (luminance) image.
    """
    out = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
return out.astype(np.uint8)
# --- Convert the images to black&white
f = luminance(f)
g = luminance(g)
# --- Quantise the images to `b` bits
B = 8 - b
f = f >> B
g = g >> B
# ----- (3) Image Descriptors
def nh_descriptor(f):
"""
Return the normalized histogram descriptor.
"""
hist, _ = np.histogram(f, bins=[i for i in range(2 ** b + 1)])
hist = hist / hist.sum()
dc = hist / np.linalg.norm(hist)
return dc
def ht_descriptor(f):
"""
    Return the Haralick texture descriptors (intensity-level co-occurrence matrix).
    """
    # Calculate the co-occurrence matrix
N, M = f.shape
C = np.zeros((LEVELS, LEVELS))
for x in range(N - 1):
for y in range(M - 1):
i = f[x, y]
j = f[x + 1, y + 1]
C[i][j] += 1
C = C / C.sum()
#
# Computing the descriptors
N, M = C.shape
#
energy = np.power(C, 2).sum()
#
epsilon = 0.001
entropy = - (C * np.log(C + epsilon)).sum()
#
A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int)
contrast = (1 / math.pow(N, 2)) * (C * A).sum()
#
mu_i, si_i = 0, 0
mu_j, si_j = 0, 0
for k in range(N):
a1 = C[k,:].sum()
mu_i += k * a1
si_i += math.pow(k - mu_i, 2) * a1
#
a2 = C[:,k].sum()
mu_j += k * a2
si_j += math.pow(k - mu_j, 2) * a2
#
A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int)
correlation = (A * C).sum() - mu_i * mu_j
correlation /= (si_i * si_j)
#
homogeneity = 0
#
A = np.fromfunction(lambda i, j: (1 + abs(i - j)), (N, M), dtype=int)
homogeneity = (C * A).sum()
#
# Return the Haralick texture descriptors
dt = np.array([energy, entropy, contrast, correlation, homogeneity])
dt = dt / np.linalg.norm(dt)
return dt
def hg_descriptor(f):
"""
Return the histogram of oriented gradients descriptor.
"""
wsx = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
wsy = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
#
f = f.astype(np.float64)
fx = ndimage.convolve(f, wsx)
fy = ndimage.convolve(f, wsy)
#
N, M = f.shape
#
div = np.sqrt(np.power(fx, 2) + np.power(fy, 2)).sum()
Mg = np.sqrt(np.power(fx, 2) + np.power(fy, 2)) / div
#
sigma = np.zeros(f.shape)
sigma = np.arctan(fy / fx) + np.pi / 2
sigma = np.degrees(sigma)
sigma = np.digitize(sigma, np.arange(0, 180, 20))
sigma = sigma.astype(np.uint8)
#
dg = np.zeros(9)
for x in range(N):
for y in range(M):
dg[sigma[x][y] - 1] += Mg[x][y]
#
dg = dg / np.linalg.norm(dg)
return dg
# --- Compute the image descriptors
# Calculate the object image descriptors
dc = nh_descriptor(f)
dt = ht_descriptor(f)
dg = hg_descriptor(f)
d = np.concatenate((dc, dt, dg))
# ----- (4) Finding Our Object
def distance(d, di):
"""
Calculate the distance of two descriptors.
"""
return math.sqrt(np.power(d - di, 2).sum())
# --- Search for the object image location in the original image
size = f.shape[0]
step = size // 2
N, M = g.shape
N = N // step
M = M // step
dist = np.iinfo(np.uint8).max
pos_x = None
pos_y = None
for i in range(N - 1):
for j in range(M - 1):
# Calculate the window
window = g[i*step:i*step+size, j*step:j*step+size]
# Calculate the descriptors of the window
window_dc = nh_descriptor(window)
window_dt = ht_descriptor(window)
window_dg = hg_descriptor(window)
window_d = np.concatenate((window_dc, window_dt, window_dg))
# Calculate the distance between the window and the object image
ndist = distance(d, window_d)
if dist > ndist:
dist = ndist
pos_x, pos_y = i, j
# --- Print the found location
print(pos_x, pos_y)
|
"""Test trunk lock."""
import pytest
from tests.tesla_mock import TeslaMock
from teslajsonpy.controller import Controller
from teslajsonpy.trunk import TrunkLock
def test_has_battery(monkeypatch):
"""Test has_battery()."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_data = _mock.data_request_vehicle()
_lock = TrunkLock(_data, _controller)
assert not _lock.has_battery()
def test_is_locked_on_init(monkeypatch):
"""Test is_locked() after initialization."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_data = _mock.data_request_vehicle()
_lock = TrunkLock(_data, _controller)
assert _lock is not None
assert not _lock.is_locked()
@pytest.mark.asyncio
async def test_is_locked_after_update(monkeypatch):
"""Test is_locked() after an update."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["rt"] = 0
_lock = TrunkLock(_data, _controller)
await _lock.async_update()
assert _lock is not None
assert _lock.is_locked()
@pytest.mark.asyncio
async def test_unlock(monkeypatch):
"""Test unlock()."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["rt"] = 0
_lock = TrunkLock(_data, _controller)
await _lock.async_update()
await _lock.unlock()
assert _lock is not None
assert not _lock.is_locked()
@pytest.mark.asyncio
async def test_unlock_already_unlocked(monkeypatch):
"""Test unlock() when already unlocked."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["rt"] = 123
_lock = TrunkLock(_data, _controller)
await _lock.async_update()
await _lock.unlock()
assert _lock is not None
assert not _lock.is_locked()
# Reset to default for next tests
_data["vehicle_state"]["rt"] = 0
@pytest.mark.asyncio
async def test_lock(monkeypatch):
"""Test lock()."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["rt"] = 123
_lock = TrunkLock(_data, _controller)
await _lock.async_update()
await _lock.lock()
assert _lock is not None
assert _lock.is_locked()
# Reset to default for next tests
_data["vehicle_state"]["rt"] = 0
@pytest.mark.asyncio
async def test_lock_already_locked(monkeypatch):
"""Test lock() when already locked."""
_mock = TeslaMock(monkeypatch)
_controller = Controller(None)
_data = _mock.data_request_vehicle()
_data["vehicle_state"]["rt"] = 0
_lock = TrunkLock(_data, _controller)
await _lock.async_update()
await _lock.lock()
assert _lock is not None
assert _lock.is_locked()
|
import sys
from cx_Freeze import setup, Executable
setup(
name='YtMusic-Lib-Tracker',
url='https://github.com/czifumasa/ytmusic-lib-tracker',
author='Łukasz Lenart',
author_email='lukasz.lenart912@gmail.com',
version='0.1',
license='MIT',
description='Useful tools for youtube music. Exporting library to csv, tracking changes in library, summary of transfer from GPM',
long_description=open('README.md').read(),
options={"build_exe": {
'packages': ['ytmusicapi', 'unidecode'],
'excludes': ['tkinter', 'test', 'unittest', 'pydoc_data'],
'include_files': ['config.ini'],
'optimize': 2,
}},
executables=[Executable('ytmusiclibtracker.py', base='console', icon='ytmlt.ico', targetName='YTMusicLibTracker')]
)
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import RPi.GPIO as GPIO
import time
CS = 5
Clock = 25
Address = 24
DataOut = 23
Button = 7
class TRSensor(object):
def __init__(self,numSensors = 5):
self.numSensors = numSensors
self.calibratedMin = [0] * self.numSensors
self.calibratedMax = [1023] * self.numSensors
self.last_value = 0
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(Clock,GPIO.OUT)
GPIO.setup(Address,GPIO.OUT)
GPIO.setup(CS,GPIO.OUT)
GPIO.setup(DataOut,GPIO.IN,GPIO.PUD_UP)
GPIO.setup(Button,GPIO.IN,GPIO.PUD_UP)
"""
Reads the sensor values into an array. There *MUST* be space
for as many values as there were sensors specified in the constructor.
Example usage:
unsigned int sensor_values[8];
sensors.read(sensor_values);
The values returned are a measure of the reflectance in abstract units,
with higher values corresponding to lower reflectance (e.g. a black
surface or a void).
"""
def AnalogRead(self):
value = [0]*(self.numSensors+1)
#Read Channel0~channel6 AD value
for j in range(0,self.numSensors+1):
GPIO.output(CS, GPIO.LOW)
for i in range(0,4):
#sent 4-bit Address
if(((j) >> (3 - i)) & 0x01):
GPIO.output(Address,GPIO.HIGH)
else:
GPIO.output(Address,GPIO.LOW)
#read MSB 4-bit data
value[j] <<= 1
if(GPIO.input(DataOut)):
value[j] |= 0x01
GPIO.output(Clock,GPIO.HIGH)
GPIO.output(Clock,GPIO.LOW)
for i in range(0,6):
#read LSB 8-bit data
value[j] <<= 1
if(GPIO.input(DataOut)):
value[j] |= 0x01
GPIO.output(Clock,GPIO.HIGH)
GPIO.output(Clock,GPIO.LOW)
#no mean ,just delay
# for i in range(0,6):
# GPIO.output(Clock,GPIO.HIGH)
# GPIO.output(Clock,GPIO.LOW)
time.sleep(0.0001)
GPIO.output(CS,GPIO.HIGH)
# print value[1:]
return value[1:]
"""
Reads the sensors 10 times and uses the results for
calibration. The sensor values are not returned; instead, the
maximum and minimum values found over time are stored internally
and used for the readCalibrated() method.
"""
def calibrate(self):
max_sensor_values = [0]*self.numSensors
min_sensor_values = [0]*self.numSensors
for j in range(0,10):
sensor_values = self.AnalogRead()
for i in range(0,self.numSensors):
# set the max we found THIS time
if((j == 0) or max_sensor_values[i] < sensor_values[i]):
max_sensor_values[i] = sensor_values[i]
# set the min we found THIS time
if((j == 0) or min_sensor_values[i] > sensor_values[i]):
min_sensor_values[i] = sensor_values[i]
# record the min and max calibration values
for i in range(0,self.numSensors):
if(min_sensor_values[i] > self.calibratedMin[i]):
self.calibratedMin[i] = min_sensor_values[i]
if(max_sensor_values[i] < self.calibratedMax[i]):
self.calibratedMax[i] = max_sensor_values[i]
"""
Returns values calibrated to a value between 0 and 1000, where
0 corresponds to the minimum value read by calibrate() and 1000
corresponds to the maximum value. Calibration values are
stored separately for each sensor, so that differences in the
sensors are accounted for automatically.
"""
def readCalibrated(self):
value = 0
#read the needed values
sensor_values = self.AnalogRead()
for i in range (0,self.numSensors):
denominator = self.calibratedMax[i] - self.calibratedMin[i]
if(denominator != 0):
value = (sensor_values[i] - self.calibratedMin[i])* 1000 / denominator
if(value < 0):
value = 0
elif(value > 1000):
value = 1000
sensor_values[i] = value
#print("readCalibrated",sensor_values)
return sensor_values
"""
Operates the same as read calibrated, but also returns an
estimated position of the robot with respect to a line. The
estimate is made using a weighted average of the sensor indices
multiplied by 1000, so that a return value of 0 indicates that
the line is directly below sensor 0, a return value of 1000
indicates that the line is directly below sensor 1, 2000
indicates that it's below sensor 2000, etc. Intermediate
values indicate that the line is between two sensors. The
formula is:
0*value0 + 1000*value1 + 2000*value2 + ...
--------------------------------------------
value0 + value1 + value2 + ...
By default, this function assumes a dark line (high values)
surrounded by white (low values). If your line is light on
black, set the optional second argument white_line to true. In
this case, each sensor value will be replaced by (1000-value)
before the averaging.
"""
def readLine(self, white_line = 0):
sensor_values = self.readCalibrated()
avg = 0
sum = 0
on_line = 0
for i in range(0,self.numSensors):
value = sensor_values[i]
if(white_line):
value = 1000-value
# keep track of whether we see the line at all
if(value > 200):
on_line = 1
# only average in values that are above a noise threshold
if(value > 50):
avg += value * (i * 1000); # this is for the weighted total,
sum += value; #this is for the denominator
if(on_line != 1):
# If it last read to the left of center, return 0.
if(self.last_value < (self.numSensors - 1)*1000/2):
#print("left")
self.last_value = 0
# If it last read to the right of center, return the max.
else:
#print("right")
self.last_value = (self.numSensors - 1)*1000
else:
self.last_value = avg/sum
return self.last_value,sensor_values
# Simple example prints accel/mag data once per second:
if __name__ == '__main__':
TR = TRSensor()
print("TRSensor Example")
while True:
print(TR.AnalogRead())
time.sleep(0.2)
|
import connexion
from openapi_server.annotator.phi_types import PhiType
from openapi_server.get_annotations import get_annotations
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.text_date_annotation_request import \
TextDateAnnotationRequest # noqa: E501
from openapi_server.models.text_date_annotation_response import \
TextDateAnnotationResponse # noqa: E501
def create_text_date_annotations(): # noqa: E501
"""Annotate dates in a clinical note
Return the date annotations found in a clinical note # noqa: E501
    :rtype: TextDateAnnotationResponse
"""
res = None
status = None
if connexion.request.is_json:
try:
annotation_request = TextDateAnnotationRequest.from_dict(
connexion.request.get_json()) # noqa: E501
note = annotation_request.note
annotations = get_annotations(note, phi_type=PhiType.DATE)
res = TextDateAnnotationResponse(annotations)
status = 200
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status
|
#-*- encoding: utf-8 -*-
import sys
import Tkinter as tk
import service
import keycode
if sys.platform == 'win32':
from ctypes import wintypes, byref, windll
import win32con
def handle_hotkey(root, callback):
msg = wintypes.MSG()
if windll.user32.GetMessageA(byref(msg), None, 0, 0) != 0:
if msg.message == win32con.WM_HOTKEY:
if msg.wParam == 1:
print 'Hotkey triggered!'
callback()
windll.user32.TranslateMessage(byref(msg))
windll.user32.DispatchMessageA(byref(msg))
root.after(1, handle_hotkey, root, callback)
# hotkey map refs: https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx
# not yet used here.
def register_hotkey(root, key, callback):
key = key.split('-')
mod = 0
if 'Ctrl' in key:
mod |= win32con.MOD_CONTROL
if 'Shift' in key:
mod |= win32con.MOD_SHIFT
if 'Alt' in key:
mod |= win32con.MOD_ALT
key = key[-1].upper()
assert key in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if windll.user32.RegisterHotKey(None, 1, mod, ord(key)) != 0:
print("Hotkey registered!")
handle_hotkey(root, callback)
else:
def register_hotkey(root, key, callback):
        print 'Global hotkeys are not supported on this platform.'
def main():
service.start()
root = tk.Tk()
root.resizable(0, 0)
root.title('STF Input')
sv = tk.StringVar()
if sys.platform == 'win32':
backspace = '\x08'
else:
backspace = '\x7f'
def send(event, sv=sv):
char = event.char
if not char:
return
text = sv.get()
if char == '\r' and text: # use <Return> to input
service.type(text)
sv.set('')
return
        if char == backspace and text: # use <Backspace> to delete, <Del> not available.
sv.set('')
return
if char == '\x16': # skip <Ctrl-V>
service.keyboard(char)
sv.set('')
return 'break'
if char in keycode.KEYBOARD_KEYS or char in keycode.CTRLED_KEYS:
service.keyboard(char)
entry = tk.Entry(root, textvariable=sv)
entry.pack()
entry.focus_set()
entry.bind('<Key>', send)
state = [1]
def toggle(root=root, entry=entry):
if state[0] == 0:
root.deiconify()
entry.focus_set()
state[0] = 1
else:
root.withdraw()
state[0] = 0
    register_hotkey(root, 'Ctrl-Alt-Z', toggle)  # does not play well with IMEs
try:
root.mainloop()
finally:
service.stop()
if __name__ == '__main__':
main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import logging
from .lr_scheduler import WarmupMultiStepLR
def make_optimizer(cfg, model):
logger = logging.getLogger("fcos_core.trainer")
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if "bias" in key:
lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
if key.endswith(".offset.weight") or key.endswith(".offset.bias"):
logger.info("set lr factor of {} as {}".format(
key, cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR
))
lr *= cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)
if cfg.SOLVER.ADAM:
optimizer = torch.optim.Adam(params)
return optimizer
def make_lr_scheduler(cfg, optimizer):
return WarmupMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
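# Hedged usage sketch (cfg is assumed to be the fcos_core yacs config node that
# defines the SOLVER.* options referenced above):
#
#   optimizer = make_optimizer(cfg, model)
#   scheduler = make_lr_scheduler(cfg, optimizer)
#   # inside the training loop: loss.backward(); optimizer.step(); scheduler.step()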
|
# -*- coding: utf-8 -*-
"""Patched version of PyPi Kitchen's Python 3 getwriter function. Removes
extraneous newlines."""
import codecs
from kitchen.text.converters import to_bytes
def getwriter(encoding):
"""Return a :class:`codecs.StreamWriter` that resists tracing back.
:arg encoding: Encoding to use for transforming :class:`str` strings
into byte :class:`bytes`.
:rtype: :class:`codecs.StreamWriter`
:returns: :class:`~codecs.StreamWriter` that you can instantiate to wrap
output streams to automatically translate :class:`str` strings into
:attr:`encoding`.
    This is a reimplementation of :func:`codecs.getwriter` that returns
a :class:`~codecs.StreamWriter` that resists issuing tracebacks. The
:class:`~codecs.StreamWriter` that is returned uses
:func:`kitchen.text.converters.to_bytes` to convert :class:`str`
strings into byte :class:`bytes`. The departures from
:func:`codecs.getwriter` are:
1) The :class:`~codecs.StreamWriter` that is returned will take byte
:class:`bytes` as well as :class:`str` strings. Any byte
:class:`bytes` will be passed through unmodified.
2) The default error handler for unknown bytes is to ``replace`` the bytes
with the unknown character (``?`` in most ascii-based encodings, ``�``
in the utf encodings) whereas :func:`codecs.getwriter` defaults to
``strict``. Like :class:`codecs.StreamWriter`, the returned
:class:`~codecs.StreamWriter` can have its error handler changed in
code by setting ``stream.errors = 'new_handler_name'``
Example usage::
$ LC_ALL=C python
>>> import sys
>>> from kitchen.text.converters import getwriter
>>> UTF8Writer = getwriter('utf-8')
>>> unwrapped_stdout = sys.stdout
>>> sys.stdout = UTF8Writer(unwrapped_stdout)
>>> print 'caf\\xc3\\xa9'
café
>>> print u'caf\\xe9'
café
>>> ASCIIWriter = getwriter('ascii')
>>> sys.stdout = ASCIIWriter(unwrapped_stdout)
>>> print 'caf\\xc3\\xa9'
café
>>> print u'caf\\xe9'
caf?
.. seealso::
API docs for :class:`codecs.StreamWriter` and :func:`codecs.getwriter`
and `Print Fails <http://wiki.python.org/moin/PrintFails>`_ on the
python wiki.
.. versionadded:: kitchen 0.2a2, API: kitchen.text 1.1.0
"""
class _StreamWriter(codecs.StreamWriter):
# :W0223: We don't need to implement all methods of StreamWriter.
# This is not the actual class that gets used but a replacement for
# the actual class.
# :C0111: We're implementing an API from the stdlib. Just point
# people at that documentation instead of writing docstrings here.
# pylint:disable-msg=W0223,C0111
def __init__(self, stream, errors='replace'):
codecs.StreamWriter.__init__(self, stream, errors)
def encode(self, msg, errors='replace'):
return (to_bytes(msg, encoding=self.encoding, errors=errors),
len(msg))
_StreamWriter.encoding = encoding
return _StreamWriter
|
input = """
colored(2,g) :- not diff_col(2,g).
colored(2,y) :- not diff_col(2,y).
colored(3,g) :- not diff_col(3,g).
colored(3,y) :- not diff_col(3,y).
diff_col(2,g) :- colored(2,y).
diff_col(3,g) :- colored(3,y).
diff_col(2,y) :- colored(2,g).
diff_col(3,y) :- colored(3,g).
no_stable :- colored(2,2), colored(3,2), not no_stable.
no_stable :- colored(2,3), colored(3,3), not no_stable.
no_stable :- colored(2,g), colored(3,g), not no_stable.
no_stable :- colored(2,y), colored(3,y), not no_stable.
"""
output = """
colored(2,g) :- not diff_col(2,g).
colored(2,y) :- not diff_col(2,y).
colored(3,g) :- not diff_col(3,g).
colored(3,y) :- not diff_col(3,y).
diff_col(2,g) :- colored(2,y).
diff_col(3,g) :- colored(3,y).
diff_col(2,y) :- colored(2,g).
diff_col(3,y) :- colored(3,g).
no_stable :- colored(2,2), colored(3,2), not no_stable.
no_stable :- colored(2,3), colored(3,3), not no_stable.
no_stable :- colored(2,g), colored(3,g), not no_stable.
no_stable :- colored(2,y), colored(3,y), not no_stable.
"""
|
import logging
from datetime import datetime
import botocore.loaders
import botocore.regions
from boto3 import Session as Boto3Session
from botocore.exceptions import ClientError
from .exceptions import CLIMisconfiguredError, DownstreamError
LOG = logging.getLogger(__name__)
BOTO_CRED_KEYS = ("aws_access_key_id", "aws_secret_access_key", "aws_session_token")
LOWER_CAMEL_CRED_KEYS = ("accessKeyId", "secretAccessKey", "sessionToken")
def create_sdk_session(region_name=None):
def _known_error(msg):
raise CLIMisconfiguredError(
msg + ". Please ensure your AWS CLI is configured correctly: "
"https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html"
)
session = Boto3Session(region_name=region_name)
if session.region_name is None:
_known_error("No region specified")
if session.get_credentials() is None:
_known_error("No credentials specified")
return session
def get_temporary_credentials(session, key_names=BOTO_CRED_KEYS, role_arn=None):
sts_client = session.client(
"sts",
endpoint_url=get_service_endpoint("sts", session.region_name),
region_name=session.region_name,
)
if role_arn:
session_name = "CloudFormationContractTest-{:%Y%m%d%H%M%S}".format(
datetime.now()
)
try:
response = sts_client.assume_role(
RoleArn=role_arn, RoleSessionName=session_name, DurationSeconds=900
)
except ClientError:
# pylint: disable=W1201
LOG.debug(
"Getting session token resulted in unknown ClientError. "
+ "Could not assume specified role '%s'.",
role_arn,
)
raise DownstreamError() from Exception(
"Could not assume specified role '{}'".format(role_arn)
)
temp = response["Credentials"]
creds = (temp["AccessKeyId"], temp["SecretAccessKey"], temp["SessionToken"])
else:
frozen = session.get_credentials().get_frozen_credentials()
if frozen.token:
creds = (frozen.access_key, frozen.secret_key, frozen.token)
else:
try:
response = sts_client.get_session_token(DurationSeconds=900)
except ClientError as e:
LOG.debug(
"Getting session token resulted in unknown ClientError", exc_info=e
)
raise DownstreamError("Could not retrieve session token") from e
temp = response["Credentials"]
creds = (temp["AccessKeyId"], temp["SecretAccessKey"], temp["SessionToken"])
return dict(zip(key_names, creds))
def get_service_endpoint(service, region):
loader = botocore.loaders.create_loader()
data = loader.load_data("endpoints")
resolver = botocore.regions.EndpointResolver(data)
endpoint_data = resolver.construct_endpoint(service, region)
return "https://" + endpoint_data["hostname"]
def get_account(session, temporary_credentials):
sts_client = session.client(
"sts",
endpoint_url=get_service_endpoint("sts", session.region_name),
region_name=session.region_name,
aws_access_key_id=temporary_credentials["accessKeyId"],
aws_secret_access_key=temporary_credentials["secretAccessKey"],
aws_session_token=temporary_credentials["sessionToken"],
)
response = sts_client.get_caller_identity()
return response.get("Account")
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CUTOFFBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_cutoffbert import CutoffBertConfig
from ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings
from ..bert.modeling_bert import BertEncoder as CutoffBertEncoder
from ..bert.modeling_bert import BertPooler as CutoffBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "CutoffBertConfig"
_TOKENIZER_FOR_DOC = "CutoffBertTokenizer"
CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class CutoffBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = CutoffBertConfig
load_tf_weights = load_tf_weights_in_cutoffbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
CUTOFFBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
CUTOFFBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertModel(CutoffBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = CutoffBertEmbeddings(config)
self.encoder = CutoffBertEncoder(config)
self.pooler = CutoffBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled
output) + Cut-off data augmentation support.
""",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertForSequenceClassification(CutoffBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_token_id = config.cls_token_id
self.sep_token_id = config.sep_token_id
self.mask_token_id = config.mask_token_id
self.masking_prob = config.cutoff_masking_prob
self.temperature = config.cutoff_temperature
self.mask_loss_wgt = config.cutoff_mask_loss_wgt
self.js_loss_wgt = config.cutoff_js_loss_wgt
self.config = config
self.bert = CutoffBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def _apply_cutoff(self, inputs):
masked_inputs = inputs.clone()
valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id)
random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool()
masking_indices = random_masking_indices & valid_masking_indices
masked_inputs[masking_indices] = self.mask_token_id
return masked_inputs
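# Illustrative sketch (not part of the model): _apply_cutoff replaces a random subset of
# non-special tokens with the mask token. With masking_prob = 0.5 and hypothetical ids
# cls=101, sep=102, mask=103, an input row such as [101, 7, 8, 9, 102] could become
# [101, 103, 8, 103, 102]; CLS and SEP positions are never masked.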
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.dropout(outputs[1])
logits = self.classifier(pooled_output)
if not return_dict:
return (logits,) + outputs[2:]
return SequenceClassifierOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, l = input_ids.size()
masked_input_ids = self._apply_cutoff(input_ids.clone())
flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l)
flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.dropout(flatten_outputs[1])
flatten_logits = self.classifier(flatten_pooled_output)
logits, masked_logits = flatten_logits.reshape(b, 2, self.config.num_labels).chunk(2, dim=1)
logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0:
mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1))
loss += mask_loss * self.mask_loss_wgt
if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0:
kl_loss_fct = KLDivLoss(reduction="batchmean")
src_logits, trg_logits = logits, masked_logits
mean_logits = (src_logits + trg_logits) * 0.5
src_loss = kl_loss_fct(
F.log_softmax(src_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
trg_loss = kl_loss_fct(
F.log_softmax(trg_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
js_loss = (src_loss + trg_loss) * 0.5
loss += js_loss * self.js_loss_wgt
if not return_dict:
return (loss, logits)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
)
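# The Jensen-Shannon consistency term above softens both views' logits with a temperature,
# builds the mean distribution, and pulls each view toward it with a KL divergence. Below is
# a minimal, self-contained sketch of that computation (illustrative only, not used by the
# model; the names p_logits, q_logits and temperature are assumptions):
def _js_consistency_sketch(p_logits, q_logits, temperature=1.0):
    import torch.nn.functional as F
    from torch.nn import KLDivLoss
    kl = KLDivLoss(reduction="batchmean")
    # softmax of the averaged logits plays the role of the mean distribution
    mean_probs = F.softmax((p_logits + q_logits) * 0.5 / temperature, dim=-1)
    loss_p = kl(F.log_softmax(p_logits / temperature, dim=-1), mean_probs)
    loss_q = kl(F.log_softmax(q_logits / temperature, dim=-1), mean_probs)
    return 0.5 * (loss_p + loss_q) * (temperature ** 2)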
|
#!/usr/bin/env python3
# Very basic bitstream to SVF converter
# This file is Copyright (c) 2018 David Shah <dave@ds0.me>
import sys
import textwrap
max_row_size = 100000
def bitreverse(x):
y = 0
for i in range(8):
if (x >> (7 - i)) & 1 == 1:
y |= (1 << i)
return y
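# Illustrative check: bitreverse(1) == 128 (0b00000001 -> 0b10000000), since bit 0 of the
# input becomes bit 7 of the output.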
def bit_to_svf(bit, svf):
with open(bit, 'rb') as bitf:
bs = bitf.read()
# Autodetect IDCODE from bitstream
idcode_cmd = bytes([0xE2, 0x00, 0x00, 0x00])
idcode = None
for i in range(len(bs) - 4):
if bs[i:i+4] == idcode_cmd:
idcode = bs[i+4] << 24
idcode |= bs[i+5] << 16
idcode |= bs[i+6] << 8
idcode |= bs[i+7]
break
if idcode is None:
print("Failed to find IDCODE in bitstream, check bitstream is valid")
sys.exit(1)
print("IDCODE in bitstream is 0x%08x" % idcode)
bitf.seek(0)
with open(svf, 'w') as svf:
print("""
HDR 0;
HIR 0;
TDR 0;
TIR 0;
ENDDR DRPAUSE;
ENDIR IRPAUSE;
STATE IDLE;
""", file=svf)
print("""
SIR 8 TDI (E0);
SDR 32 TDI (00000000)
TDO ({:08X})
MASK (FFFFFFFF);
""".format(idcode), file=svf)
print("""
SIR 8 TDI (1C);
SDR 510 TDI (3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF);
SIR 8 TDI (C6);
SDR 8 TDI (00);
RUNTEST IDLE 2 TCK 1.00E-02 SEC;
SIR 8 TDI (3C);
SDR 32 TDI (00000000)
TDO (00000000)
MASK (0000B000);
SIR 8 TDI (46);
SDR 8 TDI (01);
RUNTEST IDLE 2 TCK 1.00E-02 SEC;
SIR 8 TDI (7A);
RUNTEST IDLE 2 TCK 1.00E-02 SEC;
""", file=svf)
while True:
chunk = bitf.read(max_row_size//8)
if not chunk:
break
# Convert chunk to bit-reversed hex
br_chunk = [bitreverse(x) for x in chunk]
hex_chunk = ["{:02X}".format(x) for x in reversed(br_chunk)]
print("\n".join(textwrap.wrap("SDR {} TDI ({});".format(8*len(chunk), "".join(hex_chunk)), 100)), file=svf)
print("""
SIR 8 TDI (FF);
RUNTEST IDLE 100 TCK 1.00E-02 SEC;
SIR 8 TDI (C0);
RUNTEST IDLE 2 TCK 1.00E-03 SEC;
SDR 32 TDI (00000000)
TDO (00000000)
MASK (FFFFFFFF);
! Shift in ISC DISABLE(0x26) instruction
SIR 8 TDI (26);
RUNTEST IDLE 2 TCK 2.00E-01 SEC;
! Shift in BYPASS(0xFF) instruction
SIR 8 TDI (FF);
RUNTEST IDLE 2 TCK 1.00E-03 SEC;
! Shift in LSC_READ_STATUS(0x3C) instruction
SIR 8 TDI (3C);
SDR 32 TDI (00000000)
TDO (00000100)
MASK (00002100);
""", file=svf)
if __name__ == "__main__":
bit_to_svf(sys.argv[1], sys.argv[2])
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import ConfigParser
from coffeehack.coffeehack import CoffeeHack
from hermes_python.hermes import Hermes
import io
import Queue
CONFIGURATION_ENCODING_FORMAT = "utf-8"
CONFIG_INI = "config.ini"
MQTT_IP_ADDR = "localhost"
MQTT_PORT = 1883
MQTT_ADDR = "{}:{}".format(MQTT_IP_ADDR, str(MQTT_PORT))
class SnipsConfigParser(ConfigParser.SafeConfigParser):
def to_dict(self):
return {section: {option_name : option for option_name, option in self.items(section)} for section in self.sections()}
def read_configuration_file(configuration_file):
try:
with io.open(configuration_file, encoding=CONFIGURATION_ENCODING_FORMAT) as f:
conf_parser = SnipsConfigParser()
conf_parser.readfp(f)
return conf_parser.to_dict()
except (IOError, ConfigParser.Error) as e:
return dict()
class Skill:
def __init__(self):
config = read_configuration_file("config.ini")
extra = config["global"].get("extra", False)
self.cafe = CoffeeHack(extra = extra)
def extract_value(val):
res = []
if val is not None:
for r in val:
res.append(r.slot_value.value.value)
return res
def extract_intensite_cafe(intent_message):
return extract_value(intent_message.slots.intensite_cafe)
def extract_nombre_cafe(intent_message):
return extract_value(intent_message.slots.nombre_cafe)
def extract_type_cafe(intent_message):
return extract_value(intent_message.slots.type_cafe)
def extract_taille_cafe(intent_message):
return extract_value(intent_message.slots.taille_cafe)
def callback(hermes, intent_message):
t = extract_type_cafe(intent_message)
s = extract_taille_cafe(intent_message)
ta = extract_intensite_cafe(intent_message)
n = extract_nombre_cafe(intent_message)
type_cafe = t[0] if len(t) else ""
taille_cafe = s[0] if len(s) else ""
intensite_cafe = ta[0] if len(ta) else ""
number = 1
if len(n):
try:
number = int(n[0])
except ValueError, e:
number = 2
print(t)
print(s)
print(ta)
hermes.skill.cafe.verser(type_cafe = type_cafe,
taille_cafe = taille_cafe,
intensite_cafe = intensite_cafe,
number = number)
def cafe_io(hermes, intent_message):
hermes.skill.cafe.cafe_io()
def cafe_nettoie(hermes, intent_message):
hermes.skill.cafe.nettoie()
def cafe_vapeur(hermes, intent_message):
hermes.skill.cafe.vapeur()
if __name__ == "__main__":
skill = Skill()
with Hermes(MQTT_ADDR) as h:
h.skill = skill
h.subscribe_intent("segar:verser", callback) \
.subscribe_intent("segar:cafe_io", cafe_io) \
.subscribe_intent("nettoie", cafe_nettoie) \
.subscribe_intent("vapeur", cafe_vapeur) \
.loop_forever()
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Writing of files in the ``gettext`` MO (machine object) format.
:since: version 0.9
:see: `The Format of MO Files
<http://www.gnu.org/software/gettext/manual/gettext.html#MO-Files>`_
"""
import array
import struct
from babel.messages.catalog import Catalog, Message
__all__ = ['read_mo', 'write_mo']
__docformat__ = 'restructuredtext en'
LE_MAGIC = 0x950412deL
BE_MAGIC = 0xde120495L
def read_mo(fileobj):
"""Read a binary MO file from the given file-like object and return a
corresponding `Catalog` object.
:param fileobj: the file-like object to read the MO file from
:return: a catalog object representing the parsed MO file
:rtype: `Catalog`
:note: The implementation of this function is heavily based on the
``GNUTranslations._parse`` method of the ``gettext`` module in the
standard library.
"""
catalog = Catalog()
headers = {}
filename = getattr(fileobj, 'name', '')
buf = fileobj.read()
buflen = len(buf)
unpack = struct.unpack
# Parse the .mo file header, which consists of 5 little endian 32
# bit words.
magic = unpack('<I', buf[:4])[0] # Are we big endian or little endian?
if magic == LE_MAGIC:
version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])
ii = '<II'
elif magic == BE_MAGIC:
version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])
ii = '>II'
else:
raise IOError(0, 'Bad magic number', filename)
# Now put all messages from the .mo file buffer into the catalog
# dictionary
for i in xrange(0, msgcount):
mlen, moff = unpack(ii, buf[origidx:origidx + 8])
mend = moff + mlen
tlen, toff = unpack(ii, buf[transidx:transidx + 8])
tend = toff + tlen
if mend < buflen and tend < buflen:
msg = buf[moff:mend]
tmsg = buf[toff:tend]
else:
raise IOError(0, 'File is corrupt', filename)
# See if we're looking at GNU .mo conventions for metadata
if mlen == 0:
# Catalog description
lastkey = key = None
for item in tmsg.splitlines():
item = item.strip()
if not item:
continue
if ':' in item:
key, value = item.split(':', 1)
lastkey = key = key.strip().lower()
headers[key] = value.strip()
elif lastkey:
headers[lastkey] += '\n' + item
if '\x04' in msg: # context
ctxt, msg = msg.split('\x04')
else:
ctxt = None
if '\x00' in msg: # plural forms
msg = msg.split('\x00')
tmsg = tmsg.split('\x00')
if catalog.charset:
msg = [x.decode(catalog.charset) for x in msg]
tmsg = [x.decode(catalog.charset) for x in tmsg]
else:
if catalog.charset:
msg = msg.decode(catalog.charset)
tmsg = tmsg.decode(catalog.charset)
catalog[msg] = Message(msg, tmsg, context=ctxt)
# advance to next entry in the seek tables
origidx += 8
transidx += 8
catalog.mime_headers = headers.items()
return catalog
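# Illustrative round-trip (assumes `catalog` and `buf` are set up as in the write_mo doctest below):
# >>> buf = StringIO()
# >>> write_mo(buf, catalog)
# >>> buf.seek(0)
# >>> read_mo(buf).mime_headers   # headers parsed back out of the MO data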
def write_mo(fileobj, catalog, use_fuzzy=False):
"""Write a catalog to the specified file-like object using the GNU MO file
format.
>>> from babel.messages import Catalog
>>> from gettext import GNUTranslations
>>> from StringIO import StringIO
>>> catalog = Catalog(locale='en_US')
>>> catalog.add('foo', 'Voh')
>>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
>>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
>>> catalog.add('Fizz', '')
>>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
>>> buf = StringIO()
>>> write_mo(buf, catalog)
>>> buf.seek(0)
>>> translations = GNUTranslations(fp=buf)
>>> translations.ugettext('foo')
u'Voh'
>>> translations.ungettext('bar', 'baz', 1)
u'Bahr'
>>> translations.ungettext('bar', 'baz', 2)
u'Batz'
>>> translations.ugettext('fuz')
u'fuz'
>>> translations.ugettext('Fizz')
u'Fizz'
>>> translations.ugettext('Fuzz')
u'Fuzz'
>>> translations.ugettext('Fuzzes')
u'Fuzzes'
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param use_fuzzy: whether translations marked as "fuzzy" should be included
in the output
"""
messages = list(catalog)
if not use_fuzzy:
messages[1:] = [m for m in messages[1:] if not m.fuzzy]
messages.sort()
ids = strs = ''
offsets = []
for message in messages:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
if message.pluralizable:
msgid = '\x00'.join([
msgid.encode(catalog.charset) for msgid in message.id
])
msgstrs = []
for idx, string in enumerate(message.string):
if not string:
msgstrs.append(message.id[min(int(idx), 1)])
else:
msgstrs.append(string)
msgstr = '\x00'.join([
msgstr.encode(catalog.charset) for msgstr in msgstrs
])
else:
msgid = message.id.encode(catalog.charset)
if not message.string:
msgstr = message.id.encode(catalog.charset)
else:
msgstr = message.string.encode(catalog.charset)
if message.context:
msgid = '\x04'.join([message.context.encode(catalog.charset),
msgid])
offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
ids += msgid + '\x00'
strs += msgstr + '\x00'
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
# the keys start right after the index tables.
keystart = 7 * 4 + 16 * len(messages)
valuestart = keystart + len(ids)
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
koffsets = []
voffsets = []
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1 + keystart]
voffsets += [l2, o2 + valuestart]
offsets = koffsets + voffsets
fileobj.write(struct.pack('Iiiiiii',
LE_MAGIC, # magic
0, # version
len(messages), # number of entries
7 * 4, # start of key index
7 * 4 + len(messages) * 8, # start of value index
0, 0 # size and offset of hash table
) + array.array("i", offsets).tostring() + ids + strs)
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Manages the details on the images used in the build and the run stage."""
import json
import os.path
#: Global variable used to cache in memory the content of images.json
_data = None
def data():
"""Returns a dictionary with the static data on the images.
The dictionary is read from a JSON file lazily the first time
this function is called.
"""
global _data
if not _data:
json_dir = os.path.abspath(os.path.dirname(__file__))
json_file = os.path.join(json_dir, 'images.json')
with open(json_file) as f:
_data = json.load(f)
return _data
def build_info(image, spack_version):
"""Returns the name of the build image and its tag.
Args:
image (str): image to be used at run-time. Should be of the form
<image_name>:<image_tag> e.g. "ubuntu:18.04"
spack_version (str): version of Spack that we want to use to build
Returns:
A tuple with (image_name, image_tag) for the build image
"""
# Don't handle error here, as a wrong image should have been
# caught by the JSON schema
image_data = data()[image]
build_image = image_data['build']
# Try to check if we have a tag for this Spack version
try:
# Translate version from git to docker if necessary
build_tag = image_data['build_tags'].get(spack_version, spack_version)
except KeyError:
msg = ('the image "{0}" has no tag for Spack version "{1}" '
'[valid versions are {2}]')
msg = msg.format(build_image, spack_version,
', '.join(image_data['build_tags'].keys()))
raise ValueError(msg)
return build_image, build_tag
def package_info(image):
"""Returns the commands used to update system repositories, install
system packages and clean afterwards.
Args:
image (str): image to be used at run-time. Should be of the form
<image_name>:<image_tag> e.g. "ubuntu:18.04"
Returns:
A tuple of (update, install, clean) commands.
"""
image_data = data()[image]
update = image_data['update']
install = image_data['install']
clean = image_data['clean']
return update, install, clean
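# Illustrative usage (assumes "ubuntu:18.04" is one of the keys in images.json, as in the
# docstring examples above; the Spack version string is arbitrary):
# build_image, build_tag = build_info("ubuntu:18.04", "0.15")
# update, install, clean = package_info("ubuntu:18.04")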
|
# Name: VolumeExtractChannel
import inviwopy as ivw
import numpy as np
class VolumeExtractChannel(ivw.Processor):
def __init__(self, id, name):
ivw.Processor.__init__(self, id, name)
self.inport = ivw.data.VolumeInport("inport")
self.addInport(self.inport, owner=False)
self.outport = ivw.data.VolumeOutport("outport")
self.addOutport(self.outport, owner=False)
self.channel = ivw.properties.IntProperty("channel", "channel", 0, 0, 4, 1)
self.addProperty(self.channel, owner=False)
@staticmethod
def processorInfo():
return ivw.ProcessorInfo(
classIdentifier = "org.inviwo.VolumeExtractChannel",
displayName = "Volume Extract Channel",
category = "Volume Operation",
codeState = ivw.CodeState.Stable,
tags = ivw.Tags.PY
)
def getProcessorInfo(self):
return VolumeExtractChannel.processorInfo()
def process(self):
volume = self.inport.getData()
if len(volume.data.shape) <= 3:
self.outport.setData(volume)
return
channels = volume.data.shape[3]
volumeSlice = volume.data[:,:,:, np.clip(self.channel.value, 0, channels-1)]
newVolume = ivw.data.Volume(volumeSlice)
newVolume.dataMap = volume.dataMap
newVolume.modelMatrix = volume.modelMatrix
newVolume.worldMatrix = volume.worldMatrix
newVolume.copyMetaDataFrom(volume)
newVolume.swizzlemask = volume.swizzlemask
newVolume.interpolation = volume.interpolation
newVolume.wrapping = volume.wrapping
self.outport.setData(newVolume)
|
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
|
# -*- test-case-name: twisted.web2.test.test_httpauth -*-
from twisted.cred import credentials, error
from twisted.web2.auth.interfaces import ICredentialFactory
from zope.interface import implements
class BasicCredentialFactory(object):
"""
Credential Factory for HTTP Basic Authentication
"""
implements(ICredentialFactory)
scheme = 'basic'
def __init__(self, realm):
self.realm = realm
def getChallenge(self, peer):
return {'realm': self.realm}
def decode(self, response, request):
try:
creds = (response + '===').decode('base64')
except:
raise error.LoginFailed('Invalid credentials')
creds = creds.split(':', 1)
if len(creds) == 2:
return credentials.UsernamePassword(*creds)
else:
raise error.LoginFailed('Invalid credentials')
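# Illustrative decoding (hypothetical credentials): a client sends the base64 of "user:pass",
# i.e. "dXNlcjpwYXNz", and decode() returns credentials.UsernamePassword('user', 'pass').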
|
from couchbase.management.admin import Admin
from couchbase_core.mapper import BijectiveMapping, \
StringEnum, Identity, Timedelta, Bijection, StringEnumLoose
from ..options import OptionBlockTimeOut, forward_args
from couchbase.management.generic import GenericManager
from typing import *
from couchbase_core import abstractmethod, mk_formstr
from couchbase_core.durability import Durability
from couchbase.exceptions import HTTPException, ErrorMapper, BucketAlreadyExistsException, BucketDoesNotExistException
import enum
import datetime
class BucketManagerErrorHandler(ErrorMapper):
@staticmethod
def mapping():
# type: (...) -> Mapping[str, CBErrorType]
return {HTTPException: {'Bucket with given name (already|still) exists': BucketAlreadyExistsException,
'Requested resource not found': BucketDoesNotExistException}}
@BucketManagerErrorHandler.wrap
class BucketManager(GenericManager):
def __init__(self, # type: BucketManager
admin_bucket # type: Admin
):
"""Bucket Manager
:param admin_bucket: Admin bucket
"""
super(BucketManager, self).__init__(admin_bucket)
def create_bucket(self, # type: BucketManager
settings, # type: CreateBucketSettings
*options, # type: CreateBucketOptions
**kwargs # type: Any
):
"""
Creates a new bucket.
:param: CreateBucketSettings settings: settings for the bucket.
:param: CreateBucketOptions options: options for setting the bucket.
:param: Any kwargs: override corresponding values in the options.
:raises: BucketAlreadyExistsException
:raises: InvalidArgumentsException
"""
# prune the missing settings...
params = settings.as_dict()
# ensure flushEnabled is an int
params['flushEnabled'] = int(params.get('flushEnabled', 0))
# send it
return self._admin_bucket.http_request(
path='/pools/default/buckets',
method='POST',
content=mk_formstr(params),
content_type='application/x-www-form-urlencoded',
**forward_args(kwargs, *options))
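# Illustrative usage (cluster/authentication setup omitted; the bucket name and quota are assumptions):
# manager = cluster.buckets()
# manager.create_bucket(CreateBucketSettings(name="demo-bucket", ram_quota_mb=100))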
def update_bucket(self, # type: BucketManager
settings, # type: BucketSettings
*options, # type: UpdateBucketOptions
**kwargs # type: Any
):
"""
Updates a bucket. Every setting must be set to what the user wants it to be after the update.
Any settings that are not set to their desired values may be reverted to default values by the server.
:param BucketSettings settings: settings for updating the bucket.
:param UpdateBucketOptions options: options for updating the bucket.
:param Any kwargs: override corresponding values in the options.
:raises: InvalidArgumentsException
:raises: BucketDoesNotExistException
"""
# prune the missing settings...
params = settings.as_dict()
# ensure flushEnabled is an int
params['flushEnabled'] = int(params.get('flushEnabled', 0))
# send it
return self._admin_bucket.http_request(
path='/pools/default/buckets/' + settings.name,
method='POST',
content_type='application/x-www-form-urlencoded',
content=mk_formstr(params),
**forward_args(kwargs, *options))
def drop_bucket(self, # type: BucketManager
bucket_name, # type: str
*options, # type: DropBucketOptions
**kwargs # type: Any
):
# type: (...) -> None
"""
Removes a bucket.
:param str bucket_name: the name of the bucket.
:param DropBucketOptions options: options for dropping the bucket.
:param Any kwargs: override corresponding value in the options.
:raises: BucketNotFoundException
:raises: InvalidArgumentsException
"""
return self._admin_bucket.http_request(
path='/pools/default/buckets/' + bucket_name,
method='DELETE',
**forward_args(kwargs, *options))
def get_bucket(self, # type: BucketManager
bucket_name, # type: str
*options, # type: GetBucketOptions
**kwargs # type: Any
):
# type: (...) -> BucketSettings
"""
Gets a bucket's settings.
:param str bucket_name: the name of the bucket.
:param GetBucketOptions options: options for getting the bucket.
:param Any kwargs: override corresponding values in options.
:returns: settings for the bucket. Note: the RAM quota returned is in bytes,
not MB, so divide by 1024 twice. Also note: FlushEnabled is not a setting returned by the server; if flush is enabled then the doFlush endpoint will be listed and should be used to populate the field.
:rtype: BucketSettings
:raises: BucketNotFoundException
:raises: InvalidArgumentsException
"""
return BucketSettings.from_raw(
self._admin_bucket.http_request(
path='/pools/default/buckets/' + bucket_name,
method='GET',
**forward_args(kwargs, *options)
).value)
def get_all_buckets(self, # type: BucketManager
*options, # type: GetAllBucketOptions
**kwargs # type: Any
):
# type: (...) -> Iterable[BucketSettings]
"""
Gets all bucket settings. Note: the RAM quota returned is in bytes,
not MB, so divide by 1024 twice.
:param GetAllBucketOptions options: options for getting all buckets.
:param Any kwargs: override corresponding value in options.
:returns: An iterable of settings for each bucket.
:rtype: Iterable[BucketSettings]
"""
return list(
map(lambda x: BucketSettings(**x),
self._admin_bucket.http_request(
path='/pools/default/buckets',
method='GET',
**forward_args(kwargs, *options)
).value))
def flush_bucket(self, # type: BucketManager
bucket_name, # type: str
*options, # type: FlushBucketOptions
**kwargs # type: Any
):
# using the ns_server REST interface
"""
Flushes a bucket (uses the ns_server REST interface).
:param str bucket_name: the name of the bucket.
:param FlushBucketOptions options: options for flushing the bucket.
:param Any kwargs: override corresponding value in options.
:raises: BucketNotFoundException
:raises: InvalidArgumentsException
:raises: FlushDisabledException
"""
self._admin_bucket.http_request(
path="/pools/default/buckets/{bucket_name}/controller/doFlush".format(bucket_name=bucket_name),
method='POST',
**forward_args(kwargs, *options))
class EvictionPolicyType(enum.Enum):
NOT_RECENTLY_USED = "nruEviction"
NO_EVICTION = "noEviction"
FULL = "fullEviction"
VALUE_ONLY = "valueOnly"
class EjectionMethod(enum.Enum):
FULL_EVICTION = "fullEviction"
VALUE_ONLY = "valueOnly"
class BucketType(enum.Enum):
COUCHBASE = "membase"
MEMCACHED = "memcached"
EPHEMERAL = "ephemeral"
class CompressionMode(enum.Enum):
OFF = "off"
PASSIVE = "passive"
ACTIVE = "active"
class ConflictResolutionType(enum.Enum):
TIMESTAMP = "lww"
SEQUENCE_NUMBER = "seqno"
class BucketSettings(dict):
mapping = BijectiveMapping({'flushEnabled': {'flush_enabled': Bijection(int.__bool__, bool.__int__)},
'numReplicas': {'num_replicas': Identity(int)},
'ramQuotaMB': {'ram_quota_mb': Identity(int)},
'replicaNumber': {'num_replicas': Identity(int)},
'replicaIndex': {'replica_index': Identity(bool)},
'bucketType': {'bucket_type': -StringEnumLoose(BucketType)},
'maxTTL': {'max_ttl': -Timedelta(int)},
'compressionMode': {'compression_mode': -StringEnum(CompressionMode)},
'conflictResolutionType': {
'conflict_resolution_type': -StringEnumLoose(ConflictResolutionType)},
'evictionPolicy': {'eviction_policy': -StringEnumLoose(EvictionPolicyType)},
'ejectionMethod': {'ejection_method': -StringEnumLoose(EjectionMethod)},
'name': {'name': Identity(str)},
'durabilityMinLevel': {'minimum_durability_level': Identity(str)}})
@overload
def __init__(self,
name=None, # type: str
flush_enabled=False, # type: bool
ram_quota_mb=None, # type: int
num_replicas=None, # type: int
replica_index=None, # type: bool
bucket_type=None, # type: BucketType
eviction_policy=None, # type: EvictionPolicyType
max_ttl=None, # type: Union[datetime.timedelta,float,int]
compression_mode=None # type: CompressionMode
):
# type: (...) -> None
pass
def __init__(self, **kwargs):
"""BucketSettings provides a means of mapping bucket settings into an object.
"""
if kwargs.get('bucket_type',None) == "couchbase":
kwargs['bucket_type'] = BucketType.COUCHBASE
"""
PYCBC-956
Bucket min durability setting is represented as string on the wire.
See Durability enum for string representations
"""
durability = kwargs.pop('minimum_durability_level', None)
if durability:
if isinstance(durability, Durability):
kwargs['minimum_durability_level'] = durability.to_server_str()
else:
kwargs['minimum_durability_level'] = Durability.from_server_str(durability)
super(BucketSettings, self).__init__(**self.mapping.sanitize_src(kwargs))
def as_dict(self, *options, **kwargs):
final_opts = dict(**Admin.bc_defaults)
final_opts.update(**forward_args(kwargs,*options))
params=self.mapping.to_src(self)
params.update({
'authType': 'sasl',
'saslPassword': final_opts['bucket_password']
})
return params
@classmethod
def from_raw(cls,
raw_info # type: Mapping[str, Any]
):
# type: (...) -> BucketSettings
result = cls(**cls.mapping.to_dest(raw_info))
quota = raw_info.get('quota', {})
# convert rawRAM to MB
if 'rawRAM' in quota:
result['ram_quota_mb'] = quota.get('rawRAM') / 1024 / 1024
else:
result['ram_quota_mb'] = None
controllers = raw_info.get('controllers', {})
result['flush_enabled'] = ('flush' in controllers)
return result
@property
def name(self):
# type: (...) -> str
"""Name (string) - The name of the bucket."""
return self.get('name')
@property
def flush_enabled(self):
# type: (...) -> bool
"""Whether or not flush should be enabled on the bucket. Default to false."""
return self.get('flush_enabled', False)
@property
def ram_quota_mb(self):
# type: (...) -> int
"""Ram Quota in mb for the bucket. (rawRAM in the server payload)"""
return self.get('ram_quota_mb')
@property
def num_replicas(self):
# type: (...) -> int
"""NumReplicas (int) - The number of replicas for documents."""
return self.get('num_replicas')
@property
def replica_index(self):
# type: (...) -> bool
""" Whether replica indexes should be enabled for the bucket."""
return self.get('replica_index')
@property
def bucket_type(self):
# type: (...) -> BucketType
"""BucketType {couchbase (sent on wire as membase), memcached, ephemeral}
The type of the bucket. Default to couchbase."""
return self.get('bucket_type')
@property
def eviction_policy(self):
# type: (...) -> EvictionPolicyType
"""{fullEviction | valueOnly}. The eviction policy to use."""
return self.get('eviction_policy')
@property
def max_ttl(self):
# type: (...) -> datetime.timedelta
"""Value for the maxTTL of new documents created without a ttl."""
return self.get('max_ttl')
@property
def compression_mode(self):
# type: (...) -> CompressionMode
"""{off | passive | active} - The compression mode to use."""
return self.get('compression_mode')
class CreateBucketSettings(BucketSettings):
@overload
def __init__(self,
name=None, # type: str
flush_enabled=False, # type: bool
ram_quota_mb=None, # type: int
num_replicas=None, # type: int
replica_index=None, # type: bool
bucket_type=None, # type: BucketType
eviction_policy=None, # type: EvictionPolicyType
max_ttl=None, # type: Union[datetime.timedelta,float,int]
compression_mode=None, # type: CompressionMode
conflict_resolution_type=None, # type: ConflictResolutionType
bucket_password=None, # type: str
ejection_method=None # type: EjectionMethod
):
"""
Bucket creation settings.
:param name: name of the bucket
:param flush_enabled: whether flush is enabled
:param ram_quota_mb: raw quota in megabytes
:param num_replicas: number of replicas
:param replica_index: whether this is a replica index
:param bucket_type: type of bucket
:param eviction_policy: policy for eviction
:param max_ttl: max time to live for bucket
:param compression_mode: compression mode
:param ejection_method: ejection method (deprecated, please use eviction_policy instead)
"""
def __init__(self, **kwargs):
BucketSettings.__init__(self, **kwargs)
@property
def conflict_resolution_type(self):
# type: (...) -> ConflictResolutionType
return self.get('conflict_resolution_type')
class CreateBucketOptions(OptionBlockTimeOut):
pass
class UpdateBucketOptions(OptionBlockTimeOut):
pass
class DropBucketOptions(OptionBlockTimeOut):
pass
class GetAllBucketOptions(OptionBlockTimeOut):
pass
class GetBucketOptions(OptionBlockTimeOut):
pass
class FlushBucketOptions(OptionBlockTimeOut):
pass
|
"""
This module implements the core class hierarchy for implementing EO tasks. An EO task is any class that inherits
from the abstract EOTask class. Each EO task has to implement the execute method; invoking __call__ on an EO task
instance invokes the execute method. EO tasks are meant primarily to operate on EO patches (i.e. instances of EOPatch).
EO task classes are generally lightweight (i.e. not too complicated), short, and do one thing well. For example, an
EO task might take as input an EOPatch containing cloud mask and return as a result the cloud coverage for that mask.
Credits:
Copyright (c) 2017-2019 Matej Aleksandrov, Matej Batič, Andrej Burja, Eva Erzin (Sinergise)
Copyright (c) 2017-2019 Grega Milčinski, Matic Lubej, Devis Peresutti, Jernej Puc, Tomislav Slijepčević (Sinergise)
Copyright (c) 2017-2019 Blaž Sovdat, Jovan Višnjić, Anže Zupanc, Lojze Žust (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import sys
import logging
import datetime
import inspect
from collections import OrderedDict
from abc import ABC, abstractmethod
import attr
from .utilities import FeatureParser
LOGGER = logging.getLogger(__name__)
class EOTask(ABC):
"""Base class for EOTask."""
def __new__(cls, *args, **kwargs):
"""Stores initialization parameters and the order to the instance attribute `init_args`."""
self = super().__new__(cls)
init_args = OrderedDict()
for arg, value in zip(inspect.getfullargspec(self.__init__).args[1: len(args) + 1], args):
init_args[arg] = repr(value)
for arg in inspect.getfullargspec(self.__init__).args[len(args) + 1:]:
if arg in kwargs:
init_args[arg] = repr(kwargs[arg])
self.private_task_config = _PrivateTaskConfig(init_args=init_args)
return self
def __mul__(self, other):
"""Creates a composite task of this and passed task."""
return CompositeTask(other, self)
def __call__(self, *eopatches, monitor=False, **kwargs):
"""Executes the task."""
# if monitor:
# return self.execute_and_monitor(*eopatches, **kwargs)
return self._execute_handling(*eopatches, **kwargs)
def execute_and_monitor(self, *eopatches, **kwargs):
""" In the current version nothing additional happens in this method
"""
return self._execute_handling(*eopatches, **kwargs)
def _execute_handling(self, *eopatches, **kwargs):
""" Handles measuring execution time and error propagation
"""
self.private_task_config.start_time = datetime.datetime.now()
try:
return_value = self.execute(*eopatches, **kwargs)
self.private_task_config.end_time = datetime.datetime.now()
return return_value
except BaseException as exception:
traceback = sys.exc_info()[2]
# Some special exceptions don't accept an error message as a parameter and raise a TypeError in such case.
try:
errmsg = 'During execution of task {}: {}'.format(self.__class__.__name__, exception)
extended_exception = type(exception)(errmsg)
except TypeError:
extended_exception = exception
raise extended_exception.with_traceback(traceback)
@abstractmethod
def execute(self, *eopatches, **kwargs):
""" Implement execute function
"""
raise NotImplementedError
@staticmethod
def _parse_features(features, new_names=False, rename_function=None, default_feature_type=None,
allowed_feature_types=None):
""" See eolearn.core.utilities.FeatureParser class.
"""
return FeatureParser(features, new_names=new_names, rename_function=rename_function,
default_feature_type=default_feature_type, allowed_feature_types=allowed_feature_types)
@attr.s(cmp=False)
class _PrivateTaskConfig:
""" A container for general EOTask parameters required during EOWorkflow and EOExecution
:param init_args: A dictionary of parameters and values used for EOTask initialization
:type init_args: OrderedDict
:param uuid: A unique hexadecimal identifier string a task gets in EOWorkflow
:type uuid: str or None
:param start_time: Time when task execution started
:type start_time: datetime.datetime or None
:param end_time: Time when task execution ended
:type end_time: datetime.datetime or None
"""
init_args = attr.ib()
uuid = attr.ib(default=None)
start_time = attr.ib(default=None)
end_time = attr.ib(default=None)
def __add__(self, other):
return _PrivateTaskConfig(init_args=OrderedDict(list(self.init_args.items()) + list(other.init_args.items())))
class CompositeTask(EOTask):
"""Creates a task that is composite of two tasks.
Note: Instead of directly using this task it might be more convenient to use `'*'` operation between tasks.
Example: `composite_task = task1 * task2`
:param eotask1: Task which will be executed first
:type eotask1: EOTask
:param eotask2: Task which will be executed on results of first task
:type eotask2: EOTask
"""
def __init__(self, eotask1, eotask2):
self.eotask1 = eotask1
self.eotask2 = eotask2
self.private_task_config = eotask1.private_task_config + eotask2.private_task_config
def execute(self, *eopatches, **kwargs):
return self.eotask2.execute(self.eotask1.execute(*eopatches, **kwargs))
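# Minimal illustrative subclass (not part of the library; AddOneTask is a hypothetical name):
# an EOTask only needs to implement execute(), and calling the instance forwards to it.
#
# class AddOneTask(EOTask):
#     def execute(self, value):
#         return value + 1
#
# AddOneTask()(1)                    # -> 2
# (AddOneTask() * AddOneTask())(1)   # -> 3, composition via CompositeTask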
|
from kol.request.GenericRequest import GenericRequest
from kol.manager import PatternManager
import kol.Error as Error
from kol.util import Report
class RespondToTradeRequest(GenericRequest):
def __init__(self, session, tradeid, items=None, meat=0, message=""):
super(RespondToTradeRequest, self).__init__(session)
self.url = session.serverURL + "makeoffer.php"
self.requestData['action'] = 'counter'
self.requestData['pwd'] = session.pwd
self.requestData['whichoffer'] = tradeid
self.requestData['offermeat'] = meat
self.requestData['memo2'] = message
ctr = 1
if items is not None:
for item in items:
self.requestData['whichitem' + str(ctr)] = item['itemID']
self.requestData['howmany' + str(ctr)] = item['quantity']
ctr += 1
def parseResponse(self):
noMeatPattern = PatternManager.getOrCompilePattern('traderHasNotEnoughMeat')
if noMeatPattern.search(self.responseText):
raise Error.Error("You don't have as much meat as you're promising.", Error.NOT_ENOUGH_MEAT)
noItemsPattern = PatternManager.getOrCompilePattern('traderHasNotEnoughItems')
if noItemsPattern.search(self.responseText):
raise Error.Error("You don't have as many items as you're promising.", Error.NOT_ENOUGH_ITEMS)
#Not testing for an offer being cancelled due to a bug in KoL - space reserved
successPattern = PatternManager.getOrCompilePattern('tradeResponseSentSuccessfully')
if successPattern.search(self.responseText):
Report.trace("request", "Response to trade " + str(self.requestData['whichoffer']) + ' sent successfully.')
else:
raise Error.Error("Unknown error sending response to trade " + str(self.requestData['whichoffer']), Error.REQUEST_GENERIC)
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Caps(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "volume"
_path_str = "volume.caps"
_valid_props = {"x", "y", "z"}
# x
# -
@property
def x(self):
"""
The 'x' property is an instance of X
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.caps.X`
- A dict of string/value properties that will be passed
to the X constructor
Supported dict properties:
fill
Sets the fill ratio of the `caps`. The default
fill value of the `caps` is 1 meaning that they
are entirely shaded. On the other hand Applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill ratio of the `slices`. The
default fill value of the x `slices` is 1
meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges.
Returns
-------
plotly.graph_objs.volume.caps.X
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
The 'y' property is an instance of Y
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.caps.Y`
- A dict of string/value properties that will be passed
to the Y constructor
Supported dict properties:
fill
Sets the fill ratio of the `caps`. The default
fill value of the `caps` is 1 meaning that they
are entirely shaded. On the other hand Applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill ratio of the `slices`. The
default fill value of the y `slices` is 1
meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges.
Returns
-------
plotly.graph_objs.volume.caps.Y
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# z
# -
@property
def z(self):
"""
The 'z' property is an instance of Z
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.caps.Z`
- A dict of string/value properties that will be passed
to the Z constructor
Supported dict properties:
fill
Sets the fill ratio of the `caps`. The default
fill value of the `caps` is 1 meaning that they
are entirely shaded. On the other hand Applying
a `fill` ratio less than one would allow the
creation of openings parallel to the edges.
show
Sets the fill ratio of the `slices`. The
default fill value of the z `slices` is 1
meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than
one would allow the creation of openings
parallel to the edges.
Returns
-------
plotly.graph_objs.volume.caps.Z
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
x
:class:`plotly.graph_objects.volume.caps.X` instance or
dict with compatible properties
y
:class:`plotly.graph_objects.volume.caps.Y` instance or
dict with compatible properties
z
:class:`plotly.graph_objects.volume.caps.Z` instance or
dict with compatible properties
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Caps object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.volume.Caps`
x
:class:`plotly.graph_objects.volume.caps.X` instance or
dict with compatible properties
y
:class:`plotly.graph_objects.volume.caps.Y` instance or
dict with compatible properties
z
:class:`plotly.graph_objects.volume.caps.Z` instance or
dict with compatible properties
Returns
-------
Caps
"""
super(Caps, self).__init__("caps")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.volume.Caps
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.Caps`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class DescribeCdnDeletedDomainsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2018-05-10', 'DescribeCdnDeletedDomains')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
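# Illustrative usage (AcsClient setup omitted; page values are arbitrary):
# request = DescribeCdnDeletedDomainsRequest()
# request.set_PageNumber(1)
# request.set_PageSize(50)
# response = client.do_action_with_exception(request)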
|
import json
import logging
import sys
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from pyhive import hive
enabled = True
except ImportError, e:
enabled = False
COLUMN_NAME = 0
COLUMN_TYPE = 1
types_map = {
'BIGINT': TYPE_INTEGER,
'TINYINT': TYPE_INTEGER,
'SMALLINT': TYPE_INTEGER,
'INT': TYPE_INTEGER,
'DOUBLE': TYPE_FLOAT,
'DECIMAL': TYPE_FLOAT,
'FLOAT': TYPE_FLOAT,
'REAL': TYPE_FLOAT,
'BOOLEAN': TYPE_BOOLEAN,
'TIMESTAMP': TYPE_DATETIME,
'DATE': TYPE_DATETIME,
'CHAR': TYPE_STRING,
'STRING': TYPE_STRING,
'VARCHAR': TYPE_STRING
}
class Hive(BaseSQLQueryRunner):
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"host": {
"type": "string"
},
"port": {
"type": "number"
},
"database": {
"type": "string"
},
"username": {
"type": "string"
}
},
"required": ["host"]
}
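    # Example of a configuration dict accepted by this schema (values are
    # illustrative): {"host": "hive.example.com", "port": 10000,
    # "database": "default", "username": "redash"}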
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "hive"
def __init__(self, configuration):
super(Hive, self).__init__(configuration)
def _get_tables(self, schema):
try:
schemas_query = "show schemas"
tables_query = "show tables in %s"
columns_query = "show columns in %s"
for schema_name in filter(lambda a: len(a) > 0, map(lambda a: str(a['database_name']), self._run_query_internal(schemas_query))):
for table_name in filter(lambda a: len(a) > 0, map(lambda a: str(a['tab_name']), self._run_query_internal(tables_query % schema_name))):
columns = filter(lambda a: len(a) > 0, map(lambda a: str(a['field']), self._run_query_internal(columns_query % table_name)))
if schema_name != 'default':
table_name = '{}.{}'.format(schema_name, table_name)
schema[table_name] = {'name': table_name, 'columns': columns}
except Exception, e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
return schema.values()
def run_query(self, query):
connection = None
try:
connection = hive.connect(**self.configuration.to_dict())
cursor = connection.cursor()
cursor.execute(query)
column_names = []
columns = []
for column in cursor.description:
column_name = column[COLUMN_NAME]
column_names.append(column_name)
columns.append({
'name': column_name,
'friendly_name': column_name,
'type': types_map.get(column[COLUMN_TYPE], None)
})
rows = [dict(zip(column_names, row)) for row in cursor]
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
cursor.close()
except KeyboardInterrupt:
connection.cancel()
error = "Query cancelled by user."
json_data = None
except Exception as e:
logging.exception(e)
raise sys.exc_info()[1], None, sys.exc_info()[2]
finally:
if connection:
connection.close()
return json_data, error
register(Hive)
|
class Buffer:
def __init__(self):
self.lst = list()
def add(self, *a):
for value in a:
self.lst.append(value)
while len(self.lst) >= 5:
s = 0
for i in range(5):
s += self.lst.pop(0)
print(s)
def get_current_part(self):
return self.lst
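if __name__ == "__main__":
    # Minimal usage sketch (not part of the original snippet): every full group
    # of five values is summed and printed as it completes.
    buf = Buffer()
    buf.add(*range(10))            # prints 10 (0..4) then 35 (5..9)
    print(buf.get_current_part())  # [] - no leftover values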
|
class GeometryObject(APIObject, IDisposable):
""" The common base class for all geometric primitives. """
def Dispose(self):
""" Dispose(self: APIObject,A_0: bool) """
pass
def Equals(self, obj):
"""
Equals(self: GeometryObject,obj: object) -> bool
Determines whether the specified System.Object is equal to the current
System.Object.
obj: Another object.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: GeometryObject) -> int
Gets the integer value of the geometry object as hash code
"""
pass
def ReleaseManagedResources(self, *args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: GeometryObject) """
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __eq__(self, *args):
""" x.__eq__(y) <==> x==y """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __ne__(self, *args):
pass
GraphicsStyleId = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The ElementId of the GeometryObject's GraphicsStyle
Get: GraphicsStyleId(self: GeometryObject) -> ElementId
"""
IsElementGeometry = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Indicates whether this geometry is obtained directly from an Element.
Get: IsElementGeometry(self: GeometryObject) -> bool
"""
Visibility = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The visibility.
Get: Visibility(self: GeometryObject) -> Visibility
"""
|
import os
import math
import argparse
import gym
from agents.q_agent import Q, Agent, Trainer
RECORD_PATH = os.path.join(os.path.dirname(__file__), "./upload")
def main(episodes, render, monitor):
env = gym.make("CartPole-v0")
q = Q(
env.action_space.n,
env.observation_space,
bin_size=[7, 7, 7, 7],
low_bound=[-5, -0.5, -5, -0.5],
high_bound=[5, 0.5, 5, 0.5]
)
agent = Agent(q, epsilon=0.05)
learning_decay = lambda lr, t: 1 / (t + 1) ** 0.5
epsilon_decay = lambda eps, t: 1 / (t + 1) ** 0.5
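    # Both schedules decay as 1 / sqrt(t + 1), so the learning rate and the
    # exploration rate (as consumed by Trainer) shrink slowly over time.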
trainer = Trainer(
agent,
gamma=0.95,
learning_rate=0.1, learning_rate_decay=learning_decay,
epsilon=1.0, epsilon_decay=epsilon_decay,
max_step=250)
if monitor:
env.monitor.start(RECORD_PATH)
trainer.train(env, episode_count=episodes, render=render)
if monitor:
env.monitor.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="train & run cartpole ")
parser.add_argument("--episode", type=int, default=1000, help="episode to train")
parser.add_argument("--render", action="store_true", help="render the screen")
parser.add_argument("--monitor", action="store_true", help="monitor")
parser.add_argument("--upload", type=str, default="", help="upload key to openai gym (training is not executed)")
args = parser.parse_args()
if args.upload:
if os.path.isdir(RECORD_PATH):
gym.upload(RECORD_PATH, api_key=args.upload)
else:
main(args.episode, args.render, args.monitor)
|
import os
import subprocess
import numpy as np
from tqdm import tqdm
from typing import Dict
MAX_FREQ = 7999
def to_str(v):
if isinstance(v, tuple):
s = " ".join(str(x) for x in v)
elif isinstance(v, float) or isinstance(v, int):
s = str(v)
else:
assert False
return s
def build_sox_distortions(audio_file, params):
param_str = " ".join([k + " " + to_str(v) for k, v in params.items()])
sox_params = "sox {} -p {} ".format(audio_file, param_str)
return sox_params
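# For example (illustrative), build_sox_distortions("in.wav", {"tempo": 1.1, "gain -n": -3})
# returns roughly "sox in.wav -p tempo 1.1 gain -n -3", i.e. a sox pipeline
# segment that applies the given effects in order.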
def build_sox_noise(
audio_file,
amod_lowpass_cutoff=0.1,
lowpass_cutoff=MAX_FREQ,
highpass_cutoff=1,
noise_gain=-4,
):
"""
play original.wav synth whitenoise lowpass 0.1 synth whitenoise amod gain -n 0 lowpass 100 highpass 1
"""
sox_params = "sox {audio_file} -p synth whitenoise lowpass {amod_lowpass_cutoff} synth whitenoise amod gain -n {noise_gain} lowpass {lowpass_cutoff} highpass {highpass_cutoff}".format(
audio_file=audio_file,
amod_lowpass_cutoff=amod_lowpass_cutoff,
lowpass_cutoff=lowpass_cutoff,
highpass_cutoff=highpass_cutoff,
noise_gain=noise_gain,
)
return sox_params
def build_varying_amplitude_factor(audio_file, lowpass_cutoff=1, ac_gain=-9):
ac = "sox {} -p synth whitenoise lowpass {} gain -n {}".format(
audio_file, lowpass_cutoff, ac_gain
)
dc = "sox {} -p gain -90 dcshift 0.5".format(audio_file)
return "sox -m <({}) <({}) -p".format(ac, dc)
def multiply_signals(signal_a, signal_b):
return ("sox -T <({signal_a}) <({signal_b}) -p").format(
signal_a=signal_a, signal_b=signal_b,
)
def build_sox_interference(
interfere_file, interfere_signal, lowpass_cutoff=1, ac_gain=-6
):
factor = build_varying_amplitude_factor(interfere_file, lowpass_cutoff, ac_gain)
return multiply_signals(factor, interfere_signal)
def add_signals_trim_to_len(original, signals, augmented):
signals_to_add = " ".join(["<(%s)" % s for s in signals])
sox_cmd = "sox -m {signals} -b 16 {augmented} trim 0 $(soxi -D {original})".format(
signals=signals_to_add, original=original, augmented=augmented
)
return sox_cmd
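# The assembled command mixes the processed signals and trims the result to the
# duration of the original file, e.g. (illustrative):
#   sox -m <(signal cmd) <(noise cmd) <(interference cmd) -b 16 out.wav trim 0 $(soxi -D in.wav)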
def build_random_bandpass(min_low=50, min_band_width=100, max_high=1000) -> Dict:
d = {}
max_high_cutoff = MAX_FREQ
if np.random.choice([True, False], p=[0.5, 0.5]):
lowpass = int(round(np.random.uniform(low=min_low, high=MAX_FREQ)))
d["lowpass"] = lowpass
max_high_cutoff = lowpass - min_band_width
if np.random.choice([True, False], p=[0.5, 0.5]):
highpass = int(
round(np.random.uniform(low=1, high=min(max_high, max_high_cutoff)))
)
d["highpass"] = highpass
return d
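# A possible (random) result is e.g. {"lowpass": 3200, "highpass": 420}; either
# key may also be absent, since each filter is added with probability 0.5.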
def augment_with_sox(original_file, audio_files, augmented_file):
interfere_file = np.random.choice(audio_files)
min_SNR = 20 # normal:20, less:30, evenless:40
min_SIR = 5 # normal:10, less:20, evenless:30
signal_gain = round(np.random.uniform(low=-10, high=0), 2)
signal_params = {
"tempo": round(np.random.triangular(left=0.7, mode=1.0, right=1.3), 2),
"pitch": int(
round(np.random.triangular(left=-200, mode=0, right=200))
), # normal 100, less: 50, evenless: 30
"reverb": (int(round(np.random.uniform(low=0, high=50))), 50, 100, 100, 0, 0,),
"gain -n": signal_gain,
}
signal_params.update(build_random_bandpass(1000, 1000, 100))
interfere_params = {
"tempo": round(np.random.uniform(low=0.6, high=1.4), 2),
"pitch": int(round(np.random.uniform(low=-500, high=500))),
"reverb": (int(round(np.random.uniform(low=0, high=100))), 50, 100, 100, 0, 0),
"gain -n": round(np.random.uniform(low=-50, high=signal_gain - min_SIR), 2),
}
interfere_params.update(build_random_bandpass(50, 100, 1000))
# params = {'signal_params':signal_params,'interfere_params':interfere_params,'noise_power':noise_power}
# pprint(params)
signal = build_sox_distortions(original_file, signal_params)
interfere_signal = build_sox_distortions(interfere_file, interfere_params)
noise_power = round(np.random.uniform(-60, signal_gain - min_SNR), 2)
lowpass = int(round(np.random.uniform(low=100, high=MAX_FREQ)))
highpass = int(round(np.random.uniform(low=1, high=lowpass)))
noise = build_sox_noise(
original_file, np.random.uniform(0.1, 2), lowpass, highpass, noise_power
)
interf = build_sox_interference(
interfere_file,
interfere_signal,
lowpass_cutoff=np.random.uniform(0.5, 2),
ac_gain=int(round(np.random.uniform(-9, -3))),
)
sox_cmd = add_signals_trim_to_len(
original_file, [signal, noise, interf], augmented_file
)
FNULL = open(os.devnull, "w")
subprocess.call(["bash", "-c", sox_cmd], stdout=FNULL, stderr=subprocess.STDOUT)
# subprocess.call(["bash", "-c", sox_cmd])
# output = subprocess.check_output(["bash", "-c", sox_cmd])
# if len(output)>0 and 'FAIL' in output:
# print(output)
# return 1 if len(output)>0 else 0
def augment_with_specific_params():
signal_gain = 0
signal_params = dict(tempo=1.0, pitch=0, reverb=0)
signal_params["gain -n"] = 0
signal = build_sox_distortions(original, signal_params)
interfere_signal = build_sox_distortions(
interfering, dict(gain=signal_gain - 10, tempo=0.8, pitch=100, reverb=50)
)
noise = build_sox_noise(
original, noise_gain=signal_gain - 20, lowpass_cutoff=6000, highpass_cutoff=10
)
interf = build_sox_interference(interfering, interfere_signal)
sox_cmd = add_signals_trim_to_len(original, [signal, noise, interf], augmented)
subprocess.call(["bash", "-c", sox_cmd])
if __name__ == "__main__":
import librosa
original = "../../original.wav"
augmented = "/tmp/augmented.wav"
interfering = "../../interference.wav"
# augment_with_specific_params()
for k in range(9):
augment_with_sox(original, [interfering], "/tmp/augmented_%d.wav" % k)
# assert False
# path = os.environ['HOME']+"/data/asr_data/SPANISH"
# audio_files = librosa.util.find_files(path)
#
# with open('spanish_train_manifest.csv') as f:
# audio_text_files = f.readlines()
# audio_files = [x.strip().split(",")[0] for x in audio_text_files]
#
# for k in tqdm(range(100000)):
# original = np.random.choice(audio_files)
# random_augmentation(original, audio_files, augmented)
|
#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import logging
import os
import sys
import tempfile
import shutil
import unittest
import re
# Import this first before manipulating sys.path to ensure it can load fine.
import logging_utils
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import test_env
test_env.setup_test_env()
from depot_tools import auto_stub
_LOG_HEADER = r'^%s \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d: ' % os.getpid()
class TestLoggingUtils(auto_stub.TestCase):
def test_Capture(self):
root = logging.RootLogger(logging.DEBUG)
with logging_utils.CaptureLogs('foo', root) as log:
root.debug('foo')
result = log.read()
self.assertTrue(re.match(_LOG_HEADER + 'DEBUG foo\n$', result), result)
def test_prepare_logging(self):
root = logging.RootLogger(logging.DEBUG)
tmp_dir = tempfile.mkdtemp(prefix='logging_utils_test')
try:
filepath = os.path.join(tmp_dir, 'test.log')
logging_utils.prepare_logging(filepath, root)
root.debug('foo')
with open(filepath, 'rb') as f:
result = f.read()
finally:
shutil.rmtree(tmp_dir)
# It'd be nice to figure out a way to ensure it's properly in UTC but it's
# tricky to do reliably.
self.assertTrue(re.match(_LOG_HEADER + 'DEBUG foo\n$', result), result)
if __name__ == '__main__':
unittest.main()
|
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.
import torch
import npcomp.frontends.pytorch as torch_mlir
import npcomp.frontends.pytorch.test as test
# RUN: %PYTHON %s | FileCheck %s
dev = torch_mlir.mlir_device()
t0 = torch.randn((4,4), device=dev)
t1 = torch.randn((4,4), device=dev)
t2 = t0 + t1
#
# Check the result tensor against the CPU
#
t0_cpu = t0.to('cpu')
t1_cpu = t1.to('cpu')
t2_cpu = t2.to('cpu')
print (t0_cpu, " +\n", t1_cpu, " =\n", t2_cpu)
# CHECK: PASS! add2 check
test.compare(t2, t0_cpu + t1_cpu, "add2")
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import composite as C
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class NetTanhGrad(nn.Cell):
def __init__(self):
super(NetTanhGrad, self).__init__()
self.tanh_grad = G.TanhGrad()
def construct(self, y, grad):
return self.tanh_grad(y, grad)
class NetTanhGradGrad(nn.Cell):
def __init__(self, forward_net):
super(NetTanhGradGrad, self).__init__()
self.forward_net = forward_net
self.gradOps = C.GradOperation(get_all=True, sens_param=True)
def construct(self, y, grad, dout):
backward_net = self.gradOps(self.forward_net)
return backward_net(y, grad, dout)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def tanh_grad_grad_base(dtype, loss):
np.random.seed(1)
shape = (4, 2)
y_np = (np.random.rand(*shape) * 2 - 1).astype(dtype)
grad_np = (np.random.rand(*shape) * 20 - 10).astype(dtype)
dout_np = (np.random.rand(*shape) * 20 - 10).astype(dtype)
y_np_32 = y_np.astype(np.float32)
grad_np_32 = grad_np.astype(np.float32)
dout_np_32 = dout_np.astype(np.float32)
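    # Reference values (sketch of the math): the forward op computes
    # tanh_grad(y, g) = g * (1 - y^2), so its gradients are d/dy = -2*g*y and
    # d/dg = 1 - y^2; multiplying by the incoming sensitivity dout gives the
    # expected dy_np and dgrad_np below.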
dy_np = (dout_np_32 * grad_np_32 * (-2.0) * y_np_32).astype(dtype)
dgrad_np = (dout_np_32 * (1 - y_np_32 * y_np_32)).astype(dtype)
y_ms = Tensor(y_np)
grad_ms = Tensor(grad_np)
dout_ms = Tensor(dout_np)
forward_net = NetTanhGrad()
net = NetTanhGradGrad(forward_net)
dy_ms, dgrad_ms = net(y_ms, grad_ms, dout_ms)
assert np.allclose(dy_ms.asnumpy(), dy_np, loss, loss)
assert np.allclose(dgrad_ms.asnumpy(), dgrad_np, loss, loss)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tanh_grad_grad_float16():
tanh_grad_grad_base(np.float16, 1e-3)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tanh_grad_grad_float32():
tanh_grad_grad_base(np.float32, 1e-4)
|
from rest_framework.permissions import SAFE_METHODS, BasePermission
class IsAdminOrReadOnly(BasePermission):
"""
The request is authenticated as an Admin user or is Read Only
"""
def has_permission(self, request, view):
return bool(
request.method in SAFE_METHODS or
request.user and
request.user.is_staff
)
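# Illustrative usage sketch (view, model and serializer names are hypothetical):
#   from rest_framework import viewsets
#
#   class ArticleViewSet(viewsets.ModelViewSet):
#       queryset = Article.objects.all()
#       serializer_class = ArticleSerializer
#       permission_classes = [IsAdminOrReadOnly]
# Anonymous or non-staff users may then only use GET/HEAD/OPTIONS requests;
# write methods require request.user.is_staff.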
|
#!/usr/bin/env python3
import argparse
import io
import sys
from urllib.request import urlopen
import urllib.error
import time
import datetime
from retrying import retry
URL = "http://unreliable.labs.crossref.org/error"
ONE_SECOND=1000
ONE_HOUR=((ONE_SECOND*60)*60)
ONE_DAY=(ONE_HOUR*24)
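# Sketch of the retrying library's behaviour as configured here: the wait time
# grows exponentially (roughly doubling per failed attempt) and is capped at
# ONE_HOUR * 6 before the next try.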
@retry(wait_exponential_multiplier=1000,wait_exponential_max=(ONE_HOUR * 6))
def fetch(url):
global s
d = time.time() - s
print("time: " + str(d))
s = time.time()
try:
with urlopen(url) as response:
result = response.read().decode('utf8')
print("Done fetching...")
return result
except urllib.error.URLError as e:
print("Error: " + str(e))
raise e
def main():
print("Starting...")
print(fetch(ARGS.url))
print("Done")
s = time.time()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Stubbornly, but intelligently keep retrying to GET the same URL")
parser.add_argument("-u", "--url", help="the URL to be stubborn about",
type=str, default=URL)
ARGS = parser.parse_args()
main()
|
# url="http://www.baidu.com/?page=/wd=xiaopangzi"
'''
url1="www.baidu.com/?page="
url2="wd=xiaopangzi"
while(1):
for i in range(1,100):
print(url1,i,url2)
i=i+1
break
for i in range(100):
part1=www.baidu.com/?page=
res = part1 +
'''
'''
A1=[1,1,1,1,1,2,2,2,2,2,3]
a1=[]
for i in A1:
if i not in a1:
a1.append(i)
print a1
#A2=[1,2,3]
#A2.append(10)
#print A2
'''
'''
a4 = [['liuyanyun',22,['360',100]],['jingjing',12,['baidu',1]],['taotao',-1,['Google',0]]]
a4.sort(key=lambda x:x[2][1])
print(a4)
'''
'''
t=(1,2,3,7,9,0,5)
print(t[ : -1])
'''
'''
a=0
b=0
while a < 10 :
a=a+1
print("a= {}".format(a))
if a%10==0:
print("")
print(a)
'''
'''
i=0
sum=0
while i<100:
i=i+1
# if i%2==0:
sum=i+sum
print("i={},sum={}".format(i,sum))
'''
'''
#print the sum of the even numbers between 1 and 100
i=0
sum=0
while i<=100:
i+=2
sum=sum+i
print(sum)
'''
'''
#1-100 (while loop): print the sum of the odd numbers and the sum of the even numbers
i=0
sum=0
a=0
while i<100:
i=i+1
if i%2==0:
sum=sum+i
else:
a=a+i
print(sum,a)
'''
'''
# 1-100 (for loop): print the sum of the odd numbers and the sum of the even numbers
sum=0
a=0
for i in range(1,101):
if i%2==0:
sum=sum+i
else:
a=a+i
print(sum,a)
'''
#for i in range(1,100,2):
# print(i,end="\t")
# print a 10-row by 10-column grid of *
#for i in range(1,11):
# for j in range(1,j):
# print(i,j)
#for i in range(1,11):
# for j in range(1,11):
# print("*")
# print()
# print 10 rows by 10 columns of content
#for i in range(1,11):
# for j in range(1,11):
# print ten rows of content, each row showing *
'''
for i in range(1,11):
for j in range(1,11):
print("*")
if i%10==0:
print("")
'''
'''
# print ten rows of content, each row showing *
for i in range(1,11):
print("*"*10)
'''
'''
#print ten lines with different content: line 1 prints ×, line 2 prints ××, and so on
for i in range(1,11):
print("a"*i)
'''
'''
#print nine lines: line 1 prints 1, line 2 prints 1 2, ..., line 9 prints 1 2 3 4 5 6 7 8 9
for i in range(1,10):
for j in range(1,i+1):
print(j,end=" ")
print("")
#print("")
'''
"""
***
for i in range(1,10):
for j in range(1,i+1):
for z in range(1,j+1):
print(z,end=" ")
print("")
print("")
"""
'''
#print the nine-by-nine multiplication table
for i in range (1,10):
for j in range(1,i+1):
s=i*j
print(j,"x",i,"=",s," ",end="")
print("")
'''
'''
# compute the sum of ten 99s and print it
sum=0
for i in range(10):
    sum+=99
print(sum)
'''
'''
#compute the sum from 1 to 100 and print it
sum = 0
for i in range(101):
sum+=i
print(sum)
'''
#compute the factorial of 10 (1x2x3x...x9x10)
'''
#brute-force a password with Python
import crypt
def testPass(cryptPass):
salt = cryptPass[0:2]
dictFile = open("/root/Desktop/dictionary.txt","r")
for word in dictFile.readlines():
word = word.strip("\n")
cryptWord=crypt.crypt(word,salt)
if cryptWord == cryptPass:
print ("[+]Found Password: " +word)
else:
print("[-]Password {} Not found.".format(word))
def main():
passFile = open('/root/Desktop/2.txt')
for line in passFile.readlines():
if ':' in line:
user = line.split(':')[0]
cryptPass = line.split(':')[1].strip(' ')
print ("[*] Cracking Password For:" + user)
testPass(cryptPass)
if __name__ == "__main__":
main()
'''
import crypt
import itertools
def main():
    flag=0
    salt='AB'
    cryptPass='5I64J9ZNvp2'
    # Lazily generate 7-character candidates over lower- and upper-case letters.
    test=(''.join(x) for x in itertools.product("qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM", repeat=7))
    while flag==0:
        word=next(test)
        cryptWord=crypt.crypt(word,salt)
        if cryptWord == cryptPass:
            print('[+] Found Password:'+word)
            flag=1
if __name__ == '__main__':
    main()
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.auth_id_ntoken_privilege_item import AuthIdNtokenPrivilegeItem # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestAuthIdNtokenPrivilegeItem(unittest.TestCase):
"""AuthIdNtokenPrivilegeItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAuthIdNtokenPrivilegeItem(self):
"""Test AuthIdNtokenPrivilegeItem"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_1.models.auth_id_ntoken_privilege_item.AuthIdNtokenPrivilegeItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
import re
import warnings
from functools import partial, reduce
from typing import Any, Optional, List, Tuple, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like, \
is_dict_like
from pyspark import sql as spark
from pyspark.sql import functions as F, Column
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, ShortType, StructField, StructType,
to_arrow_type)
from pyspark.sql.utils import AnalysisException
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import default_session, validate_arguments_and_invoke_function
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.metadata import Metadata
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.typedef import infer_pd_series_spark_type
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# Both patterns seek the footer string in Pandas' string representations.
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
class DataFrame(_Frame):
"""
    Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
    internally.
    :ivar _sdf: Spark DataFrame instance
    :ivar _metadata: Metadata related to column names and index information.
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame or Spark DataFrame
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas DataFrame, other arguments should not be used.
If `data` is a Spark DataFrame, all other arguments except `index` should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
If `data` is a Spark DataFrame, `index` is expected to be `Metadata`.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
self._init_from_pandas(data)
elif isinstance(data, spark.DataFrame):
assert columns is None
assert dtype is None
assert not copy
self._init_from_spark(data, index)
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
self._init_from_pandas(pdf)
def _init_from_pandas(self, pdf):
metadata = Metadata.from_pandas(pdf)
reset_index = pdf.reset_index()
reset_index.columns = metadata.columns
schema = StructType([StructField(name, infer_pd_series_spark_type(col),
nullable=bool(col.isnull().any()))
for name, col in reset_index.iteritems()])
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
reset_index[name] = col.replace({np.nan: None})
self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema),
metadata)
def _init_from_spark(self, sdf, metadata=None):
self._sdf = sdf
if metadata is None:
self._metadata = Metadata(data_columns=self._sdf.schema.fieldNames())
else:
self._metadata = metadata
@property
def _index_columns(self):
return [self._sdf.__getitem__(field)
for field in self._metadata.index_columns]
def _reduce_for_stat_function(self, sfun):
"""
        Applies sfun to each column and returns a pd.Series where the number of rows equals the
        number of columns.
        :param sfun: either a 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
"""
from inspect import signature
exprs = []
num_args = len(signature(sfun).parameters)
for col in self.columns:
col_sdf = self._sdf[col]
col_type = self._sdf.schema[col].dataType
if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'):
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(col))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
return row # Return first row as a Series
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default ‘NaN’
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns’ elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By
default, ‘l’ will be used for all columns except columns of numbers, which default
to ‘r’.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to ‘ascii’ on
Python 2 and ‘utf-8’ on Python 3.
decimal : str, default ‘.’
Character recognized as decimal separator, e.g. ‘,’ in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default ‘l’
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LateX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently supported only when the DataFrame has a single index.
"""
from databricks.koalas.series import Series
if len(self._metadata.index_map) != 1:
raise KeyError('Currently supported only when the DataFrame has a single index.')
return Series(self._index_columns[0], anchor=self, index=[])
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
"""
if isinstance(keys, str):
keys = [keys]
else:
keys = list(keys)
for key in keys:
if key not in self.columns:
raise KeyError(key)
if drop:
data_columns = [column for column in self._metadata.data_columns if column not in keys]
else:
data_columns = self._metadata.data_columns
if append:
index_map = self._metadata.index_map + [(column, column) for column in keys]
else:
index_map = [(column, column) for column in keys]
metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map)
# Sync Spark's columns as well.
sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns])
if inplace:
self._metadata = metadata
self._sdf = sdf
else:
kdf = self.copy()
kdf._metadata = metadata
kdf._sdf = sdf
return kdf
def reset_index(self, level=None, drop=False, inplace=False):
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
"""
# TODO: add example of MultiIndex back. See https://github.com/databricks/koalas/issues/301
if len(self._metadata.index_map) == 0:
raise NotImplementedError('Can\'t reset index because there is no index.')
multi_index = len(self._metadata.index_map) > 1
def rename(index):
if multi_index:
return 'level_{}'.format(index)
else:
if 'index' not in self._metadata.data_columns:
return 'index'
else:
return 'level_{}'.format(index)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._metadata.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._metadata.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._metadata.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._metadata.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
                            raise KeyError('Level {} not found'.format(l))
                        else:
                            raise KeyError('Level {} must be same as name ({})'
                                           .format(l, self._metadata.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._metadata.index_map.copy()
for i in idx:
info = self._metadata.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(index_name)))
index_map.remove(info)
if drop:
new_index_map = []
metadata = self._metadata.copy(
data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns,
index_map=index_map)
columns = [name for _, name in new_index_map] + self._metadata.data_columns
if inplace:
self._metadata = metadata
self.columns = columns
else:
kdf = self.copy()
kdf._metadata = metadata
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, gets mapped to True values.
Everything else gets mapped to False values.
See Also
--------
Dataframe.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether it's
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
Dataframe.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
__index_level_0__ col1 col2
0 0 1 3
1 1 2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
def to_spark(self):
"""
Return the current DataFrame as a Spark DataFrame.
See Also
--------
DataFrame.to_koalas
"""
return self._sdf
def to_pandas(self):
"""
Return a Pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.columns])
pdf = sdf.toPandas()
if len(pdf) == 0 and len(sdf.schema) > 0:
# TODO: push to OSS
pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype()
for field in sdf.schema})
index_columns = self._metadata.index_columns
if len(index_columns) > 0:
append = False
for index_field in index_columns:
drop = index_field not in self._metadata.data_columns
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[self._metadata.data_columns]
index_names = self._metadata.index_names
if len(index_names) > 0:
if isinstance(pdf.index, pd.MultiIndex):
pdf.index.names = index_names
else:
pdf.index.name = index_names[0]
return pdf
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
sdf = self._sdf
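# Materialize each assignment on the underlying Spark DataFrame: Series use their
# backing Spark column, Spark Columns are used as-is, and scalars become literals.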
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, F.lit(c))
data_columns = self._metadata.data_columns
metadata = self._metadata.copy(
data_columns=(data_columns +
[name for name, _ in pairs if name not in data_columns]))
return DataFrame(sdf, metadata)
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in Pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in Pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
"""
Make a copy of this object's indices and data.
Returns
-------
copy : DataFrame
"""
return DataFrame(self._sdf, self._metadata.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, str):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._metadata.data_columns]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
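# Count the non-null values of each row over the selected columns; the resulting
# count drives the 'thresh', 'any' and 'all' predicates below.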
cnt = reduce(lambda x, y: x + y,
[F.when(self[column].notna()._scol, 1).otherwise(0)
for column in columns],
F.lit(0))
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(columns))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
def fillna(self, value=None, axis=None, inplace=False):
"""Fill NA/NaN values.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if value is None:
raise ValueError('Currently must specify value')
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
sdf = self._sdf.fillna(value)
if inplace:
self._sdf = sdf
else:
return DataFrame(sdf, self._metadata.copy())
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
-> 'DataFrame':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " +
"moment")
if lower is None and upper is None:
return self
sdf = self._sdf
numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
ShortType)
numeric_columns = [c for c in self.columns
if isinstance(sdf.schema[c].dataType, numeric_types)]
nonnumeric_columns = [c for c in self.columns
if not isinstance(sdf.schema[c].dataType, numeric_types)]
if lower is not None:
sdf = sdf.select(*[F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c)
for c in numeric_columns] + nonnumeric_columns)
if upper is not None:
sdf = sdf.select(*[F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c)
for c in numeric_columns] + nonnumeric_columns)
# Restore initial column order
sdf = sdf.select(list(self.columns))
return ks.DataFrame(sdf)
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._sdf.limit(n), self._metadata.copy())
@property
def columns(self):
"""The column labels of the DataFrame."""
return pd.Index(self._metadata.data_columns)
@columns.setter
def columns(self, names):
old_names = self._metadata.data_columns
if len(old_names) != len(names):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(names)))
sdf = self._sdf.select(self._metadata.index_columns +
[self[old_name]._scol.alias(new_name)
for (old_name, new_name) in zip(old_names, names)])
self._sdf = sdf
self._metadata = self._metadata.copy(data_columns=names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._metadata.data_columns],
index=self._metadata.data_columns)
def count(self):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
"""
return self._reduce_for_stat_function(_Frame._count_expr)
def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 'columns'
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [columns]
sdf = self._sdf.drop(*columns)
metadata = self._metadata.copy(
data_columns=[column for column in self.columns if column not in columns]
)
return DataFrame(sdf, metadata)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def sort_values(self, by, ascending=True, inplace=False, na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](self[colname]._scol)
for colname, asc in zip(by, ascending)]
kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy())
if inplace:
self._sdf = kdf._sdf
self._metadata = kdf._metadata
else:
return kdf
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in Pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
return self.sort_values(by=columns, ascending=False).head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns like in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
return self.sort_values(by=columns, ascending=True).head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._metadata.index_columns
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self[col]._scol.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self[col]._scol.isin(list(values)).alias(col) for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._sdf.select(_select_columns), self._metadata.copy())
def pipe(self, func, *args, **kwargs):
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ks.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(self, right: 'DataFrame', how: str = 'inner', on: str = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘inner’
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
A B
0 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
A B
0 2.0 x
1 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
if on is None and not left_index and not right_index:
raise ValueError("At least 'on' or 'left_index' and 'right_index' have to be set")
if on is not None and (left_index or right_index):
raise ValueError("Only 'on' or 'left_index' and 'right_index' can be set")
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
if on is None:
# FIXME Move index string to constant?
on = '__index_level_0__'
left_table = self._sdf.alias('left_table')
right_table = right._sdf.alias('right_table')
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = list(self.columns & right.columns)
if duplicate_columns:
for duplicate_column_name in duplicate_columns:
left_table = left_table.withColumnRenamed(duplicate_column_name,
duplicate_column_name + left_suffix)
right_table = right_table.withColumnRenamed(duplicate_column_name,
duplicate_column_name + right_suffix)
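# Join on the suffixed key when the join column also exists on both sides as a
# data column; otherwise the shared column name can be used directly.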
join_condition = (left_table[on] == right_table[on] if on not in duplicate_columns
else left_table[on + left_suffix] == right_table[on + right_suffix])
joined_table = left_table.join(right_table, join_condition, how=how)
if on in duplicate_columns:
# Merge duplicate key columns
joined_table = joined_table.withColumnRenamed(on + left_suffix, on)
joined_table = joined_table.drop(on + right_suffix)
# Remove auxiliary index
# FIXME Move index string to constant?
joined_table = joined_table.drop('__index_level_0__')
kdf = DataFrame(joined_table)
return kdf
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'DataFrame':
"""
Return a random sample of items from an axis of object.
Please call this function using a named argument, specifying the ``frac`` argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends on not only the seed, but also how the data is distributed across
machines and to some extent network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError("Function sample currently does not support specifying "
"exact number of items to return. Use frac instead.")
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
return DataFrame(sdf, self._metadata.copy())
def astype(self, dtype) -> 'DataFrame':
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
results = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype=dtype[col_name]))
else:
results.append(col)
else:
for col_name, col in self.iteritems():
results.append(col.astype(dtype=dtype))
sdf = self._sdf.select(
self._metadata.index_columns + list(map(lambda ser: ser._scol, results)))
return DataFrame(sdf, self._metadata.copy())
def _pd_getitem(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, str):
try:
return Series(self._sdf.__getitem__(key), anchor=self,
index=self._metadata.index_map)
except AnalysisException:
raise KeyError(key)
if np.isscalar(key) or isinstance(key, (tuple, str)):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._sdf.filter(bcol), self._metadata.copy())
raise NotImplementedError(key)
def __repr__(self):
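# Fetch one row beyond the display limit so truncation can be detected and the
# "[Showing only the first ...]" footer appended when needed.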
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_string = repr(pdf.iloc[:max_display_count])
if pdf_length > max_display_count:
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return repr_string
def _repr_html_(self):
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_html = pdf[:max_display_count]._repr_html_()
if pdf_length > max_display_count:
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return repr_html
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
# For now, we don't support realignment against different dataframes.
# This is too expensive in Spark.
# Are we assigning against a column?
if isinstance(value, Series):
assert value._kdf is self, \
"Cannot combine column argument because it comes from a different dataframe"
if isinstance(key, (tuple, list)):
assert isinstance(value.schema, StructType)
field_names = value.schema.fieldNames()
kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})
else:
kdf = self.assign(**{key: value})
self._sdf = kdf._sdf
self._metadata = kdf._metadata
def __getattr__(self, key: str) -> Any:
from databricks.koalas.series import Series
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_map)
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known sql aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
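# A global aggregation returns exactly one row; head(2) lets the assertion below
# catch any unexpected extra rows before unpacking the values.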
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
|
import argparse, subprocess, os, re
from jinja2 import Environment, FileSystemLoader
def GetBaseName(full_path):
return os.path.basename(full_path)
class PlantUMLCodeGeneration():
class StateType():
def __init__(self):
self.entry = None
self.during = None
self.exit = None
self.transitions = []
self.submachine = []
def StringMe(self):
return 'Entry: {} During: {} Exit: {} Transitions : {} Submachines: {}'.format(
str(self.entry),
str(self.during),
str(self.exit),
[transition.StringMe() for transition in self.transitions],
[submachine.StringMe() for submachine in self.submachine]
)
class TransitionType():
def __init__(self):
self.destination = None
self.conditions = None
self.actions = None
def StringMe(self):
return 'Destination: {} Condition: {} Action: {}'.format(
str(self.destination),
str(self.conditions),
str(self.actions)
)
class StateMachineType():
def __init__(self):
self.title = None
self.states = {}
self.notes = []
def StringMe(self):
return 'Title: {}\nStates: \n\t{}\nNotes: {}\n'.format(
str(self.title),
'\n\t'.join([state + ' ' + self.states[state].StringMe() for state in self.states]),
str(self.notes)
)
def __init__(self, plantuml_file):
if os.path.isfile(plantuml_file):
self.plantuml_file = plantuml_file
else:
raise Exception('File {} does not exist.'.format(plantuml_file))
def CheckUml(self):
if subprocess.call(['plantuml', '-checkonly', self.plantuml_file]) == 0:
return True
else:
return False
def GenerateCode(self, output_files, templates, no_check = False):
if (no_check == False):
if self.CheckUml() == False:
raise Exception('File {} contains UML errors.'.format(self.plantuml_file))
uml, uml_params = self.ParseStateMachine()
if len(output_files) == len(templates):
for out_file, template in zip(output_files, templates):
self.GenerateFromTemplate(out_file, template, uml, uml_params)
else:
raise Exception('Number of template and output files don\'t match.')
def ParseStateMachine(self):
uml = self.GetUMLText()
uml_params = self.ParseStateMachineAsDict(uml_text = self.GetUMLText(grouped=True))[0]
return uml, uml_params
def GetUMLText(self, grouped = False):
with open(self.plantuml_file, 'r') as plantuml_file:
uml = plantuml_file.readlines()
if grouped == False:
return uml
else:
#Group all strings containing \ at the end
uml_grouped = []
accumulated_string = ''
for line in uml:
#First strip the line to forget about leading and trailing
#spaces
line = line.strip()
#Remove aliases
line = re.sub(r'state\s+\".*\"\s+as', 'state', line)
#Accumulate all lines that end with \
if line.endswith('\\'):
accumulated_string += line[:-1]
else:
if accumulated_string == '':
uml_grouped.append(line)
else:
uml_grouped.append(accumulated_string + line)
accumulated_string = ''
return uml_grouped
def ParseStateMachineAsDict(self, uml_text, init_line = 0, submachine = False):
uml_params = self.StateMachineType()
line_num = init_line
opening_braces = 0
closing_braces = 0
while line_num < len(uml_text):
line = uml_text[line_num]
if submachine:
# Pending to refactor this
opening_braces += line.count('{')
closing_braces += line.count('}')
if closing_braces > opening_braces:
break
# Regex magic yay!
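# The three patterns below match, in order: a transition line such as
# "StateA --> StateB : condition" (with "[*]" allowed as the source),
# a state/action line such as "state StateA : entry: doSomething()",
# and the opening of a nested submachine such as "state StateA {".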
matchtransition = re.match(r'(\[\*\]|\w+)(?:|\s+)-->(?:|\s+)(\w+)(?:(?:|\s+)\:(.*))?', line)
matchstateaction = re.match(r'(?:state\s+)?(\w+)(?:|\s+)(?:(?:|\s+)\:(.*))?', line)
matchsubmachine = re.match(r'(?:state\s+)?(\w+)(?:|\s+)\{.*$', line)
if line.startswith('title'):
uml_params.title = line
elif line.startswith('note'):
note_match = re.match('.*\"(.*)\"', line)
if note_match:
uml_params.notes.append(self.__LineCleanup(note_match.group(1)))
elif matchtransition:
self.__AddTransition(uml_params, matchtransition)
elif matchsubmachine:
#Pending to do this in a more elegant way and not depending
# on the order of the ifs
state_name = matchstateaction.group(1)
if uml_params.states.get(state_name) == None:
uml_params.states[state_name] = self.StateType()
sub_info = self.ParseStateMachineAsDict(uml_text, init_line = line_num + 1, submachine = True)
#Set state name as title
sub_info[0].title = state_name + '_submachine'
uml_params.states[state_name].submachine.append(sub_info[0])
line_num = sub_info[1]
elif matchstateaction:
self.__AddStateActions(uml_params, matchstateaction)
line_num += 1
return uml_params, line_num
def __LineCleanup(self, line_string):
cleaned_string = re.sub(r'(?<!\\)\\n','\n',line_string)
cleaned_string = cleaned_string.replace('\\\\','\\').strip()
return cleaned_string
def __AddTransition(self, uml_params, matchtransition):
transition = self.TransitionType()
state_origin = matchtransition.group(1)
transition.destination = matchtransition.group(2)
text = matchtransition.group(3)
if text is not None:
text = text.split('\\ndo:\\n')
conditions = text[0]
transition.conditions = self.__LineCleanup(conditions)
if len(text) > 1:
actions = text[1] if text else None
transition.actions = self.__LineCleanup(actions)
#transition.actions = matchtransition.group(4)
#Check if state exits, if not, create it
if uml_params.states.get(state_origin) == None:
uml_params.states[state_origin] = self.StateType()
uml_params.states[state_origin].transitions.append(transition)
#Also, create destination state if it does not exist
if uml_params.states.get(transition.destination) == None:
uml_params.states[transition.destination] = self.StateType()
def __AddStateActions(self, uml_params, matchstateaction):
state_name = matchstateaction.group(1)
actions = matchstateaction.group(2)
if uml_params.states.get(state_name) == None:
uml_params.states[state_name] = self.StateType()
#Get entry, exit and during
if actions:
#Do a regex split
action_matches = re.split(r'(entry\:|during\:|exit\:)', actions)
#Replace \n by real \n and trim
action_matches = [self.__LineCleanup(line) for line in action_matches]
#The split list starts with an empty string (or only spaces) when the actions text
#begins with one of the keywords; any leading plain text is treated as a 'during' action.
if action_matches[0].strip() != '':
uml_params.states[state_name].during = action_matches[0]
line_num = 1
while line_num < len(action_matches):
if action_matches[line_num] == 'entry:':
uml_params.states[state_name].entry = action_matches[line_num + 1]
line_num += 1
elif action_matches[line_num] == 'during:':
uml_params.states[state_name].during = action_matches[line_num + 1]
line_num += 1
elif action_matches[line_num] == 'exit:':
uml_params.states[state_name].exit = action_matches[line_num + 1]
line_num += 1
else:
raise Exception('Action {} not recognized.'.format(action_matches[line_num]))
line_num += 1
def GenerateFromTemplate(self, output_file, template_file, uml, uml_params):
env = Environment(
loader=FileSystemLoader(os.path.dirname(template_file))
)
template = env.get_template(os.path.basename(template_file))
with open(output_file, 'w') as out_file:
out_file.write(template.render(file_name=output_file, uml=uml,
uml_params=uml_params, get_submachines=self.GetSubmachineObjects,
get_basename=GetBaseName))
def GetSubmachineObjects(self, uml_object):
uml_submachines_list = []
for state in uml_object.states:
if len(uml_object.states[state].submachine) > 0:
for uml_submachine in uml_object.states[state].submachine:
#Set title of submachine as the name of state parent
uml_submachines_list.append(uml_submachine)
#Recursion to get more levels
uml_submachines_list += self.GetSubmachineObjects(uml_submachine)
return uml_submachines_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process PlantUML file to generate code')
parser.add_argument('--input','-i', required = True, dest = 'plantuml_file',
help ='Plant UML file from which to generate code')
parser.add_argument('--output','-o', required = True, dest = 'output_files',
help ='Code files generated. Separate by spaces in case of '
'more than one template', nargs='+')
parser.add_argument('--templates', '-t', dest = 'templates',
default = ['templates/C_code.c', 'templates/C_code.h'],
help = 'Templates to be used, separated by spaces', nargs='+')
parser.add_argument('--no-check', action = 'store_true',
help = 'This option is strongly discouraged. With this option '
'you choose not to check that your PlantUML file is valid.')
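# Example invocation (hypothetical file names, for illustration only):
#   python plantuml_codegen.py -i machine.puml -o machine.c machine.h -t templates/C_code.c templates/C_code.h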
args = parser.parse_args()
plantuml_obj = PlantUMLCodeGeneration(args.plantuml_file)
#Generate the output files from the parsed UML and the given templates
plantuml_obj.GenerateCode(args.output_files, args.templates, no_check = args.no_check)
|
"""
====================
Build image pyramids
====================
The ``pyramid_gaussian`` function takes an image and yields successive images
shrunk by a constant scale factor. Image pyramids are often used, e.g., to
implement algorithms for denoising, texture discrimination, and scale-
invariant detection.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.transform import pyramid_gaussian
image = data.astronaut()
rows, cols, dim = image.shape
pyramid = tuple(pyramid_gaussian(image, downscale=2))
composite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double)
composite_image[:rows, :cols, :] = pyramid[0]
i_row = 0
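# Paste each downscaled level into the right-hand column, stacking them from top to bottom.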
for p in pyramid[1:]:
n_rows, n_cols = p.shape[:2]
composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p
i_row += n_rows
fig, ax = plt.subplots()
ax.imshow(composite_image)
plt.show()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import unittest
from blank_interaction.services import BlankServiceInterface
from interaction.clients import Calculator
from interaction.types import Point
from thrift.py3.client import get_client, ClientType
from .run_interaction import run_interaction
class InteractionTest(unittest.TestCase):
def setUp(self) -> None:
self.interaction = run_interaction()
def init_client(self) -> Calculator:
return get_client(
Calculator,
port=self.interaction.getPort(),
host="::1",
client_type=ClientType.THRIFT_ROCKET_CLIENT_TYPE,
)
def tearDown(self) -> None:
self.interaction.reset()
def test_basic(self) -> None:
async def inner_test() -> None:
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 0), 0)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(1)
self.assertEqual(await add.getPrimitive(), 1)
point = await add.getPoint()
self.assertEqual(point.x, 0)
self.assertEqual(point.y, 0)
newPoint = Point(x=2, y=3)
await add.accumulatePoint(newPoint)
point = await add.getPoint()
self.assertEqual(point.x, 2)
self.assertEqual(point.y, 3)
await add.noop()
asyncio.run(inner_test())
def test_multiple_interactions(self) -> None:
async def inner_test() -> None:
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 0), 0)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(1)
self.assertEqual(await add.getPrimitive(), 1)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(2)
self.assertEqual(await add.getPrimitive(), 2)
asyncio.run(inner_test())
def test_multiple_clients(self) -> None:
async def inner_test() -> None:
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 0), 0)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(1)
self.assertEqual(await add.getPrimitive(), 1)
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 1), 1)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(2)
self.assertEqual(await add.getPrimitive(), 2)
asyncio.run(inner_test())
def test_terminate_unused(self) -> None:
async def inner_test() -> None:
async with self.init_client() as calc:
async with calc.createAddition() as _:
pass
asyncio.run(inner_test())
def test_terminate_client_error(self) -> None:
class SpecificError(Exception):
pass
async def inner_test() -> None:
try:
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 0), 0)
async with calc.createAddition() as add:
add.accumulatePrimitive(1)
raise SpecificError("Generic error")
except SpecificError:
pass
else:
self.fail("Didn't throw SpecificError")
asyncio.run(inner_test())
|
from flask_socketio import SocketIO
from flask import Flask, make_response, request, session
from flask import render_template, session, url_for, redirect
from threading import RLock
from threading import Thread
from utilslib import list_to_HTML_table
from time import sleep
from ClientStorage import Clients, User
from gameObjects import Game, GameContainter, Player, ChatMmsg
from random import shuffle
#Init server
app = Flask(__name__, template_folder='templates', static_folder='static')
app.config['SECRET_KEY'] = 'lskwod=91230?=)ASD?=)("")@'
socketio = SocketIO(app, async_mode='threading')
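# Module-level state shared across requests: locks that serialize emits from
# background threads, the connected users, and the running games.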
timerLock = RLock()
asyncLock = RLock()
clients = Clients()
games = GameContainter()
debugging = False
@app.route('/', methods = ['POST', 'GET'])
@app.route('/index', methods = ['POST', 'GET'])
def index():
verbose = (False or debugging)
error = request.args.get('error')
return make_response(render_template('makeGame.html', title = "Welcome", cool = 123, error = error))
@app.route('/gameRoom', methods = ['POST', 'GET'])
def gameRoom():
global games
verbose = (False or debugging)
argumentsMakeGame = ['name', 'gameName', 'nrOfRounds', 'time', 'newGame']
argumentsJoinGame = ['name', 'gameName', 'newGame']
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
return redirect(url_for('index') + '?error=No user. Refreshing')
if (not user.gameObject):
data = request.form
#MAKE A NEW GAME
if data['newGame'] == 'yes':
if verbose: print('In server:gameRoom() nrOfRounds set!')
for key in data.keys():
argumentsMakeGame.remove(key)
if argumentsMakeGame:
return redirect(url_for('index') + '?error=Not enough arguments when creating the game')
if verbose: print('In server:gameRoom() arguments needed for making a game are present')
#Creating player and game
game = games.add_Game(gameName=data['gameName'], nrOfRounds=data['nrOfRounds'], timePerRound=data['time'])
player = game.add_Player(name=data['name'], userObject=user)
if (not player):
return redirect(url_for('index') + '?error=Player name already exists in this game...')
if verbose: print('In server:gameRoom() game created with the name {} and user/player added'.format(game.gameName))
#Join an existing game
else:
data = request.form
if verbose: print('In server:gameRoom() joining a game!')
for key in data.keys():
argumentsJoinGame.remove(key)
if argumentsJoinGame:
return redirect(url_for('index') + '?error=Not enough arguments when joining the game')
if verbose: print('In server:gameRoom() Searching for game: {}'.format(data['gameName']))
#Check if game exists
game = games.find_Game_By_Name(data['gameName'], verbose)
if (not game):
if verbose: print('The game was not found')
return redirect(url_for('index') + '?error=Game not found')
#Check if name already taken
for player in game.players:
if player.name == data['name']:
return redirect(url_for('index') + '?error=Name already taken')
player = game.add_Player(name=data['name'], userObject=user)
if verbose: print('In server:gameRoom() Player joined game')
if verbose: print('In server:gameRoom() game created and user/player added')
sendMessageToGame(game, '{} joined the game'.format(data['name']))
emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock)
else:
if verbose: print('User already in game')
error = None
return make_response(render_template('gameRoom.html', title = "Game Room", gameName = user.gameObject.gameName, error = error))
@app.route('/gameRoomContent')
def gameRoomContent():
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if userNotComplete(user, verbose = (False or debugging)):
return 'ERROR: Something strange happened. Please leave game and rejoin'
game = user.gameObject
nrOfRounds = game.nrOfRounds
timePerRound = game.timePerRound
gameName = game.gameName
roundNr = game.currentRound
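# Render the partial template that corresponds to the game's current stage.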
if (user.gameObject.get_Stage() == 'lobby'):
return render_template('lobbyContent.html',
gameName = gameName,
nrOfRounds = nrOfRounds,
timePerRound = timePerRound)
elif (user.gameObject.get_Stage() == 'roundStart'):
return render_template('roundContentStart.html',
timePerRound = timePerRound,
roundNr = roundNr,
nrOfRounds = nrOfRounds)
elif (user.gameObject.get_Stage() == 'roundSupply'):
game.spawnedThread = None
game.reset_Players_Ready()
emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock)
print('GameContent:')
print(game.get_Search_Strings(user.playerObject))
return render_template('roundContentSupply.html',
nrOfPlayers = game.get_Nr_Of_Players(),
searchStrings = game.get_Search_Strings(user.playerObject),
nrOfEntries = game.nrOfEntry)
elif (user.gameObject.get_Stage() == 'roundVote'):
game.reset_Players_Ready()
return makeVoteContent(user)
elif (user.gameObject.get_Stage() == 'roundEnd'):
game.reset_Players_Ready()
return makeRoundEnd(user)
elif (user.gameObject.get_Stage() == 'gameSummary'):
game.reset_Players_Ready()
return render_template('gameContentSummary.html')
def makeVoteContent(user):
game = user.gameObject
playerObject = user.playerObject
notReady = False
voteEntries = game.get_Vote_Entries(playerObject)
return render_template('roundContentVote.html',
notReady = notReady,
voteEntries = voteEntries)
def makeRoundEnd(user):
game = user.gameObject
playerObject = user.playerObject
playersPoints = {}
for player in game.players:
playersPoints[player.name] = player.points
searchStrings = {}
for entry in game.entries:
searchStrings[entry.searchString] = {}
return render_template('roundContentEnd.html', playersPoints = playersPoints)
@app.route('/playerList')
def playerList():
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
verbose = (False or debugging)
if userNotComplete(user, verbose = (False or debugging)):
return redirect(url_for('index') + '?error=User not in game')
playerList = user.gameObject.get_Player_Names_And_Status()
if verbose: print('Got {} players'.format(len(playerList)))
return render_template('playerList.html', playerList = playerList)
@app.route('/chatContent')
def chatContent():
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if userNotComplete(user, verbose = (False or debugging)):
return redirect(url_for('index') + '?error=User not in game')
chat = user.gameObject.chatMessages
msgs = []
players = []
for msg in chat:
player, msg = msg.get_Player_And_Msg()
msgs.append(str(msg))
players.append(str(player))
if players:
players.reverse()
msgs.reverse()
return render_template('chat.html', players = players, chatMsg = msgs)
@app.route('/leave_Game')
def leaveGame():
verbose = (False or debugging)
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
if verbose: print('No user')
return redirect(url_for('index'))
game = user.gameObject
game.remove_Player_By_User_Object(user)
name = user.playerObject.name
user.resetUser()
if len(game.players)<1:
games.removeGame(game=game, verbose = verbose)
else:
emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock)
emitToGame(game = game, arg = ('client_warning',{'msg': name+' left the game'}), lock = timerLock)
print (len(games._games))
return redirect(url_for('index'))
@socketio.on('submit_entry')
def submitEntry(msg):
verbose = (False or debugging)
if verbose: print ('Entry received by the server')
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if verbose: print ('User retrieved')
if (not user):
if verbose: print('No user found when collecting the data')
return
if user.playerObject.entry:
if verbose: print('User already submitted.')
return
if verbose: print ('Setting entry for user')
user.gameObject.add_Entry(msg['searchString'], msg['suggestion'], user.playerObject)
if verbose: print('Got entry')
if user.gameObject.nrOfEntry >= user.gameObject.get_Nr_Of_Players():
emitToGame(game = user.gameObject, arg = ('refresh_div_content',{'div': 'entryList', 'cont': '/gameRoomContent'}), lock = timerLock)
@socketio.on('submit_supply')
def submitSupply(data):
verbose = (False or debugging)
if verbose: print ('\n---------------------\nSupply received by the server')
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
if verbose: print('No user found when collecting the data')
return
game = user.gameObject
if verbose: print ('The data received is: {}'.format(data))
if verbose: print ('player {} found'.format(user.playerObject.name))
if (not data):
return
if verbose: print('')
if verbose: print('The actual data:')
for key, value in data.items():
if verbose: print('Key: {} \t Value: {}'.format(key, value))
if value == '':
continue
game.entries[int(key)].add_Autocomplete(value, user.playerObject)
if verbose: print('')
game.nrOfSupply += 1
if verbose: print ('The game has received {} supplies so far\n---------------------\n'.format(game.nrOfSupply))
#All "supplies" are received
if user.gameObject.nrOfSupply >= user.gameObject.get_Nr_Of_Players():
if verbose: print ('We should now refresh the div content')
emitToGame(game = user.gameObject, arg = ('refresh_div_content', {'div': 'contentVote', 'cont': '/gameRoomContent'}), lock = timerLock)
#emitToGame(game = user.gameObject, arg = ('refresh_div_content',{'div': 'entryList', 'cont': '/gameRoomContent'}), lock = timerLock)
if verbose and False:
print('')
for entry in game.entries:
print('-------------------------------------------')
print('The entry with the search string: \t {}\nHas the following autocompletes added:'.format(entry.searchString))
for supply in entry.otherAutocompletes:
print (supply.autoComplete)
print('-------------------------------------------')
print('')
@socketio.on('submit_favorite')
def submitFavorite(favorite):
print('The server received a favorite: {}'.format(favorite))
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
game = user.gameObject
autoComplete = game.get_Autocomlete_by_ID(favorite)
if (not autoComplete):
user.playerObject.points -= 1
return
user.playerObject.autocompleteVotedFor = autoComplete
if (autoComplete.isGoogle):
user.playerObject.points += 1
return
autoComplete.playerObject.points += 1
return
@socketio.on('toggle_ready')
def toggleReady(msg):
verbose = (True or debugging)
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
if verbose: print('No user found when toggling ready')
return
player = user.playerObject
if (not player):
    if verbose: print('No player found for the user/client.')
    return
player.ready = not player.ready
game = player.gameObject
#A game object will always exist if there is a playerObject
emitToGame(game = game, arg = ('refresh_Player_List',{}), lock = timerLock)
playersReady = game.all_Players_Ready()
if verbose: print ('STAGE:', game.get_Stage())
#Start round
if playersReady and game.gameStarted == False and not game.spawnedThread:
game.gameStarted = True
game.reset_Players_Ready()
emitToGame(game = game, arg = ('change_content', {'url':'/gameRoomContent'}), lock = timerLock)
emitToGame(game = game, arg = ('client_message', {'msg':'Game started. Have fun!'}), lock = timerLock)
#Start timer
game.spawnedThread = RoundTimer(int(game.timePerRound), user)
game.spawnedThread.start()
return
#End round
if playersReady and game.get_Stage() == 'roundStart':
if verbose: print ('Round ended by users')
user.gameObject.end_Stage()
game.reset_Players_Ready()
if verbose: print('Current stage of game is: {}'.format(user.gameObject.get_Stage()))
emitToGame(game = user.gameObject, arg = ('round_End', {}), lock = timerLock)
emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock)
return
#End supply
if playersReady and game.get_Stage() == 'roundSupply':
user.gameObject.end_Stage()
game.reset_Players_Ready()
emitToGame(game = user.gameObject, arg = ('supply_End', {'nrOfEntries': user.gameObject.nrOfEntry}), lock = timerLock)
emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock)
return
#End vote
if playersReady and game.get_Stage() == 'roundVote':
user.gameObject.end_Stage()
game.reset_Players_Ready()
emitToGame(game = user.gameObject, arg = ('vote_End', {}), lock = timerLock)
emitToGame(game = user.gameObject, arg = ('client_message', {'msg':'Vote ended'}), lock = timerLock)
return
class RoundTimer(Thread):
def __init__(self, timeToWait, user):
Thread.__init__(self)
self.timeToWait = timeToWait
self.user = user
def run(self):
sleep(self.timeToWait)
if (not self.user.gameObject) or (self.user.gameObject.roundEnded):
return
self.user.gameObject.end_Stage()
emitToGame(game = self.user.gameObject, arg = ('round_End', {'url':'/gameRoomContent'}), lock = timerLock)
emitToGame(game = self.user.gameObject, arg = ('client_message', {'msg':'Round ended'}), lock = timerLock)
return
@socketio.on('handle_chat')
def handleChat(msg):
#update_chat
verbose = (False or debugging)
uniqueID = request.cookies.get('uniqueID')
user = clients.find_User_By_uniqueID(uniqueID)
if (not user):
if verbose: print('No user')
return redirect(url_for('index'))
game = user.gameObject
if (not game):
if verbose: print('No game found when handling chat')
return
game.add_Chat_Msg(chatMsg=msg, playerName=user.playerObject.name)
emitToGame(game=game, arg=('update_chat',{}), lock=timerLock)
@socketio.on('connected')
def client_connect():
verbose = (False or debugging)
'''
I need to identify the user. If the user reloads, the session ID will change.
A unique user-key is provided for each new user, and the session ID is updated
when the user reconnects. The unique ID is stored in a cookie.
'''
if verbose: print('Someone connected with the IP: {}'.format(request.remote_addr))
uniqueID = request.cookies.get('uniqueID')
if verbose: print('\nUnique ID before update: {}'.format(uniqueID))
if uniqueID:
if verbose: print('Unique ID cookie found')
user = clients.find_User_By_uniqueID(uniqueID)
if user:
if verbose: print('User found')
if request.sid != user.sid:
user.sid = request.sid
if verbose: print('Updated the SID')
else:
user = clients.add_User(sid=request.sid)
if verbose: print('User created')
user.uniqueID = uniqueID
if verbose: print('Unique ID updated')
else:
if verbose: print('Made a new user')
user = clients.add_User(sid=request.sid)
if verbose: print('Emitted to server: set_cookie')
emit(arg=('set_cookie', {'name': 'uniqueID' , 'data': user.uniqueID}), uniqueID = None, lock = timerLock, user= user)
def sendMessageToGame(game, msg):
for player in game.players:
emit(arg = ('client_message', {'msg': msg}), uniqueID = None, lock = timerLock, user= player.userObject)
def emitToGame(arg, game, lock):
for player in game.players:
emit(arg = arg, uniqueID = None, lock = lock, user = player.userObject)
def emit(arg, uniqueID, lock, user = None):
'''
An emit helper that acquires the given lock before emitting.
TODO: Find out whether the lock is actually needed.
'''
verbose = (False or debugging)
with lock:
if verbose: print ('Did an emit')
if (not user):
userSID = clients.find_User_By_uniqueID(uniqueID).sid
else:
userSID = user.sid
socketio.emit(*arg, room = userSID)
def userNotComplete(user, verbose = (False or debugging)):
    if (not user):
        return True
    if verbose:
        print('\nUser name: {}'.format(user.name))
        print('User gameObject pointer {}'.format(user.gameObject))
        print('User playerObject pointer {}\n'.format(user.playerObject))
    return (not user.gameObject) or (not user.playerObject)
if __name__ == "__main__":
socketio.run(app, debug = False)
|
from torch.utils.data import Dataset, DataLoader
import glob
import os
import numpy as np
import cv2
import torch
from torchvision import transforms, utils
from skimage.transform import resize
class SegDataset(Dataset):
"""Segmentation Dataset"""
def __init__(self, root_dir, imageFolder, maskFolder, transform=None, seed=None, fraction=None, subset=None, imagecolormode='rgb', maskcolormode='grayscale'):
"""
Args:
root_dir (string): Directory with all the images and should have the following structure.
root
--Images
-----Img 1
-----Img N
--Mask
-----Mask 1
-----Mask N
imageFolder (string) = 'Images' : Name of the folder which contains the Images.
maskFolder (string) = 'Masks' : Name of the folder which contains the Masks.
transform (callable, optional): Optional transform to be applied on a sample.
seed: Specify a seed for the train and test split
fraction: A float value from 0 to 1 which specifies the validation split fraction
subset: 'Train' or 'Test' to select the appropriate set.
imagecolormode: 'rgb' or 'grayscale'
maskcolormode: 'rgb' or 'grayscale'
"""
self.color_dict = {'rgb': 1, 'grayscale': 0}
assert(imagecolormode in ['rgb', 'grayscale'])
assert(maskcolormode in ['rgb', 'grayscale'])
self.imagecolorflag = self.color_dict[imagecolormode]
self.maskcolorflag = self.color_dict[maskcolormode]
self.root_dir = root_dir
self.transform = transform
if not fraction:
self.image_names = sorted(
glob.glob(os.path.join(self.root_dir, imageFolder, '*')))
self.mask_names = sorted(
glob.glob(os.path.join(self.root_dir, maskFolder, '*')))
else:
assert(subset in ['Train', 'Test'])
self.fraction = fraction
self.image_list = np.array(
sorted(glob.glob(os.path.join(self.root_dir, imageFolder, '*'))))
self.mask_list = np.array(
sorted(glob.glob(os.path.join(self.root_dir, maskFolder, '*'))))
if seed:
np.random.seed(seed)
indices = np.arange(len(self.image_list))
np.random.shuffle(indices)
self.image_list = self.image_list[indices]
self.mask_list = self.mask_list[indices]
if subset == 'Train':
self.image_names = self.image_list[:int(
np.ceil(len(self.image_list)*(1-self.fraction)))]
self.mask_names = self.mask_list[:int(
np.ceil(len(self.mask_list)*(1-self.fraction)))]
else:
self.image_names = self.image_list[int(
np.ceil(len(self.image_list)*(1-self.fraction))):]
self.mask_names = self.mask_list[int(
np.ceil(len(self.mask_list)*(1-self.fraction))):]
def __len__(self):
return len(self.image_names)
def __getitem__(self, idx):
img_name = self.image_names[idx]
if self.imagecolorflag:
image = cv2.imread(
img_name, self.imagecolorflag).transpose(2, 0, 1)
else:
image = cv2.imread(img_name, self.imagecolorflag)
msk_name = self.mask_names[idx]
if self.maskcolorflag:
mask = cv2.imread(msk_name, self.maskcolorflag).transpose(2, 0, 1)
else:
mask = cv2.imread(msk_name, self.maskcolorflag)
sample = {'image': image, 'mask': mask}
if self.transform:
sample = self.transform(sample)
return sample
# Define few transformations for the Segmentation Dataloader
class Resize(object):
"""Resize image and/or masks."""
def __init__(self, imageresize, maskresize):
self.imageresize = imageresize
self.maskresize = maskresize
def __call__(self, sample):
image, mask = sample['image'], sample['mask']
if len(image.shape) == 3:
image = image.transpose(1, 2, 0)
if len(mask.shape) == 3:
mask = mask.transpose(1, 2, 0)
mask = cv2.resize(mask, self.maskresize, interpolation=cv2.INTER_AREA)
#mask = 256 * resize(mask, (256, 256), anti_aliasing = True)
image = cv2.resize(image, self.imageresize, interpolation=cv2.INTER_AREA)
#image = 256 * resize(image, (256, 256), anti_aliasing = True)
if len(image.shape) == 3:
image = image.transpose(2, 0, 1)
if len(mask.shape) == 3:
mask = mask.transpose(2, 0, 1)
return {'image': image,
'mask': mask}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample, maskresize=None, imageresize=None):
image, mask = sample['image'], sample['mask']
if len(mask.shape) == 2:
mask = mask.reshape((1,)+mask.shape)
if len(image.shape) == 2:
image = image.reshape((1,)+image.shape)
return {'image': torch.from_numpy(image),
'mask': torch.from_numpy(mask)}
class Normalize(object):
'''Normalize image'''
def __call__(self, sample):
image, mask = sample['image'], sample['mask']
return {'image': image.type(torch.FloatTensor)/255,
'mask': mask.type(torch.FloatTensor)/255}
def get_dataloader_single_folder(data_dir, imageFolder='Images', maskFolder='Masks', fraction=0.2, batch_size=4):
"""
Create training and testing dataloaders from a single folder.
"""
data_transforms = {
'Train': transforms.Compose([Resize((256, 256), (256, 256)), ToTensor(), Normalize()]),
'Test': transforms.Compose([Resize((256,256), (256, 256)), ToTensor(), Normalize()]),
}
image_datasets = {x: SegDataset(data_dir, imageFolder=imageFolder, maskFolder=maskFolder, seed=100, fraction=fraction, subset=x, transform=data_transforms[x])
for x in ['Train', 'Test']}
dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size,
shuffle=True, num_workers=8)
for x in ['Train', 'Test']}
return dataloaders
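# A minimal usage sketch (the data directory path below is hypothetical, not part
# of this module): get_dataloader_single_folder expects root/Images and root/Masks
# and returns 'Train'/'Test' DataLoaders that yield dicts with 'image' and 'mask'
# float tensors scaled to [0, 1].
if __name__ == '__main__':
    dataloaders = get_dataloader_single_folder('data/root', fraction=0.2, batch_size=4)
    batch = next(iter(dataloaders['Train']))
    print(batch['image'].shape, batch['mask'].shape)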
|
# -*- coding: utf-8 -*-
import pytest
import vtk
import numpy as np
import sksurgeryvtk.utils.polydata_utils as pdu
import sksurgeryvtk.models.vtk_surface_model as vbs
def test_overlapping_bounds():
radius_0=10.0
radius_1=7.0
centre_1=5.0
radius_2=4.0
centre_2=15.0
radius_3=4.0
centre_3=0.0
sphere_0 = vtk.vtkSphereSource()
sphere_0.SetRadius(radius_0)
sphere_0.SetPhiResolution(12)
sphere_0.SetThetaResolution(12)
sphere_0.SetCenter(0.0, 0.0, 0.0)
sphere_0.Update()
vtk_model_0 = sphere_0.GetOutput()
sphere_1 = vtk.vtkSphereSource()
sphere_1.SetRadius(radius_1)
sphere_1.SetPhiResolution(12)
sphere_1.SetThetaResolution(21)
sphere_1.SetCenter(centre_1, 0.0, 0.0)
sphere_1.Update()
vtk_model_1 = sphere_1.GetOutput()
sphere_2 = vtk.vtkSphereSource()
sphere_2.SetRadius(radius_2)
sphere_2.SetPhiResolution(12)
sphere_2.SetThetaResolution(21)
sphere_2.SetCenter(centre_2, 0.0, 0.0)
sphere_2.Update()
vtk_model_2 = sphere_2.GetOutput()
sphere_3 = vtk.vtkSphereSource()
sphere_3.SetRadius(radius_3)
sphere_3.SetPhiResolution(12)
sphere_3.SetThetaResolution(21)
sphere_3.SetCenter(centre_3, 0.0, 0.0)
sphere_3.Update()
vtk_model_3 = sphere_3.GetOutput()
assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_1))
assert (pdu.check_overlapping_bounds( vtk_model_1, vtk_model_0))
assert (not pdu.check_overlapping_bounds( vtk_model_0, vtk_model_2))
assert (not pdu.check_overlapping_bounds( vtk_model_2, vtk_model_0))
assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_3))
assert (pdu.check_overlapping_bounds( vtk_model_3, vtk_model_0))
def test_dice_overlap():
radius_0=10.0
radius_1=7.0
centre_1=5.0
sphere_0 = vtk.vtkSphereSource()
sphere_0.SetRadius(radius_0)
sphere_0.SetPhiResolution(60)
sphere_0.SetThetaResolution(60)
sphere_0.SetCenter(0.0, 0.0, 0.0)
sphere_0.Update()
vtk_model_0 = sphere_0.GetOutput()
sphere_1 = vtk.vtkSphereSource()
sphere_1.SetRadius(radius_1)
sphere_1.SetPhiResolution(60)
sphere_1.SetThetaResolution(60)
sphere_1.SetCenter(centre_1, 0.0, 0.0)
sphere_1.Update()
vtk_model_1 = sphere_1.GetOutput()
dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1)
np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2)
np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * radius_1**3.0 / 3.0, significant=2)
#from http://mathworld.wolfram.com/Sphere-SphereIntersection.html
cap_height_0 = ( radius_1 - radius_0 + centre_1) * ( radius_1 + radius_0 - centre_1) / (2 * centre_1)
cap_height_1 = ( radius_0 - radius_1 + centre_1) * ( radius_0 + radius_1 - centre_1) / (2 * centre_1)
cap_vol_0 = np.pi * cap_height_0**2 * ( 3 * radius_0 - cap_height_0) / 3
cap_vol_1 = np.pi * cap_height_1**2 * ( 3 * radius_1 - cap_height_1) / 3
analytic = cap_vol_0 + cap_vol_1
np.testing.assert_approx_equal(volume_01, analytic, significant=2)
np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10)
def test_dice_no_overlap():
radius_0=5.5
radius_1=4.3
centre_1=12.0
sphere_0 = vtk.vtkSphereSource()
sphere_0.SetRadius(radius_0)
sphere_0.SetPhiResolution(60)
sphere_0.SetThetaResolution(60)
sphere_0.SetCenter(0.0, 0.0, 0.0)
sphere_0.Update()
vtk_model_0 = sphere_0.GetOutput()
sphere_1 = vtk.vtkSphereSource()
sphere_1.SetRadius(radius_1)
sphere_1.SetPhiResolution(60)
sphere_1.SetThetaResolution(60)
sphere_1.SetCenter(centre_1, 0.0, 0.0)
sphere_1.Update()
vtk_model_1 = sphere_1.GetOutput()
dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1)
np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2)
np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * radius_1**3.0 / 3.0, significant=2)
analytic = 0.0
np.testing.assert_approx_equal(volume_01, analytic, significant=2)
np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10)
|
import string
import random
# --- Defining Variables ---
LOWER_ALPHABET = list(string.ascii_lowercase)
DIGITS = list(string.digits)
UPPER_ALPHABET = list(string.ascii_uppercase)
SYMBOLS = list(string.punctuation)
SYMBOLS_DELETE = ['"', "'", "(", ")", ",", ".", ":", ";", "[", "]", "|", "`", "{", "}"]
for x in SYMBOLS_DELETE:
SYMBOLS.remove(x)
CHAR_TYPES = [LOWER_ALPHABET, DIGITS] # characters used as default
# --- PROGRAM INTRO ---
print("""
#############################################################
# --- Password Generator --- #
#############################################################
# Language: Python #
#############################################################
# #
# This is my very first project with Python #
# Lowercase characters and digits are used as default #
# #
#############################################################
""")
# --- LENGTH QUESTION ---
while True:
print("Password Length (Min: 8 / Max: 48):")
pass_len = input()
try:
pass_len = int(pass_len)
if pass_len >= 8 and pass_len <= 48:
break
else:
print("\nYou should insert a number between 8 and 16.\n")
except ValueError:
# In case the user inserts a value that cannot be converted to an 'int'
print("\nYou should insert a NUMBER between 8 and 48.\n")
# --- UPPERCASE AND SYMBOLS QUESTION FUNCTION ---
def question_checker(phrase, char_type):
"""Check if the user inserts a valid value on the upper case and symbols question.
Then append the specific char type list to CHAR_TYPES if the answer is "Yes".
"""
while True:
print("")
print(phrase)
answer = input().strip().capitalize()
if answer == "Yes" or answer == "No":
break
else:
print("\nInvalid Value.\n")
def char_assignment(char_check, char_type):
if char_check == "Yes":
return CHAR_TYPES.append(char_type)
else:
pass
char_assignment(answer, char_type)
# --- ASSIGNING UPPERCASE AND/OR SYMBOLS CHARACTERS INTO THE CHAR_TYPES LIST. ---
question_checker("Do you want uppercase letters? [Yes/No]", UPPER_ALPHABET)
question_checker("Do you want symbols? [Yes/No]", SYMBOLS)
# --- CREATE THE PASSWORD ---
def create_password():
password_list = []
for x in range(len(CHAR_TYPES)):
password_list.append(CHAR_TYPES[x][random.randrange(len(CHAR_TYPES[x]))]) # ensure at least one character from each selected type appears in the password
for x in range(pass_len - len(CHAR_TYPES)):
random_chartype = random.randrange(len(CHAR_TYPES))
password_list.append(CHAR_TYPES[random_chartype][random.randrange(len(CHAR_TYPES[random_chartype]))]) # the remaining positions are filled with random characters
random.shuffle(password_list)
password = "".join(password_list)
return password
# --- SHOW OUTPUT ---
def show_password():
print("\n")
print(f"Password: {create_password()} ")
print("\n")
show_password()
# --- REMAKE THE PASSWORD ---
while True:
print("Remake the password? [Yes/No]")
answer = input().strip().capitalize()
if answer == "Yes" or answer == "No":
if answer == "Yes":
show_password()
else:
print("\n")
break
else:
print("\nInvalid Value.\n")
|
"""
An implementation for the gilded rose kata as I understand it.
https://github.com/NotMyself/GildedRose
"""
from collections import namedtuple
import unittest as ut
from ruleta import Rule, ActionSet
from ruleta.combinators import ALSO
import re
ItemRecord = namedtuple("ItemRecord",["name", "quality", "quality_change", "sellin" ] )
def print_through(label, condition):
def print_through_(input_):
val=condition(input_)
print(label, val)
return val
return print_through_
def set_quality_change(val):
return lambda item_record: item_record._replace(quality_change=val)
def sellby_date_passed(item_record):
return item_record.sellin <=0
def multiply_quality_change(val):
return lambda item_record: item_record._replace(quality_change = item_record.quality_change*val )
def does_item_degrade (item_record):
return item_record.quality_change <0
def is_item_conjured(item_record ):
return bool(re.match("conjured", item_record.name))
def is_aged_brie(item_record):
return item_record.name == "Aged Brie"
def is_sulfuras(item_record):
return item_record.name == "Sulfuras"
def is_backstage_passes(item_record):
return item_record.name == "Backstage passes"
def days_until_sellby(condition):
return lambda item_record: condition(item_record.sellin)
def leq(val):
return lambda input_ : input_ <= val
def geq(val):
return lambda input_ : input_ >= val
double_degradation = Rule(does_item_degrade, multiply_quality_change(2))
def set_quality(val):
return lambda item_record: item_record._replace(quality=val)
def do_nothing(item_record):
return item_record
def compare_quality(condition ):
return lambda item_record : condition(item_record.quality)
# Rulesets
"""
The rules as written:
`
All items have a SellIn value which denotes the number of days we have to sell the item
All items have a Quality value which denotes how valuable the item is
At the end of each day our system lowers both values for every item
Once the sell by date has passed, Quality degrades twice as fast
The Quality of an item is never negative
"Aged Brie" actually increases in Quality the older it gets
The Quality of an item is never more than 50
"Sulfuras", being a legendary item, never has to be sold or decreases in Quality
"Backstage passes", like aged brie, increases in Quality as it's SellIn value approaches; Quality increases by 2 when there are 10 days or less and by 3 when there are 5 days or less but Quality drops to 0 after the concert
'
Just for clarification, an item can never have its Quality increase above 50, however "Sulfuras" is a legendary item and as such its Quality is 80 and it never alters.
"""
"""
The rules as I understand them.
The basic rules for the quality of all items are:
every day the quality degrades by 1
if the sell-by date has passed, the degradation is doubled
if the item is conjured, the degradation is doubled again
but when the item is "Sulfuras", its quality never changes
but when the item is "Aged Brie", the quality increases by 1 every day
but when the item is "Backstage passes", the quality changes according to the following rules:
the quality increases by 1 every day
but if the sell-by date is 10 days or less away, the quality increases by 2 each day
but if the sell-by date is 5 days or less away, the quality increases by 3 each day instead
if the sell-by date has passed, the quality is zero and never changes
Independent of the above rules, if the quality of an item would be below zero, it is zero instead,
but if the item's quality would be above 50, it is 50 instead,
but if the item is "Sulfuras", the quality is always 80.
"""
basic_degradiation_rules= ActionSet(set_quality_change(-1))\
.also(Rule(sellby_date_passed, double_degradation))\
.also(Rule(is_item_conjured, double_degradation))
backstage_pass_rules = ActionSet(set_quality_change(+1))\
.but(Rule( days_until_sellby(leq(10) ), set_quality_change(+2)))\
.but(Rule( days_until_sellby(leq(5) ), set_quality_change(+3)))\
.but(Rule( sellby_date_passed, ALSO(set_quality(0),set_quality_change(0))))
extended_degradiation_rules = ActionSet(basic_degradiation_rules)\
.but(Rule(is_aged_brie, set_quality_change(+1)) )\
.but(Rule(is_sulfuras, set_quality_change(0)))\
.but(Rule( is_backstage_passes, backstage_pass_rules ))
bracketing_rules = ActionSet(do_nothing)\
.but(Rule(compare_quality(leq(0)), set_quality(0)))\
.but(ActionSet(Rule(compare_quality(geq(50)), set_quality(50))))\
.but(Rule(is_sulfuras, set_quality(80)))
class GildedRose:
def __init__(self, items):
self._items = items
def update_quality(self):
for i in range(0,len(self._items)):
self._items[i] = self._update_item(self._items[i])
def _update_item(self, item):
item_record = extended_degradiation_rules(
ItemRecord( item.name, item.quality, 0, item.sellin) )
item_record = bracketing_rules( item_record._replace(quality=item_record.quality+item_record.quality_change ) )
return Item(item_record.name, max(item_record.sellin-1,0), item_record.quality)
class Item:
def __init__(self, name, sellin, quality):
self.name = name
self.sellin = sellin
self.quality = quality
def __repr__(self):
return "%s, %s, %s" % (self.name, self.sellin, self.quality)
class TestGildedRose(ut.TestCase):
def test_standard_item(self):
gilded_rose = GildedRose([Item("a Sword", 100, 5)])
gilded_rose.update_quality( )
self.assertEqual( ["a Sword, 99, 4"], list(map(repr,gilded_rose._items)))
def test_conjured_item(self):
gilded_rose = GildedRose([Item("conjured Sword", 100, 5)])
gilded_rose.update_quality( )
self.assertEqual( ["conjured Sword, 99, 3"], list(map(repr,gilded_rose._items)))
def test_minimum_quality(self):
gilded_rose = GildedRose([Item("a Sword", 100, 0)])
gilded_rose.update_quality( )
self.assertEqual( ["a Sword, 99, 0"], list(map(repr,gilded_rose._items)))
def test_backstage_passes_10_days(self):
gilded_rose = GildedRose([Item("Backstage passes", 10, 5)])
gilded_rose.update_quality( )
self.assertEqual( ["Backstage passes, 9, 7"], list(map(repr,gilded_rose._items)))
def test_backstage_passes_5_days(self):
gilded_rose = GildedRose([Item("Backstage passes", 5, 5)])
gilded_rose.update_quality( )
self.assertEqual( ["Backstage passes, 4, 8"], list(map(repr,gilded_rose._items)))
def test_backstage_passes_0_days(self):
gilded_rose = GildedRose([Item("Backstage passes", 0, 5)])
gilded_rose.update_quality( )
self.assertEqual( ["Backstage passes, 0, 0"], list(map(repr,gilded_rose._items)))
if __name__ == "__main__":
ut.main()
|
# type: ignore
import json
import uuid
from json import JSONDecodeError
from typing import Tuple, Dict, List
import boto3
from melange.drivers.interfaces import Queue, Topic, MessagingDriver, Message
class AWSDriver(MessagingDriver):
def __init__(self, **kwargs):
super().__init__()
self.max_number_of_messages = kwargs.get("max_number_of_messages", 10)
self.visibility_timeout = kwargs.get("visibility_timeout", 100)
self.wait_time_seconds = kwargs.get("wait_time_seconds", 10)
def declare_topic(self, topic_name) -> Topic:
sns = boto3.resource("sns")
topic = sns.create_topic(Name=topic_name)
return topic
def get_queue(self, queue_name) -> Queue:
sqs_res = boto3.resource("sqs")
return sqs_res.get_queue_by_name(QueueName=queue_name)
def declare_queue(
self,
queue_name: str,
*topics_to_bind: Topic,
dead_letter_queue_name: str = None,
**kwargs
) -> Tuple[Queue, Queue]:
try:
queue = self.get_queue(queue_name)
except Exception:
queue = self._create_queue(queue_name, content_based_deduplication="true")
if topics_to_bind:
statements = []
for topic in topics_to_bind:
statement = {
"Sid": "Sid{}".format(uuid.uuid4()),
"Effect": "Allow",
"Principal": "*",
"Resource": queue.attributes["QueueArn"],
"Action": "sqs:SendMessage",
"Condition": {"ArnEquals": {"aws:SourceArn": topic.arn}},
}
statements.append(statement)
subscription = topic.subscribe(
Protocol="sqs",
Endpoint=queue.attributes[
"QueueArn"
], # , Attributes={"RawMessageDelivery": "true"}
)
if kwargs.get("filter_events"):
filter_policy = {"event_type": kwargs["filter_events"]}
else:
filter_policy = {}
subscription.set_attributes(
AttributeName="FilterPolicy",
AttributeValue=json.dumps(filter_policy),
)
policy = {
"Version": "2012-10-17",
"Id": "sqspolicy",
"Statement": statements,
}
queue.set_attributes(Attributes={"Policy": json.dumps(policy)})
dead_letter_queue = None
if dead_letter_queue_name:
try:
dead_letter_queue = self.get_queue(dead_letter_queue_name)
except Exception:
dead_letter_queue = self._create_queue(
dead_letter_queue_name, content_based_deduplication="true"
)
redrive_policy = {
"deadLetterTargetArn": dead_letter_queue.attributes["QueueArn"],
"maxReceiveCount": "4",
}
queue.set_attributes(
Attributes={"RedrivePolicy": json.dumps(redrive_policy)}
)
return queue, dead_letter_queue
def _create_queue(self, queue_name: str, **kwargs) -> Queue:
sqs_res = boto3.resource("sqs")
fifo = queue_name.endswith(".fifo")
attributes = {}
if fifo:
attributes["FifoQueue"] = "true"
attributes["ContentBasedDeduplication"] = (
"true" if kwargs.get("content_based_deduplication") else "false"
)
queue = sqs_res.create_queue(QueueName=queue_name, Attributes=attributes)
return queue
def retrieve_messages(self, queue: Queue, attempt_id=None) -> List[Message]:
kwargs = dict(
MaxNumberOfMessages=self.max_number_of_messages,
VisibilityTimeout=self.visibility_timeout,
WaitTimeSeconds=self.wait_time_seconds,
MessageAttributeNames=["All"],
AttributeNames=["All"],
)
if attempt_id:
kwargs["ReceiveRequestAttemptId"] = attempt_id
messages = queue.receive_messages(**kwargs)
# We need to differentiate here whether the message came from SNS or SQS
return [self._construct_message(message) for message in messages]
def queue_publish(
self,
content: str,
queue,
event_type_name: str = None,
message_group_id: str = None,
message_deduplication_id: str = None,
):
kwargs = dict(MessageBody=json.dumps({"Message": content}))
if event_type_name:
kwargs["MessageAttributes"] = {
"event_type": {"DataType": "String", "StringValue": event_type_name}
}
if message_group_id:
kwargs["MessageGroupId"] = message_group_id
if message_deduplication_id:
kwargs["MessageDeduplicationId"] = message_deduplication_id
queue.send_message(**kwargs)
def publish(
self,
content: str,
topic: Topic,
event_type_name: str,
extra_attributes: Dict = None,
):
args = dict(
Message=content,
MessageAttributes={
"event_type": {"DataType": "String", "StringValue": event_type_name}
},
)
if extra_attributes:
if "subject" in extra_attributes:
args["Subject"] = extra_attributes["subject"]
if "message_attributes" in extra_attributes:
args["MessageAttributes"].update(extra_attributes["message_attributes"])
if "message_structure" in extra_attributes:
args["MessageStructure"] = extra_attributes["message_structure"]
response = topic.publish(**args)
if "MessageId" not in response:
raise ConnectionError("Could not send the event to the SNS TOPIC")
def acknowledge(self, message: Message) -> None:
message.metadata.delete()
def close_connection(self) -> None:
pass
def delete_queue(self, queue: Queue) -> None:
queue.delete()
def delete_topic(self, topic: Topic) -> None:
topic.delete()
def _construct_message(self, message) -> Message:
body = message.body
manifest = ""
try:
message_content = json.loads(body)
if "Message" in message_content:
content = message_content["Message"]
# Does the content have more attributes? If so, it is very likely that the message came from a non-raw
# SNS redirection
if "MessageAttributes" in message_content:
manifest = (
message_content["MessageAttributes"]
.get("event_type", {})
.get("Value")
or ""
)
else:
content = message_content
except JSONDecodeError:
content = body
manifest = (
manifest
or message.message_attributes.get("event_type", {}).get("StringValue")
or ""
)
return Message(message.message_id, content, message, manifest)
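# A minimal usage sketch (the resource names are hypothetical and AWS credentials
# are assumed to be configured for boto3): declare a topic, bind a queue to it,
# publish an event, then poll the queue and acknowledge what comes back.
if __name__ == "__main__":
    driver = AWSDriver(wait_time_seconds=5)
    topic = driver.declare_topic("orders")
    queue, dlq = driver.declare_queue("orders-queue", topic, dead_letter_queue_name="orders-dlq")
    driver.publish('{"order_id": 1}', topic, event_type_name="order_created")
    for message in driver.retrieve_messages(queue):
        print(message)
        driver.acknowledge(message)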
|
'''
Created on 23.08.2017
@author: falensaa
'''
import logging
import sys
import imsnpars.nparser.features
import imsnpars.nparser.network
import imsnpars.nparser.graph.features as gfeatures
from imsnpars.nparser.graph import task, decoder
from imsnpars.nparser.graph.mst import cle
from imsnpars.nparser.labels import task as ltask
def buildMSTDecoder(opts, featBuilder):
if opts.mst == "CLE":
mstAlg = cle.ChuLiuEdmonds()
decod = decoder.FirstOrderDecoder(featBuilder)
else:
logging.error("Unknown algorithm: %s" % opts.mst)
sys.exit()
logging.info("Graph system used: %s" % type(mstAlg))
logging.info("Decoder used: %s" % type(decod))
return mstAlg, decod
def buildGraphFeatureExtractors(featuresD, reprDim):
featIds = { ("h", "0"): gfeatures.FeatId.HEAD,
("d", "0"): gfeatures.FeatId.DEP,
("h", "1"): gfeatures.FeatId.HEAD_P_1,
("h", "2"): gfeatures.FeatId.HEAD_P_2,
("d", "1"): gfeatures.FeatId.DEP_P_1,
("d", "2"): gfeatures.FeatId.DEP_P_2,
("h", "-1"): gfeatures.FeatId.HEAD_M_1,
("h", "-2"): gfeatures.FeatId.HEAD_M_2,
("d", "-1"): gfeatures.FeatId.DEP_M_1,
("d", "-2"): gfeatures.FeatId.DEP_M_2,
("dist", "0") : gfeatures.FeatId.DIST }
mainFeatIds = {"h": gfeatures.FeatId.HEAD,
"d": gfeatures.FeatId.DEP }
featureExtractors = { }
featureBuilders = { }
for feat in featuresD:
if "+" in feat:
name, shift = feat.split("+")
elif "-" in feat:
name, shift = feat.split("-")
shift = "-" + shift
else:
name, shift = feat, "0"
featId = featIds.get((name, shift))
if featId == None:
logging.error("Unknown token id: %s" % feat)
sys.exit()
# for now there is only one builder -- distance
if featId == gfeatures.FeatId.DIST:
featureBuilders[featId] = gfeatures.DistFeatureBuilder(reprDim)
else:
mainFeature = mainFeatIds[name]
if mainFeature not in featureExtractors:
featureExtractors[mainFeature] = gfeatures.TokenFeatExtractor()
featureExtractors[mainFeature].addShift(featId, int(shift))
return featureExtractors, featureBuilders
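# A small sketch (the feature list below is hypothetical) of how the feature
# strings are parsed: "h" maps to the plain HEAD token, "d+1" to the dependent
# shifted by +1, and "dist" yields a distance feature builder rather than a
# token extractor.
def _demoFeatureParsing(reprDim=100):
    extractors, builders = buildGraphFeatureExtractors(["h", "d+1", "dist"], reprDim)
    # extractors is keyed by the main token id (HEAD/DEP), builders by FeatId.DIST
    return extractors, builders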
def buildGraphParser(opts, dummyBuilder, reprBuilder):
reprDim = reprBuilder.getDim()
tokExtractors, featBuilders = buildGraphFeatureExtractors(opts.features, reprDim)
extractor = gfeatures.GraphFeatureExtractor(tokExtractors)
featIds = extractor.getFeatIds() + [ feat.getFeatId() for feat in featBuilders.values() ]
network = imsnpars.nparser.network.ParserNetwork(opts.mlpHiddenDim, opts.nonLinFun, featIds)
featBuilder = imsnpars.nparser.features.FeatReprBuilder(extractor, featBuilders, dummyBuilder, network, opts.parseLayer)
mstAlg, decod = buildMSTDecoder(opts, featBuilder)
if opts.labeler == "graph":
lblDict = ltask.LblTagDict()
parsingTask = task.NNGraphParsingTaskWithLbl(mstAlg, featBuilder, decod, network, opts.augment, lblDict)
else:
parsingTask = task.NNGraphParsingTask(mstAlg, featBuilder, decod, network, opts.augment)
return parsingTask
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.22
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1EventList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta1Event]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1beta1EventList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1beta1EventList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1EventList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1EventList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1EventList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1beta1EventList. # noqa: E501
items is a list of schema objects. # noqa: E501
:return: The items of this V1beta1EventList. # noqa: E501
:rtype: list[V1beta1Event]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1beta1EventList.
items is a list of schema objects. # noqa: E501
:param items: The items of this V1beta1EventList. # noqa: E501
:type: list[V1beta1Event]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1beta1EventList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1EventList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1EventList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1EventList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1EventList. # noqa: E501
:return: The metadata of this V1beta1EventList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1EventList.
:param metadata: The metadata of this V1beta1EventList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1EventList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1EventList):
return True
return self.to_dict() != other.to_dict()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Bout (read bank-out) extracts transactions from pdf bank statements.
_ _
(_) (_)
(_) _ _ _ _ _ _ _ _ _ (_) _ _
(_)(_)(_)(_)_ _ (_)(_)(_) _ (_) (_)(_)(_)(_)(_)
(_) (_)(_) (_)(_) (_) (_)
(_) (_)(_) (_)(_) (_) (_) _
(_) _ _ _(_)(_) _ _ _ (_)(_)_ _ _(_)_ (_)_ _(_)
(_)(_)(_)(_) (_)(_)(_) (_)(_)(_) (_) (_)(_)
"""
import io
import logging
import click
import csv
from collections import namedtuple
from datetime import datetime
logger = logging.getLogger("bout")
profiles = {}
Transaction = namedtuple("Transaction",
["id", "date", "payee", "memo", "amount"])
InvalidTransaction = namedtuple("InvalidTransaction", [])
def get_icici_csv(data_row):
"""Convert a transaction row to tuple.
Details of fields
0: 'D', # Transaction date
2: 'M', # Transaction details
3: 'T', # Deposit
4: 'T-', # Withdrawal
"""
logger.debug("get_icicicsv: Data row = {}".format(data_row))
date = data_row[0].replace('-', '/')
if _valid_date(date):
amt = "-{}".format(data_row[4])
if data_row[3] != "0":
amt = data_row[3]
return Transaction(id=0,
date=date,
payee="", # Empty for ICICI bank account
memo=data_row[2],
amount=amt)
return InvalidTransaction()
def get_icicicc_csv(data_row):
"""Convert a transaction row to tuple.
Details of fields
0: 'D', # Transaction date
2: 'M', # Transaction details
5: 'T', # Amount
"""
logger.debug("get_icicicsv: Data row = {}".format(data_row))
date = data_row[0]
if _valid_date(date, date_format="%d/%m/%Y"):
amt = "-{}".format(data_row[5])
if data_row[6] == "CR":
amt = data_row[5]
return Transaction(id=0,
date=date,
payee="", # Empty for ICICI bank account
memo=data_row[2],
amount=amt)
return InvalidTransaction()
def qif_header():
"""Print qif header."""
click.echo("!Account\nNMyAccount\nTMyBank\n^\n!Type:Bank")
def to_qif(transaction):
"""Transform a cleaned up row to qif format.
Returns:
string of a particular transaction in qif format
See wikipedia for more details of QIF format.
https://en.wikipedia.org/wiki/Quicken_Interchange_Format#Detail_items
"""
logger.debug("to_qif: Input = {}".format(transaction))
return "D{0}\nM{1}\nT{2}\n^\n\n"\
.format(transaction.date, transaction.memo, transaction.amount)
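# A small illustration (hypothetical values) of the QIF detail item produced by
# to_qif for a single cleaned-up transaction.
def _demo_to_qif():
    sample = Transaction(id=0, date="01/02/2020", payee="",
                         memo="GROCERIES", amount="-450.00")
    # -> "D01/02/2020\nMGROCERIES\nT-450.00\n^\n\n"
    return to_qif(sample)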
def _valid_date(date_value, date_format="%d/%m/%Y"):
"""Validate a transaction date."""
try:
transaction_date = datetime.strptime(date_value, date_format)
return transaction_date is not None
except ValueError:
return False
def _filter_csv_header(doc, header):
head_skip = False
mem = io.StringIO()
with open(doc, encoding='utf-8', mode='r') as f:
for line in f:
if line.startswith(header):
head_skip = True
continue
if head_skip and (not line or line.isspace()):
break
if head_skip and ',' in line:
mem.write(line)
mem.seek(0)
return csv.reader(mem)
@click.command()
@click.argument("doc", type=click.Path(exists=True))
@click.option("--profile", prompt="Choose a profile", default="icici",
show_default=True,
type=click.Choice(["icici", "icicicc"]),
help="Document type profile.")
@click.option("--debug", is_flag=True, show_default=True,
help="Show diagnostic messages.")
def start(doc, profile, debug):
"""Bout (read bank-out) extracts transactions from csv bank statements."""
if debug:
logging.basicConfig(level=logging.DEBUG)
logger.info("Verbose messages are enabled.")
profiles.update({"icici": get_icici_csv,
"icicicc": get_icicicc_csv})
rows = []
if profile == "icici":
header = "DATE,MODE,PARTICULARS,DEPOSITS,WITHDRAWALS,BALANCE"
rows = _filter_csv_header(doc, header)
elif profile == "icicicc":
header = "Date,Sr.No.,Transaction Details,Reward Point Header,Intl.Amount,Amount(in Rs),BillingAmountSign"
rows = _filter_csv_header(doc, header)
# row -> clean_row
# clean_row, profile -> transaction
# transaction -> qif
create_transaction = profiles[profile]
print_header = False
for r in rows:
transaction = create_transaction(r)
if type(transaction) is not InvalidTransaction:
if not print_header:
qif_header()
print_header = True
click.echo(to_qif(transaction))
if __name__ == '__main__':
start()
|
from colored import *
import staticconf
"""
You might find the colored documentation very useful:
https://pypi.python.org/pypi/colored
"""
ENABLE_COLORIZER = staticconf.read_string('enable_colorizer', default='false').lower() == 'true'
def colorizer_enabled(function):
"""do not colorize if it's not enabled"""
def wrapper(*args):
if ENABLE_COLORIZER:
return function(*args)
elif args:
return args[0]
else:
return args
return wrapper
# attr and colors
ATTR_RESET = attr('reset')
COLOR_INDEX = fg(199)
COLOR_TITLE = fg(45)
COLOR_TAG_0 = fg(10) + attr('bold')
COLOR_TAG_1 = fg(10)
COLOR_TAG_2 = fg(87)
COLOR_TAG_3 = fg(208)
COLOR_TAG_4 = fg(252)
@colorizer_enabled
def color_index(index):
return COLOR_INDEX + index + ATTR_RESET
@colorizer_enabled
def color_title(title):
return COLOR_TITLE + title + ATTR_RESET
def _color_by_score(score):
if score >= 1:
return COLOR_TAG_0
elif score >= 0.9:
return COLOR_TAG_1
elif score >= 0.8:
return COLOR_TAG_2
elif score >= 0.7:
return COLOR_TAG_3
return COLOR_TAG_4
@colorizer_enabled
def _color_tag(tag, score):
return _color_by_score(score) + tag + ATTR_RESET
def color_tags(scored_tags):
return ", ".join((_color_tag(tag, score) for tag, score in scored_tags))
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._azure_media_services import AzureMediaServices
__all__ = ['AzureMediaServices']
# `._patch.py` is used for handwritten extensions to the generated code
# Example: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md
from ._patch import patch_sdk
patch_sdk()
|
from datetime import timedelta
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.models import User
from django.utils import timezone
from allauth.account.models import EmailAddress
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from challenges.models import Challenge
from hosts.models import ChallengeHost, ChallengeHostTeam
from participants.models import ParticipantTeam, Participant
class BaseAPITestClass(APITestCase):
def setUp(self):
self.client = APIClient(enforce_csrf_checks=True)
self.user = User.objects.create(
username="someuser",
email="user@test.com",
password="secret_password",
)
EmailAddress.objects.create(
user=self.user, email="user@test.com", primary=True, verified=True
)
self.invite_user = User.objects.create(
username="otheruser",
email="other@platform.com",
password="other_secret_password",
)
self.participant_team = ParticipantTeam.objects.create(
team_name="Participant Team", created_by=self.user
)
self.participant = Participant.objects.create(
user=self.user, team=self.participant_team, status=Participant.SELF
)
self.client.force_authenticate(user=self.user)
class GetParticipantTeamTest(BaseAPITestClass):
url = reverse_lazy("participants:get_participant_team_list")
def setUp(self):
super(GetParticipantTeamTest, self).setUp()
self.user2 = User.objects.create(
username="user2",
email="user2@platform.com",
password="user2_password",
)
EmailAddress.objects.create(
user=self.user2,
email="user2@platform.com",
primary=True,
verified=True,
)
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team,
)
def test_get_challenge(self):
expected = [
{
"id": self.participant_team.pk,
"team_name": self.participant_team.team_name,
"created_by": self.user.username,
"team_url": self.participant_team.team_url,
"members": [
{
"member_name": self.participant.user.username,
"status": self.participant.status,
"member_id": self.participant.user.id,
},
{
"member_name": self.participant2.user.username,
"status": self.participant2.status,
"member_id": self.participant2.user.id,
},
],
}
]
response = self.client.get(self.url, {})
self.assertEqual(response.data["results"], expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateParticipantTeamTest(BaseAPITestClass):
url = reverse_lazy("participants:get_participant_team_list")
def setUp(self):
super(CreateParticipantTeamTest, self).setUp()
self.data = {"team_name": "New Participant Team"}
def test_create_participant_team_with_all_data(self):
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_participant_team_with_team_name_same_as_with_existing_team(
self
):
expected = {
"team_name": [
"participant team with this team name already exists."
]
}
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# Creating team with same team name
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, expected)
def test_create_participant_team_with_no_data(self):
del self.data["team_name"]
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class GetParticularParticipantTeam(BaseAPITestClass):
def setUp(self):
super(GetParticularParticipantTeam, self).setUp()
self.url = reverse_lazy(
"participants:get_participant_team_details",
kwargs={"pk": self.participant_team.pk},
)
self.user2 = User.objects.create(
username="user2",
email="user2@platform.com",
password="user2_password",
)
EmailAddress.objects.create(
user=self.user2,
email="user2@platform.com",
primary=True,
verified=True,
)
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team,
)
def test_get_particular_participant_team(self):
expected = {
"id": self.participant_team.pk,
"team_name": self.participant_team.team_name,
"created_by": self.user.username,
"team_url": self.participant_team.team_url,
"members": [
{
"member_name": self.participant.user.username,
"status": self.participant.status,
"member_id": self.participant.user.id,
},
{
"member_name": self.participant2.user.username,
"status": self.participant2.status,
"member_id": self.participant2.user.id,
},
],
}
response = self.client.get(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_particular_participant_team_does_not_exist(self):
self.url = reverse_lazy(
"participants:get_participant_team_details",
kwargs={"pk": self.participant_team.pk + 1},
)
expected = {"error": "ParticipantTeam does not exist"}
response = self.client.get(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
class UpdateParticularParticipantTeam(BaseAPITestClass):
def setUp(self):
super(UpdateParticularParticipantTeam, self).setUp()
self.url = reverse_lazy(
"participants:get_participant_team_details",
kwargs={"pk": self.participant_team.pk},
)
self.partial_update_participant_team_name = (
"Partial Update Participant Team"
)
self.update_participant_team_name = "Update Test Participant Team"
self.data = {"team_name": self.update_participant_team_name}
def test_particular_participant_team_partial_update(self):
self.partial_update_data = {
"team_name": self.partial_update_participant_team_name
}
expected = {
"id": self.participant_team.pk,
"team_name": self.partial_update_participant_team_name,
"created_by": self.user.username,
"team_url": self.participant_team.team_url,
}
response = self.client.patch(self.url, self.partial_update_data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_particular_participant_team_update(self):
expected = {
"id": self.participant_team.pk,
"team_name": self.update_participant_team_name,
"created_by": self.user.username,
"team_url": self.participant_team.team_url,
}
response = self.client.put(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_particular_participant_team_update_with_no_data(self):
self.data = {"team_name": ""}
response = self.client.put(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class DeleteParticularParticipantTeam(BaseAPITestClass):
def setUp(self):
super(DeleteParticularParticipantTeam, self).setUp()
self.url = reverse_lazy(
"participants:get_participant_team_details",
kwargs={"pk": self.participant_team.pk},
)
def test_particular_participant_team_delete(self):
response = self.client.delete(self.url, {})
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class InviteParticipantToTeamTest(BaseAPITestClass):
def setUp(self):
super(InviteParticipantToTeamTest, self).setUp()
self.data = {"email": self.invite_user.email}
self.url = reverse_lazy(
"participants:invite_participant_to_team",
kwargs={"pk": self.participant_team.pk},
)
def test_invite_participant_to_team_with_all_data(self):
expected = {"message": "User has been successfully added to the team!"}
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_invite_participant_to_team_with_no_data(self):
del self.data["email"]
response = self.client.post(self.url, self.data)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_invite_self_to_team(self):
self.data = {"email": self.user.email}
expected = {"error": "User is already part of the team!"}
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_invite_to_other_team_which_doesnot_belong_to_user(self):
temp_user = User.objects.create(
username="temp_user", password="test_password"
)
temp_participant_team = ParticipantTeam.objects.create(
team_name="Test Team 1", created_by=temp_user
)
expected = {"error": "You are not a member of this team!"}
self.url = reverse_lazy(
"participants:invite_participant_to_team",
kwargs={"pk": temp_participant_team.pk},
)
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_invite_user_which_does_not_exist_to_team(self):
self.data = {"email": "userwhichdoesnotexist@platform.com"}
expected = {"error": "User does not exist with this email address!"}
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_particular_participant_team_for_invite_does_not_exist(self):
self.url = reverse_lazy(
"participants:invite_participant_to_team",
kwargs={"pk": self.participant_team.pk + 1},
)
expected = {"error": "Participant Team does not exist"}
response = self.client.post(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_invite_participant_to_team_when_user_cannot_be_invited(self):
"""
NOTE
user: host user
user1: participant 1
user2: participant 2
"""
self.user2 = User.objects.create(
username="user2",
email="user2@platform.com",
password="user2_password",
)
EmailAddress.objects.create(
user=self.user2,
email="user2@platform.com",
primary=True,
verified=True,
)
self.user3 = User.objects.create(
username="user3",
email="user3@platform.com",
password="user3_password",
)
EmailAddress.objects.create(
user=self.user3,
email="user3@platform.com",
primary=True,
verified=True,
)
self.participant_team2 = ParticipantTeam.objects.create(
team_name="Participant Team created by user 2",
created_by=self.user2,
)
self.participant_team3 = ParticipantTeam.objects.create(
team_name="Participant Team created by user 3",
created_by=self.user3,
)
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team2,
)
self.participant3 = Participant.objects.create(
user=self.user3,
status=Participant.ACCEPTED,
team=self.participant_team3,
)
self.challenge_host_team = ChallengeHostTeam.objects.create(
team_name="Test Challenge Host Team", created_by=self.user
)
self.challenge = Challenge.objects.create(
title="Test Challenge",
short_description="Short description for test challenge",
description="Description for test challenge",
terms_and_conditions="Terms and conditions for test challenge",
submission_guidelines="Submission guidelines for test challenge",
creator=self.challenge_host_team,
published=False,
enable_forum=True,
leaderboard_description=None,
anonymous_leaderboard=False,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
)
self.client.force_authenticate(user=self.user2)
self.challenge.participant_teams.add(self.participant_team2)
self.challenge.participant_teams.add(self.participant_team3)
self.data = {"email": self.user3.email}
self.url = reverse_lazy(
"participants:invite_participant_to_team",
kwargs={"pk": self.participant_team2.pk},
)
expected = {
"error": "Sorry, the invited user has already participated "
"in atleast one of the challenges which you are already"
" a part of. Please try creating a new team and then invite."
}
response = self.client.post(self.url, self.data)
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
class DeleteParticipantFromTeamTest(BaseAPITestClass):
def setUp(self):
super(DeleteParticipantFromTeamTest, self).setUp()
self.participant = Participant.objects.create(
user=self.user, status=Participant.SELF, team=self.participant_team
)
self.user2 = User.objects.create(
username="user2",
email="user2@platform.com",
password="user2_password",
)
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team,
)
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.invite_user.pk,
},
)
def test_participant_does_not_exist_in_team(self):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.participant2.pk + 1,
},
)
expected = {"error": "Participant does not exist"}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_when_participant_team_does_not_exist(self):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk + 1,
"participant_pk": self.participant2.pk,
},
)
expected = {"error": "ParticipantTeam does not exist"}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_when_participant_is_admin_and_wants_to_delete_himself(self):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.participant.pk,
},
)
expected = {
"error": "You are not allowed to remove yourself since you are admin. Please delete the team if you want to do so!" # noqa: ignore=E501
}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_when_participant_does_not_have_permissions_to_remove_another_participant(
self
):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.participant2.pk,
},
)
self.user3 = User.objects.create(
username="user3",
email="user3@platform.com",
password="user3_password",
)
EmailAddress.objects.create(
user=self.user3,
email="user3@platform.com",
primary=True,
verified=True,
)
self.participant3 = Participant.objects.create(
user=self.user3,
status=Participant.ACCEPTED,
team=self.participant_team,
)
self.client.force_authenticate(user=self.user3)
expected = {
"error": "Sorry, you do not have permissions to remove this participant"
}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_when_a_participant_is_successfully_removed_from_team(self):
self.url = reverse_lazy(
"participants:delete_participant_from_team",
kwargs={
"participant_team_pk": self.participant_team.pk,
"participant_pk": self.participant2.pk,
},
)
response = self.client.delete(self.url, {})
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class GetTeamsAndCorrespondingChallengesForAParticipant(BaseAPITestClass):
def setUp(self):
super(GetTeamsAndCorrespondingChallengesForAParticipant, self).setUp()
self.user2 = User.objects.create(
username="user2",
email="user2@platform.com",
password="user2_password",
)
EmailAddress.objects.create(
user=self.user2,
email="user2@platform.com",
primary=True,
verified=True,
)
self.participant_team2 = ParticipantTeam.objects.create(
team_name="Team B", created_by=self.user2
) # created by user2 and not user
self.participant2 = Participant.objects.create(
user=self.user2,
status=Participant.ACCEPTED,
team=self.participant_team2,
)
self.challenge_host_team = ChallengeHostTeam.objects.create(
team_name="Host Team 1", created_by=self.user2
)
self.challenge1 = Challenge.objects.create(
title="Test Challenge 1",
short_description="Short description for test challenge 1",
description="Description for test challenge 1",
terms_and_conditions="Terms and conditions for test challenge 1",
submission_guidelines="Submission guidelines for test challenge 1",
creator=self.challenge_host_team,
published=False,
is_registration_open=True,
enable_forum=True,
leaderboard_description="Lorem ipsum dolor sit amet, consectetur adipiscing elit",
anonymous_leaderboard=False,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
)
self.challenge1.slug = "{}-{}".format(
self.challenge1.title.replace(" ", "-").lower(), self.challenge1.pk
)[:199]
self.challenge1.save()
self.challenge2 = Challenge.objects.create(
title="Test Challenge 2",
short_description="Short description for test challenge 2",
description="Description for test challenge 2",
terms_and_conditions="Terms and conditions for test challenge 2",
submission_guidelines="Submission guidelines for test challenge 2",
creator=self.challenge_host_team,
published=False,
is_registration_open=True,
enable_forum=True,
anonymous_leaderboard=False,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
)
self.url = reverse_lazy(
"participants:get_teams_and_corresponding_challenges_for_a_participant",
kwargs={"challenge_pk": self.challenge1.pk},
)
self.time = timezone.now()
def test_get_teams_and_corresponding_challenges_for_a_participant(self):
self.challenge1.participant_teams.add(self.participant_team)
self.challenge1.save()
expected = {
"challenge_participant_team_list": [
{
"challenge": {
"id": self.challenge1.id,
"title": self.challenge1.title,
"description": self.challenge1.description,
"short_description": self.challenge1.short_description,
"terms_and_conditions": self.challenge1.terms_and_conditions,
"submission_guidelines": self.challenge1.submission_guidelines,
"evaluation_details": self.challenge1.evaluation_details,
"image": self.challenge1.image,
"start_date": "{0}{1}".format(
self.challenge1.start_date.isoformat(), "Z"
).replace("+00:00", ""),
"end_date": "{0}{1}".format(
self.challenge1.end_date.isoformat(), "Z"
).replace("+00:00", ""),
"creator": {
"id": self.challenge_host_team.id,
"team_name": self.challenge_host_team.team_name,
"created_by": self.challenge_host_team.created_by.username,
"team_url": self.challenge_host_team.team_url,
},
"published": self.challenge1.published,
"is_registration_open": self.challenge1.is_registration_open,
"enable_forum": self.challenge1.enable_forum,
"leaderboard_description": self.challenge1.leaderboard_description,
"anonymous_leaderboard": self.challenge1.anonymous_leaderboard,
"is_active": True,
"allowed_email_domains": [],
"blocked_email_domains": [],
"banned_email_ids": [],
"approved_by_admin": False,
"forum_url": self.challenge1.forum_url,
"is_docker_based": self.challenge1.is_docker_based,
"slug": self.challenge1.slug,
"max_docker_image_size": self.challenge1.max_docker_image_size,
"cli_version": self.challenge1.cli_version,
},
"participant_team": {
"id": self.participant_team.id,
"team_name": self.participant_team.team_name,
"created_by": self.participant_team.created_by.username,
"team_url": self.participant_team.team_url,
},
}
],
"is_challenge_host": False,
}
response = self.client.get(self.url, {})
# checking 'datetime_now' separately because of time difference in microseconds
self.assertTrue(
abs(response.data["datetime_now"] - self.time)
< timedelta(seconds=1)
)
        # remove 'datetime_now' from the response so it can be compared with the expected payload, which has no time field
del response.data["datetime_now"]
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_participant_team_challenge_list(self):
self.url = reverse_lazy(
"participants:get_participant_team_challenge_list",
kwargs={"participant_team_pk": self.participant_team.pk},
)
expected = [
{
"id": self.challenge1.id,
"title": self.challenge1.title,
"description": self.challenge1.description,
"short_description": self.challenge1.short_description,
"terms_and_conditions": self.challenge1.terms_and_conditions,
"submission_guidelines": self.challenge1.submission_guidelines,
"evaluation_details": self.challenge1.evaluation_details,
"image": self.challenge1.image,
"start_date": "{0}{1}".format(
self.challenge1.start_date.isoformat(), "Z"
).replace("+00:00", ""),
"end_date": "{0}{1}".format(
self.challenge1.end_date.isoformat(), "Z"
).replace("+00:00", ""),
"creator": {
"id": self.challenge_host_team.id,
"team_name": self.challenge_host_team.team_name,
"created_by": self.challenge_host_team.created_by.username,
"team_url": self.challenge_host_team.team_url,
},
"published": self.challenge1.published,
"is_registration_open": self.challenge1.is_registration_open,
"enable_forum": self.challenge1.enable_forum,
"leaderboard_description": self.challenge1.leaderboard_description,
"anonymous_leaderboard": self.challenge1.anonymous_leaderboard,
"is_active": True,
"allowed_email_domains": [],
"blocked_email_domains": [],
"banned_email_ids": [],
"approved_by_admin": False,
"forum_url": self.challenge1.forum_url,
"is_docker_based": self.challenge1.is_docker_based,
"slug": self.challenge1.slug,
"max_docker_image_size": self.challenge1.max_docker_image_size,
"cli_version": self.challenge1.cli_version,
}
]
self.challenge1.participant_teams.add(self.participant_team)
self.challenge1.save()
response = self.client.get(self.url, {})
self.assertEqual(response.data["results"], expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_when_participant_team_hasnot_participated_in_any_challenge(self):
expected = {
"challenge_participant_team_list": [
{
"challenge": None,
"participant_team": {
"id": self.participant_team.id,
"team_name": self.participant_team.team_name,
"created_by": self.participant_team.created_by.username,
"team_url": self.participant_team.team_url,
},
}
],
"is_challenge_host": False,
}
response = self.client.get(self.url, {})
# checking 'datetime_now' separately because of time difference in microseconds
self.assertTrue(
abs(response.data["datetime_now"] - self.time)
< timedelta(seconds=1)
)
        # remove 'datetime_now' from the response so it can be compared with the expected payload, which has no time field
del response.data["datetime_now"]
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_when_there_is_no_participant_team_of_user(self):
self.participant_team.delete()
expected = {
"challenge_participant_team_list": [],
"is_challenge_host": False,
}
response = self.client.get(self.url, {})
# checking 'datetime_now' separately because of time difference in microseconds
self.assertTrue(
abs(response.data["datetime_now"] - self.time)
< timedelta(seconds=1)
)
        # remove 'datetime_now' from the response so it can be compared with the expected payload, which has no time field
del response.data["datetime_now"]
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class RemoveSelfFromParticipantTeamTest(BaseAPITestClass):
def setUp(self):
super(RemoveSelfFromParticipantTeamTest, self).setUp()
        # user who creates a challenge host team
self.user2 = User.objects.create(
username="someuser2", password="some_secret_password"
)
self.challenge_host_team = ChallengeHostTeam.objects.create(
team_name="Some Test Challenge Host Team", created_by=self.user2
)
self.challenge_host2 = ChallengeHost.objects.create(
user=self.user2,
team_name=self.challenge_host_team,
status=ChallengeHost.ACCEPTED,
permissions=ChallengeHost.ADMIN,
)
self.challenge = Challenge.objects.create(
title="Some Test Challenge",
short_description="Short description for some test challenge",
description="Description for some test challenge",
terms_and_conditions="Terms and conditions for some test challenge",
submission_guidelines="Submission guidelines for some test challenge",
creator=self.challenge_host_team,
published=False,
is_registration_open=True,
enable_forum=True,
leaderboard_description="Fusce quis sapien eget sem accumsan euismod",
anonymous_leaderboard=False,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
)
self.url = reverse_lazy(
"participants:remove_self_from_participant_team",
kwargs={"participant_team_pk": self.participant_team.pk},
)
def test_when_participant_team_does_not_exist(self):
self.url = reverse_lazy(
"participants:remove_self_from_participant_team",
kwargs={"participant_team_pk": self.participant_team.pk + 1},
)
expected = {"error": "ParticipantTeam does not exist!"}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_when_a_participant_is_successfully_removed_from_team(self):
self.url = reverse_lazy(
"participants:remove_self_from_participant_team",
kwargs={"participant_team_pk": self.participant_team.pk},
)
response = self.client.delete(self.url, {})
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_when_participant_team_has_taken_part_in_challenges(self):
self.challenge.participant_teams.add(self.participant_team)
expected = {
"error": "Sorry, you cannot delete this team since it has taken part in challenge(s)!"
}
response = self.client.delete(self.url, {})
self.assertEqual(response.data, expected)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_participant_team_remove_when_no_participants_exists(self):
self.url = reverse_lazy(
"participants:remove_self_from_participant_team",
kwargs={"participant_team_pk": self.participant_team.pk},
)
self.client.delete(self.url, {})
participant_teams = ParticipantTeam.objects.all()
self.assertEqual(participant_teams.count(), 0)
|
# copyright 2022 @Ansaku
# Telegram @AnkiSatya
# Instagram @satya_ask
import telebot
import requests
from telebot.types import KeyboardButton  # reply keyboards take KeyboardButton, not InlineKeyboardButton
# Fill in the bot token obtained from @BotFather; for further queries contact @AnkiSatya on Telegram
bot = telebot.TeleBot('**********************')
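# A minimal sketch, not part of the original script: the token could be read from an
# environment variable instead of being hard-coded, e.g.
#   import os
#   bot = telebot.TeleBot(os.environ["TELEGRAM_BOT_TOKEN"])  # "TELEGRAM_BOT_TOKEN" is a hypothetical name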
while True:
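    # The outer loop restarts polling after any unhandled error so the bot keeps running.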
try:
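        # Main reply keyboard; the button labels are Indonesian:
        # 'Buat email' = create email, 'Refresh pesan' = refresh messages, 'Tentang' = about.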
keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)
        keyboard.add(KeyboardButton(text='Buat email'))
        keyboard.add(KeyboardButton(text='Refresh pesan'))
        keyboard.add(KeyboardButton(text='Tentang'))
@bot.message_handler(commands=['start'])
def start_message(message):
bot.send_message(message.chat.id,
'Hai Pengguna., Selamat datang di TempEmail Bot \nPenggunaan:\nUntuk Menghasilkan email klik tombol "Buat email"\nUntuk menyegarkan kotak masuk Anda, klik tombol "Refresh inbox". Setelah surat baru tiba, Anda akan melihat tombol dengan baris subjek, klik tombol read the message. \n\n Dev : @AnkiSatya',
reply_markup=keyboard)
@bot.message_handler(content_types=['text'])
def send_text(message):
if message.text.lower() == 'buat email':
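                # 'Buat email': request a random temporary mailbox from the 1secmail API
                # and echo it back, embedding the address in the refresh button label.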
email = requests.get("https://www.1secmail.com/api/v1/?action=genRandomMailbox&count=1").json()[0]
ekeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)
                ekeyboard.add(KeyboardButton(text='Buat email'))
                ekeyboard.add(KeyboardButton(text='Refresh pesan\n[' + str(email) + "]"))
                ekeyboard.add(KeyboardButton(text='Tentang'))
bot.send_message(message.chat.id, "E-Mail Sementara Anda:")
bot.send_message(message.chat.id, str(email), reply_markup=ekeyboard)
elif message.text.lower() == 'refresh pesan':
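                # 'Refresh pesan' pressed before an address exists: ask the user to create one first.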
bot.send_message(message.chat.id, 'Pertama, buat email anda', reply_markup=keyboard)
elif message.text.lower() == 'tentang':
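                # 'Tentang' (about): send the bot description (Indonesian).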
bot.send_message(message.chat.id,
'Apa itu Email Semantara?\n- Itu adalah layanan email gratis yang memungkinkan untuk menerima email di alamat sementara yang akan dihancurkan sendiri setelah waktu tertentu berlalu. Itu juga dikenal dengan nama-nama seperti tempmail, 10minutemail, 10minmail, throwaway email, fake-mail , fake email generator, burner mail atau trash-mail\n\nBagaimana Email Sementara Menjadi Lebih Aman bagi Anda?\n- Menggunakan Email sementara memungkinkan Anda untuk sepenuhnya melindungi kotak surat asli Anda dari hilangnya informasi pribadi. Alamat email sementara Anda sepenuhnya anonim. Detail Anda: informasi tentang orang Anda dan pengguna yang berkomunikasi dengan Anda, alamat IP, alamat email dilindungi dan sepenuhnya dirahasiakan.\n\n➪ Nama Bot : TempMail Bot\n➪ Pembuat : @AnkiSatya\n➪ Language : Python \n➪ Donasi : https://saweria.co/ansaku')
            elif message.text.lower().startswith('refresh pesan\n['):
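                # Refresh button whose label embeds the generated address as
                # 'Refresh pesan\n[address]': parse the address out and list the mailbox
                # via the 1secmail getMessages endpoint, adding one button per message.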
email = message.text.lower()[15:message.text.lower().find("]")]
bkeyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)
                bkeyboard.add(KeyboardButton(text='Refresh pesan\n[' + str(email) + "]"))
                bkeyboard.add(KeyboardButton(text='Buat email'))
try:
data = requests.get(
"https://www.1secmail.com/api/v1/?action=getMessages&login=" + email[:email.find(
"@")] + "&domain=" + email[email.find("@") + 1:]).json()
if 'id' in data[0]:
for i in range(len(data)):
id = data[i]['id']
subject = data[i]['subject']
fromm = data[i]['from']
date = data[i]['date']
if len(subject) > 15:
subject = str(subject[0:15]) + "..."
                            bkeyboard.add(KeyboardButton(
                                text=str(subject) + "\n dari: " + fromm + " in " + "[id" + str(id) + "][" + str(email) + "]"))
bot.send_message(message.chat.id,
"Subjek: " + subject + "\n Dari: " + fromm + "\n Tanggal:" + date,
reply_markup=bkeyboard)
count = i + 1
bot.send_message(message.chat.id, "Di Sini " + str(
count) + " Pesan ditemukan\nKlik tombol di bawah untuk membaca pesan\n\n Info lebih lanjut @AnkiSatya")
else:
bot.send_message(message.chat.id, 'Tidak ditemukan', reply_markup=bkeyboard)
                except Exception:
bot.send_message(message.chat.id, 'Tidak ada pesan yang diterima...', reply_markup=bkeyboard)
            elif "[id" in message.text.lower():
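                # A message button encodes '[id<N>][address]': parse both parts and
                # fetch the full message via the 1secmail readMessage endpoint.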
try:
data = message.text.lower()[message.text.lower().find("[id"):]
id = data[data.find("[") + 3:data.find(']')]
email = data[data.find("][") + 2:-1]
msg = requests.get("https://www.1secmail.com/api/v1/?action=readMessage&login=" + email[:email.find(
"@")] + "&domain=" + email[email.find("@") + 1:] + "&id=" + id).json()
bot.send_message(message.chat.id,
'Pesan ✉️\n\n Dari: ' + msg['from'] + "\n Subjek: " + msg[
'subject'] + "\n Tanggal: " + msg[
'date'] + "\n Teks: " + msg['textBody'])
                except Exception:
pass
bot.polling(none_stop=True, interval=1, timeout=5000)
    except Exception:
pass
# Stay tuned for more : Telegram @AnkiSatya
|