# pylint: disable=missing-function-docstring
"""Tests for '_continuous.py' file"""
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import scipy.stats.distributions as distrs
from scipy.stats.kde import gaussian_kde
from scipy.integrate import quad
import pytest
from randomvars._continuous import Cont
from randomvars.tests.commontests import (
DECIMAL,
_test_equal_rand,
_test_equal_seq,
_test_from_rv_rand,
_test_from_sample_rand,
_test_input_coercion,
_test_log_fun,
_test_one_value_input,
_test_rvs_method,
declass,
h,
)
from randomvars.options import config
DISTRIBUTIONS_COMMON = {
"beta": distrs.beta(a=10, b=20),
"chi_sq": distrs.chi2(df=10),
"expon": distrs.expon(),
"f": distrs.f(dfn=20, dfd=20),
"gamma": distrs.gamma(a=10),
"laplace": distrs.laplace(),
"lognorm": distrs.lognorm(s=0.5),
"norm": distrs.norm(),
"norm2": distrs.norm(loc=10),
"norm3": distrs.norm(scale=0.1),
"norm4": distrs.norm(scale=10),
"norm5": distrs.norm(loc=10, scale=0.1),
"t": distrs.t(df=10),
"uniform": distrs.uniform(),
"uniform2": distrs.uniform(loc=10, scale=0.1),
"weibull_max": distrs.weibull_max(c=2),
"weibull_min": distrs.weibull_min(c=2),
}
DISTRIBUTIONS_INF_DENSITY = {
"inf_beta_both": distrs.beta(a=0.4, b=0.6),
"inf_beta_left": distrs.beta(a=0.5, b=2),
"inf_beta_right": distrs.beta(a=2, b=0.5),
"inf_chi_sq": distrs.chi2(df=1),
"inf_weibull_max": distrs.weibull_max(c=0.5),
"inf_weibull_min": distrs.weibull_min(c=0.5),
}
DISTRIBUTIONS_HEAVY_TAILS = {
"heavy_cauchy": distrs.cauchy(),
"heavy_lognorm": distrs.lognorm(s=1),
"heavy_t": distrs.t(df=2),
}
DISTRIBUTIONS = {
**DISTRIBUTIONS_COMMON,
**DISTRIBUTIONS_HEAVY_TAILS,
**DISTRIBUTIONS_INF_DENSITY,
}
def augment_grid(x, n_inner_points):
test_arr = [
np.linspace(x[i], x[i + 1], n_inner_points + 1, endpoint=False)
for i in np.arange(len(x) - 1)
]
test_arr.append([x[-1]])
return np.concatenate(test_arr)
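# Illustrative example (not used by the tests): with one inner point per
# interval, `augment_grid([0, 1, 2], 1)` returns `array([0., 0.5, 1., 1.5, 2.])`.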
def from_sample_cdf_max_error(x):
rv = Cont.from_sample(x)
density = config.estimator_cont(x)
x_grid = augment_grid(rv.x, 10)
# Efficient way of computing `quad(density, -np.inf, x_grid)`
x_grid_ext = np.concatenate([[-np.inf], x_grid])
cdf_intervals = np.array(
[
quad(density, x_l, x_r)[0]
for x_l, x_r in zip(x_grid_ext[:-1], x_grid_ext[1:])
]
)
cdf_grid = np.cumsum(cdf_intervals)
err = cdf_grid - rv.cdf(x_grid)
return np.max(np.abs(err))
def circle_fun(x, low, high):
x = np.array(x)
center = 0.5 * (high + low)
radius = 0.5 * (high - low)
res = np.zeros_like(x)
center_dist = np.abs(x - center)
is_in = center_dist <= radius
res[is_in] = np.sqrt(radius ** 2 - center_dist[is_in] ** 2)
return res
def make_circ_density(intervals):
"""Construct circular density
Density looks like half-circles with diameters lying in elements of
`intervals`. Total integral is equal to 1.
Parameters
----------
intervals : iterable with elements being 2-element iterables
Iterable of intervals with non-zero density.
Returns
-------
density : callable
Function which returns density values.
"""
def density(x):
res = np.zeros_like(x)
tot_integral = 0
for low, high in intervals:
res += circle_fun(x, low, high)
# There is only half of circle
tot_integral += np.pi * (high - low) ** 2 / 8
return res / tot_integral
return density
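# Illustrative sketch (not part of the test suite): the constructed density
# integrates to one over its support; the interval choice below is arbitrary.
#
#     circ_density = make_circ_density([(0, 1), (2, 4)])
#     total = quad(circ_density, -1, 5)[0]  # expected to be close to 1.0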
class TestCont:
"""Regression tests for `Cont` class"""
def test_init_errors(self):
def check_one_input(def_args, var):
with pytest.raises(TypeError, match=f"`{var}`.*numpy array"):
def_args[var] = {"a": None}
Cont(**def_args)
with pytest.raises(TypeError, match=f"`{var}`.*float"):
def_args[var] = ["a", "a"]
Cont(**def_args)
with pytest.raises(TypeError, match=f"`{var}`.*finite values"):
def_args[var] = [0, np.nan]
Cont(**def_args)
with pytest.raises(TypeError, match=f"`{var}`.*finite values"):
def_args[var] = [0, np.inf]
Cont(**def_args)
with pytest.raises(ValueError, match=f"`{var}`.*1d array"):
def_args[var] = [[0, 1]]
Cont(**def_args)
check_one_input({"y": [1, 1]}, "x")
check_one_input({"x": [0, 1]}, "y")
with pytest.raises(ValueError, match="[Ll]engths.*match"):
Cont([0, 1], [1, 1, 1])
with pytest.raises(ValueError, match="two"):
Cont([1], [1])
with pytest.warns(UserWarning, match="`x`.*not sorted.*`x` and `y`"):
rv = Cont([1, 0], [0, 2])
rv_ref = Cont([0, 1], [2, 0])
_test_equal_rand(rv, rv_ref)
with pytest.raises(ValueError, match="`y`.*negative"):
Cont([0, 1], [1, -1])
with pytest.raises(ValueError, match="`y`.*no positive"):
Cont([0, 1], [0, 0])
def test_init(self):
x_ref = np.array([0, 1, 2])
y_ref = np.array([0, 1, 0])
rv_ref = Cont(x_ref, y_ref)
# Simple case with non-numpy input
rv_1 = Cont(x=x_ref.tolist(), y=y_ref.tolist())
_test_equal_rand(rv_1, rv_ref)
# Check if `y` is normalized
rv_2 = Cont(x=x_ref, y=10 * y_ref)
_test_equal_rand(rv_2, rv_ref)
# Check if `x` and `y` are rearranged if not sorted
with pytest.warns(UserWarning, match="`x`.*not sorted"):
rv_3 = Cont(x=x_ref[[1, 0, 2]], y=y_ref[[1, 0, 2]])
_test_equal_rand(rv_3, rv_ref)
# Check if duplicated values are removed from `x`
with pytest.warns(UserWarning, match="duplicated"):
# First pair of xy-grid is taken among duplicates
rv_4 = Cont(x=x_ref[[0, 1, 1, 2]], y=y_ref[[0, 1, 2, 2]])
_test_equal_rand(rv_4, rv_ref)
def test_str(self):
rv = Cont([0, 2, 4], [0, 1, 0])
assert str(rv) == "Continuous RV with 2 intervals (support: [0.0, 4.0])"
# Uses singular noun with one interval
rv = Cont([0, 1], [1, 1])
assert str(rv) == "Continuous RV with 1 interval (support: [0.0, 1.0])"
def test_properties(self):
x = np.arange(11)
y = np.repeat(0.1, 11)
rv = Cont(x, y)
assert list(rv.params.keys()) == ["x", "y"]
assert_array_equal(rv.params["x"], x)
assert_array_equal(rv.params["y"], y)
assert_array_equal(rv.x, x)
assert_array_equal(rv.y, y)
assert rv.a == 0.0
assert rv.b == 10.0
def test_support(self):
rv = Cont([0.5, 1.5, 4.5], [0, 0.5, 0])
assert rv.support() == (0.5, 4.5)
def test_compress(self):
# Zero tails
## Left tail
_test_equal_rand(
Cont([0, 1, 2, 3], [0, 0, 0, 2]).compress(), Cont([2, 3], [0, 2])
)
_test_equal_rand(
Cont([0, 1, 2, 3], [0, 0, 1, 0]).compress(), Cont([1, 2, 3], [0, 1, 0])
)
## Right tail
_test_equal_rand(
Cont([0, 1, 2, 3], [2, 0, 0, 0]).compress(), Cont([0, 1], [2, 0])
)
_test_equal_rand(
Cont([0, 1, 2, 3], [0, 1, 0, 0]).compress(), Cont([0, 1, 2], [0, 1, 0])
)
## Both tails
_test_equal_rand(
Cont([0, 1, 2, 3, 4], [0, 0, 1, 0, 0]).compress(),
Cont([1, 2, 3], [0, 1, 0]),
)
# Extra linearity
## Non-zero slope
_test_equal_rand(
Cont([0, 1, 2, 3, 4], [0.5, 0.25, 0, 0.25, 0.5]).compress(),
Cont([0, 2, 4], [0.5, 0, 0.5]),
)
## Zero slope, non-zero y
_test_equal_rand(
Cont([0, 1, 2], [0.5, 0.5, 0.5]).compress(), Cont([0, 2], [0.5, 0.5])
)
## Zero slope, zero y, outside of tails
_test_equal_rand(
Cont([0, 1, 2, 3, 4], [1, 0, 0, 0, 1]).compress(),
Cont([0, 1, 3, 4], [1, 0, 0, 1]),
)
# All features
_test_equal_rand(
Cont(np.arange(14), [0, 0, 0, 1, 2, 2, 2, 1, 0, 0, 0, 1, 0, 0]).compress(),
Cont([2, 4, 6, 8, 10, 11, 12], [0, 2, 2, 0, 0, 1, 0]),
)
# If nothing to compress, self should be returned
rv = Cont([0, 1], [1, 1])
assert rv.compress() is rv
def test_ground(self):
w = config.small_width
# Basic usage
rv = Cont([0, 1], [1, 1])
_test_equal_rand(
rv.ground(), Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0])
)
# Argument `direction`
_test_equal_rand(
rv.ground(direction="both"),
Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0]),
)
_test_equal_rand(
rv.ground(direction="left"), Cont([-w, 0, w, 1], [0, 0.5, 1, 1])
)
_test_equal_rand(
rv.ground(direction="right"), Cont([0, 1 - w, 1, 1 + w], [1, 1, 0.5, 0])
)
_test_equal_rand(rv.ground(direction="none"), rv)
# Argument `w`
w2 = 0.1
_test_equal_rand(
rv.ground(w=w2, direction="both"),
Cont([-w2, 0, w2, 1 - w2, 1, 1 + w2], [0, 0.5, 1, 1, 0.5, 0]),
)
# Close neighbors
rv2 = Cont([0, 0.25 * w, 0.5, 1 - 0.1 * w, 1], [1, 1, 1, 1, 1])
rv2_grounded = rv2.ground(direction="both")
## Check that only outer points were added
assert_array_equal(rv2_grounded.x[1:-1], rv2.x)
        ## Check that grounding actually happened
assert_array_equal(rv2_grounded.y[[0, -1]], 0.0)
        ## Check that non-edge x-values have the same y-values
assert_array_equal(rv2_grounded.pdf(rv2.x[1:-1]), rv2.pdf(rv2.x[1:-1]))
def test_ground_options(self):
rv = Cont([0, 1], [1, 1])
with config.context({"small_width": 0.1}):
w = config.small_width
_test_equal_rand(
rv.ground(), Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0])
)
def test_ground_errors(self):
rv = Cont([0, 1], [1, 1])
with pytest.raises(ValueError, match="one of"):
rv.ground(direction="aaa")
def test__coeffs_by_ind(self):
# All coefficients are returned if no `ind` is specified
rv = Cont([0, 1, 2], [0, 1, 0])
inter, slope = rv._coeffs_by_ind()
assert_array_equal(inter, [0, 2])
assert_array_equal(slope, [1, -1])
def test__grid_by_ind(self):
# All grid elements are returned if no `ind` is specified
rv = Cont([0, 1, 2], [0, 1, 0])
x_out, y_out, p_out = rv._grid_by_ind()
x_ref, y_ref = rv.x, rv.y
assert_array_equal(x_out, x_ref)
assert_array_equal(y_out, y_ref)
def test_pdf_coeffs(self):
rv = Cont([0, 1, 2], [0, 1, 0])
x = np.array([-1, 0, 0.5, 1, 1.5, 2, 2.5])
with pytest.raises(ValueError, match="one of"):
rv.pdf_coeffs(x, side="a")
_test_equal_seq(
rv.pdf_coeffs(x),
(np.array([0, 0, 0, 2, 2, 2, 0]), np.array([0, 1, 1, -1, -1, -1, 0])),
)
_test_equal_seq(
rv.pdf_coeffs(x, side="left"),
(np.array([0, 0, 0, 0, 2, 2, 0]), np.array([0, 1, 1, 1, -1, -1, 0])),
)
_test_equal_seq(
rv.pdf_coeffs(np.array([-np.inf, np.nan, np.inf])),
(np.array([0, np.nan, 0]), np.array([0, np.nan, 0])),
)
def test_from_rv_basic(self):
uniform = distrs.uniform
norm = distrs.norm
# Basic usage
rv_unif = Cont.from_rv(uniform)
rv_unif_test = Cont(x=[0, 1], y=[1, 1])
_test_equal_rand(rv_unif, rv_unif_test, decimal=DECIMAL)
# Objects of `Rand` class should be `convert()`ed
_test_from_rv_rand(cls=Cont, to_class="Cont")
# Forced support edges
rv_right = Cont.from_rv(uniform, supp=(0.5, None))
rv_right_test = Cont([0.5, 1], [2, 2])
_test_equal_rand(rv_right, rv_right_test, decimal=DECIMAL)
rv_left = Cont.from_rv(uniform, supp=(None, 0.5))
rv_left_test = Cont([0, 0.5], [2, 2])
_test_equal_rand(rv_left, rv_left_test, decimal=DECIMAL)
rv_mid = Cont.from_rv(uniform, supp=(0.25, 0.75))
rv_mid_test = Cont([0.25, 0.75], [2, 2])
_test_equal_rand(rv_mid, rv_mid_test, decimal=DECIMAL)
def test_from_rv_errors(self):
        # Absence of either `cdf` or `ppf` method should result in an error
class Tmp:
pass
tmp1 = Tmp()
tmp1.ppf = lambda x: np.where((0 <= x) & (x <= 1), 1, 0)
with pytest.raises(ValueError, match="cdf"):
Cont.from_rv(tmp1)
tmp2 = Tmp()
tmp2.cdf = lambda x: np.where((0 <= x) & (x <= 1), 1, 0)
with pytest.raises(ValueError, match="ppf"):
Cont.from_rv(tmp2)
def test_from_rv_options(self):
norm = distrs.norm
# Finite support detection and usage of `small_prob` option
with config.context({"small_prob": 1e-6}):
rv_norm = Cont.from_rv(norm)
assert_array_almost_equal(
rv_norm.support(), norm.ppf([1e-6, 1 - 1e-6]), decimal=DECIMAL
)
with config.context({"small_prob": 1e-6}):
rv_norm_right = Cont.from_rv(norm, supp=(-1, None))
assert_array_almost_equal(
rv_norm_right.support(), [-1, norm.ppf(1 - 1e-6)], decimal=DECIMAL
)
with config.context({"small_prob": 1e-6}):
rv_norm_left = Cont.from_rv(norm, supp=(None, 1))
assert_array_almost_equal(
rv_norm_left.support(), [norm.ppf(1e-6), 1], decimal=DECIMAL
)
# Usage of `n_grid` option
with config.context({"n_grid": 11}):
rv_norm_small = Cont.from_rv(norm)
assert len(rv_norm_small.x) <= 20
# Usage of `cdf_tolerance` option
with config.context({"cdf_tolerance": 1e-4}):
rv_norm_1 = Cont.from_rv(norm)
with config.context({"cdf_tolerance": 1e-1}):
rv_norm_2 = Cont.from_rv(norm)
        ## Increasing CDF tolerance should lead to a coarser density grid
assert len(rv_norm_1.x) > len(rv_norm_2.x)
def test_from_sample_basic(self):
norm = distrs.norm()
rng = np.random.default_rng(101)
x = norm.rvs(100, random_state=rng)
rv = Cont.from_sample(x)
assert isinstance(rv, Cont)
def test_from_sample_errors(self):
with pytest.raises(TypeError, match="numpy array with float"):
Cont.from_sample(["a"])
with pytest.raises(ValueError, match="1d"):
Cont.from_sample([[1], [2]])
def test_from_sample_options(self):
norm = distrs.norm()
rng = np.random.default_rng(101)
x = norm.rvs(100, random_state=rng)
# "estimator_cont"
def uniform_estimator(x):
x_min, x_max = x.min(), x.max()
def res(x):
return np.where((x >= x_min) & (x <= x_max), 1 / (x_max - x_min), 0)
return res
with config.context({"estimator_cont": uniform_estimator}):
rv = Cont.from_sample(x)
assert len(rv.y) == 2
assert np.allclose(rv.y, rv.y[0], atol=1e-13)
# "estimator_cont" which returns allowed classes
## `Rand` class should be forwarded to `from_rv()` method
_test_from_sample_rand(
cls=Cont,
sample=x,
estimator_option="estimator_cont",
)
## "Scipy" distribution should be forwarded to `Cont.from_rv()`
rv_norm = distrs.norm()
with config.context({"estimator_cont": lambda x: rv_norm}):
rv = Cont.from_sample(np.asarray([0, 1, 2]))
rv_ref = Cont.from_rv(rv_norm)
_test_equal_rand(rv, rv_ref)
# "density_mincoverage"
with config.context({"density_mincoverage": 0.0}):
rv = Cont.from_sample(x)
        ## With minimal density mincoverage, the output range should be equal
        ## to the sample range
assert_array_equal(rv.x[[0, -1]], [x.min(), x.max()])
# "n_grid"
with config.context({"n_grid": 11}):
rv = Cont.from_sample(x)
assert len(rv.x) <= 22
# "cdf_tolerance"
with config.context({"cdf_tolerance": 2.0}):
rv = Cont.from_sample(x)
        ## With a very high CDF tolerance, downgridding should result in a grid
        ## with three elements. That is because the CDF is approximated with the
        ## simplest quadratic spline with a single segment, which requires three
        ## knots.
assert len(rv.x) == 3
@pytest.mark.slow
def test_from_sample_single_value(self):
"""How well `from_sample()` handles single unique value in sample
Main problem here is how density range is initialized during estimation.
"""
zero_vec = np.zeros(10)
        # The default density estimator can't handle a sample with a single
        # unique value (it gives `LinAlgError: singular matrix`).
# Case when sample width is zero but density is not zero
density_centered_interval = make_circ_density([(-1, 1)])
with config.context({"estimator_cont": lambda x: density_centered_interval}):
assert from_sample_cdf_max_error(zero_vec) <= 1e-4
# Case when both sample width and density are zero
density_shifted_interval = make_circ_density([(10, 20)])
with config.context({"estimator_cont": lambda x: density_shifted_interval}):
            # Here the current problem is that the support is estimated way too
            # wide, with very small (~1e-9) non-zero density outside of
            # [10, 20]. However, the CDFs are still close.
assert from_sample_cdf_max_error(zero_vec) <= 2e-4
def test_pdf(self):
rv = Cont([0, 1, 3], [0.5, 0.5, 0])
# Regular checks
x = np.array([-1, 0, 0.5, 1, 2, 3, 4])
assert_array_equal(rv.pdf(x), np.array([0, 0.5, 0.5, 0.5, 0.25, 0, 0]))
# Coercion of not ndarray input
_test_input_coercion(rv.pdf, x)
# Input around edges
x = np.array([0 - 1e-10, 0 + 1e-10, 3 - 1e-10, 3 + 1e-10])
assert_array_almost_equal(
rv.pdf(x), np.array([0, 0.5, 0.25e-10, 0]), decimal=DECIMAL
)
# Bad input
x = np.array([-np.inf, np.nan, np.inf])
assert_array_equal(rv.pdf(x), np.array([0, np.nan, 0]))
# Dirac-like random variable
rv_dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])
x = np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h])
## Accuracy is of order of 10 due to extreme magnitudes of values
assert_array_almost_equal(
rv_dirac.pdf(x), np.array([0, 0.5e8, 1e8, 0.5e8, 0]), decimal=-1
)
# Broadcasting
x = np.array([[-1, 0.5], [2, 4]])
assert_array_equal(rv.pdf(x), np.array([[0.0, 0.5], [0.25, 0.0]]))
# One value input
_test_one_value_input(rv.pdf, 0.5)
_test_one_value_input(rv.pdf, -1)
_test_one_value_input(rv.pdf, np.nan)
def test_logpdf(self):
rv = Cont([0, 1, 3], [0.5, 0.5, 0])
_test_log_fun(rv.logpdf, rv.pdf, x_ref=[-1, 0.1, 3, np.inf, np.nan])
def test_pmf(self):
rv = Cont([0, 1, 3], [0.5, 0.5, 0])
with pytest.raises(AttributeError, match=r"Use `pdf\(\)`"):
rv.pmf(0)
def test_logpmf(self):
rv = Cont([0, 1, 3], [0.5, 0.5, 0])
with pytest.raises(AttributeError, match=r"Use `logpdf\(\)`"):
rv.logpmf(0)
def test_cdf(self):
rv_1 = Cont([0, 1, 2], [0, 1, 0])
# Regular checks
x = np.array([-1, 0, 0.5, 1, 1.5, 2, 3])
assert_array_equal(rv_1.cdf(x), np.array([0, 0, 0.125, 0.5, 0.875, 1, 1]))
# Coercion of not ndarray input
_test_input_coercion(rv_1.cdf, x)
# Bad input
x = np.array([-np.inf, np.nan, np.inf])
assert_array_equal(rv_1.cdf(x), np.array([0, np.nan, 1]))
# Dirac-like random variable
rv_dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])
x = np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h])
assert_array_almost_equal(
rv_dirac.cdf(x), np.array([0, 0.125, 0.5, 0.875, 1]), decimal=DECIMAL
)
# Broadcasting
x = np.array([[-1, 0.5], [2, 4]])
assert_array_equal(rv_1.cdf(x), np.array([[0.0, 0.125], [1.0, 1.0]]))
# One value input
_test_one_value_input(rv_1.cdf, 0.5)
_test_one_value_input(rv_1.cdf, -1)
_test_one_value_input(rv_1.cdf, np.nan)
def test_logcdf(self):
rv = Cont([0, 1, 3], [0.5, 0.5, 0])
_test_log_fun(rv.logcdf, rv.cdf, x_ref=[-1, 0.1, 3, np.inf, np.nan])
def test_sf(self):
rv = Cont([0, 1, 3], [0.5, 0.5, 0])
x_ref = [-1, 0.1, 3, np.inf, np.nan]
assert_array_equal(rv.sf(x_ref), 1 - rv.cdf(x_ref))
def test_logsf(self):
rv = Cont([0, 1, 3], [0.5, 0.5, 0])
_test_log_fun(rv.logsf, rv.sf, x_ref=[-1, 0.1, 3, np.inf, np.nan])
def test_ppf(self):
# `ppf()` method should be inverse to `cdf()` for every sensible input
rv_1 = Cont([0, 1, 2], [0, 1, 0])
# Regular checks
q = np.array([0, 0.125, 0.5, 0.875, 1])
assert_array_equal(rv_1.ppf(q), np.array([0, 0.5, 1, 1.5, 2]))
# Coercion of not ndarray input
_test_input_coercion(rv_1.ppf, q)
# Bad input
q = np.array([-np.inf, -h, np.nan, 1 + h, np.inf])
assert_array_equal(
rv_1.ppf(q), np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
)
# Dirac-like random variable
rv_dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])
q = np.array([0, 0.125, 0.5, 0.875, 1])
assert_array_almost_equal(
rv_dirac.ppf(q),
np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h]),
decimal=DECIMAL,
)
# Broadcasting
q = np.array([[0, 0.5], [0.0, 1.0]])
assert_array_equal(rv_1.ppf(q), np.array([[0.0, 1.0], [0.0, 2.0]]))
# One value input
_test_one_value_input(rv_1.ppf, 0.25)
_test_one_value_input(rv_1.ppf, -1)
_test_one_value_input(rv_1.ppf, np.nan)
# Should return the smallest x-value in case of zero-density interval(s)
rv_zero_density = Cont([0, 1, 2, 3, 4, 5, 6], [0, 0.5, 0, 0, 0, 0.5, 0])
assert rv_zero_density.ppf(0.5) == 2
def test_isf(self):
rv = Cont([0, 1, 2], [0, 1, 0])
# Regular checks
q_ref = np.array([0, 0.125, 0.5, 0.875, 1])
assert_array_equal(rv.sf(rv.isf(q_ref)), q_ref)
def test_rvs(self):
rv_1 = Cont([0, 1, 2], [0, 1, 0])
_test_rvs_method(rv_1)
def test__cdf_spline(self):
rv = Cont([0, 1, 2], [0, 1, 0])
x = [-10, 0, 0.5, 1, 1.5, 2, 10]
assert_array_equal(rv._cdf_spline(x), rv.cdf(x))
def test_integrate_cdf(self):
rv = Cont([0, 1, 2], [0, 1, 0])
assert np.allclose(rv.integrate_cdf(-10, 10), quad(rv.cdf, -10, 10)[0])
def test_convert(self):
import randomvars._boolean as bool
import randomvars._discrete as disc
import randomvars._mixture as mixt
rv = Cont([0, 1, 2], [0, 1, 0])
        # By default and when `None` is supplied, self should be returned
assert rv.convert() is rv
assert rv.convert(None) is rv
        # Converting to Bool should result in a boolean RV with probability of
        # `False` equal to 0 (because the probability of a continuous RV being
        # exactly zero is 0).
out_bool = rv.convert("Bool")
assert isinstance(out_bool, bool.Bool)
assert out_bool.prob_true == 1.0
# Converting to own class should return self
out_cont = rv.convert("Cont")
assert out_cont is rv
        # Converting to Disc should result in a discrete RV with the same `x`
        # values as in the input's xy-grid
out_disc = rv.convert("Disc")
assert isinstance(out_disc, disc.Disc)
assert_array_equal(out_disc.x, rv.x)
        # Converting to Mixt should result in a degenerate mixture with only a
        # continuous component
out_mixt = rv.convert("Mixt")
assert isinstance(out_mixt, mixt.Mixt)
assert out_mixt.cont is rv
assert out_mixt.weight_cont == 1.0
        # Any other target class should result in an error
with pytest.raises(ValueError, match="one of"):
rv.convert("aaa")
class TestFromRVAccuracy:
"""Accuracy of `Cont.from_rv()`"""
    # Output of `from_rv()` should have a CDF that differs from the original
    # CDF by no more than `thres`
@pytest.mark.slow
@pytest.mark.parametrize(
"distr_dict,thres",
[
(DISTRIBUTIONS_COMMON, 1e-4),
(DISTRIBUTIONS_INF_DENSITY, 1e-3),
(DISTRIBUTIONS_HEAVY_TAILS, 5e-3),
],
)
def test_cdf_maxerror(self, distr_dict, thres):
test_passed = {
name: TestFromRVAccuracy.from_rv_cdf_maxerror(distr) <= thres
for name, distr in distr_dict.items()
}
assert all(test_passed.values())
def test_detected_support(self):
"""Test correct trimming of zero tails"""
rv_ref = Cont([0, 1, 2, 3, 4], [0, 0, 1, 0, 0])
rv_out = Cont.from_rv(declass(rv_ref))
_test_equal_rand(rv_out, rv_ref.compress(), decimal=4)
@staticmethod
def from_rv_cdf_maxerror(rv_base, n_inner_points=10, **kwargs):
rv_test = Cont.from_rv(rv_base, **kwargs)
x_grid = augment_grid(rv_test.x, n_inner_points)
err = rv_base.cdf(x_grid) - rv_test.cdf(x_grid)
return np.max(np.abs(err))
class TestFromSampleAccuracy:
"""Accuracy of `Cont.from_sample()`"""
    # Output of `from_sample()` should differ from the original density estimate
    # by no more than `thres` (with the default density estimator)
@pytest.mark.slow
@pytest.mark.parametrize(
"distr_dict,thres",
[
(DISTRIBUTIONS_COMMON, 1e-4),
(DISTRIBUTIONS_INF_DENSITY, 1.5e-4),
(DISTRIBUTIONS_HEAVY_TAILS, 1e-4),
],
)
def test_close_cdf(self, distr_dict, thres):
rng = np.random.default_rng(101)
test_passed = {
name: TestFromSampleAccuracy.simulated_cdf_error(distr, rng) <= thres
for name, distr in distr_dict.items()
}
assert all(test_passed.values())
@pytest.mark.slow
def test_density_range(self):
density_mincoverage = config.density_mincoverage
estimator_cont = config.estimator_cont
rng = np.random.default_rng(101)
def generate_density_coverage(distr):
x = distr.rvs(size=100, random_state=rng)
density = estimator_cont(x)
rv = Cont.from_sample(x)
return quad(density, rv.x[0], rv.x[-1])[0]
test_passed = {
distr_name: generate_density_coverage(distr) >= density_mincoverage
for distr_name, distr in DISTRIBUTIONS.items()
}
assert all(test_passed.values())
@staticmethod
def simulated_cdf_error(distr, rng):
x = distr.rvs(size=100, random_state=rng)
        # Test with `gaussian_kde` as the most commonly used density estimator.
        # This also enables a rather fast way of computing the CDF of the
        # estimated density via the `integrate_box_1d` method.
with config.context({"estimator_cont": gaussian_kde}):
rv = Cont.from_sample(x)
density = config.estimator_cont(x)
x_grid = augment_grid(rv.x, 10)
        # Interestingly enough, direct computation with `-np.inf` as the left
        # integration limit is both accurate and more efficient than computing
        # integrals for each segment and then using `np.cumsum()`. This is
        # probably because integration of gaussian curves with an infinite left
        # limit is done directly through the gaussian CDF.
cdf_grid = np.array(
[density.integrate_box_1d(-np.inf, cur_x) for cur_x in x_grid]
)
err = cdf_grid - rv.cdf(x_grid)
return np.max(np.abs(err))
def test__extend_range():
def extra_estimator(x):
x_min, x_max = x.min(), x.max()
prob_height = 1 / (x_max - x_min + 1)
def res(x):
return np.where(
((x_min < x) & (x < x_max)) | ((x_max + 1 < x) & (x < x_max + 2)),
prob_height,
0,
)
return res
norm = distrs.norm()
rng = np.random.default_rng(101)
x = norm.rvs(100, random_state=rng)
with config.context({"estimator_cont": extra_estimator}):
rv = Cont.from_sample(x)
assert (rv.x[0] <= x.min()) and (rv.x[-1] >= x.max())
|
from graphite.functions.params import Param, ParamTypes
def test(seriesList):
"""This is a test function"""
return seriesList
test.group = 'Test'
test.params = [
Param('seriesList', ParamTypes.bad, required=True),
]
SeriesFunctions = {
'testFunc': test,
}
|
#!/usr/bin/env python
#coding:utf-8
# Author: mozman --<mozman@gmx.at>
# Purpose: validator2 module - new validator module
# Created: 01.10.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
from .data import full11
from .data import tiny12
from .data import pattern
validator_cache = {}
def cache_key(profile, debug):
return str(profile) + str(debug)
def get_validator(profile, debug=True):
""" Validator factory """
try:
return validator_cache[cache_key(profile, debug)]
except KeyError:
if profile == 'tiny':
validator = Tiny12Validator(debug)
elif profile in ('full', 'basic', 'none'):
validator = Full11Validator(debug)
else:
raise ValueError("Unsupported profile: '%s'" % profile)
validator_cache[cache_key(profile, debug)] = validator
return validator
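# Usage sketch (illustrative): validators are created once per (profile, debug)
# pair and then reused from `validator_cache`. The typename passed to
# `check_svg_type` is assumed to be one registered in the profile's TypeChecker.
#
#     validator = get_validator('tiny', debug=True)
#     validator.check_svg_type('10px', typename='length')  # returns '10px' if valid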
class Tiny12Validator(object):
profilename = "Tiny 1.2"
def __init__(self, debug=True):
self.debug = debug
self.attributes = tiny12.attributes
self.elements = tiny12.elements
self.typechecker = tiny12.TypeChecker()
def check_all_svg_attribute_values(self, elementname, attributes):
"""
Check if attributes are valid for object 'elementname' and all svg
attributes have valid types and values.
Raises ValueError.
"""
for attributename, value in attributes.items():
self.check_svg_attribute_value(elementname, attributename, value)
def check_svg_attribute_value(self, elementname, attributename, value):
"""
Check if 'attributename' is valid for object 'elementname' and 'value'
is a valid svg type and value.
Raises ValueError.
"""
self._check_valid_svg_attribute_name(elementname, attributename)
self._check_svg_value(elementname, attributename, value)
def _check_svg_value(self, elementname, attributename, value):
"""
Checks if 'value' is a valid svg-type for svg-attribute
'attributename' at svg-element 'elementname'.
Raises TypeError.
"""
attribute = self.attributes[attributename]
        # check if 'value' matches a valid datatype
for typename in attribute.get_types(elementname):
if self.typechecker.check(typename, value):
return
# check if 'value' is a valid constant
valuestr = str(value)
        if valuestr not in attribute.get_const(elementname):
raise TypeError("'%s' is not a valid value for attribute '%s' at svg-element <%s>." % (value, attributename, elementname))
def _check_valid_svg_attribute_name(self, elementname, attributename):
""" Check if 'attributename' is a valid svg-attribute for svg-element
'elementname'.
Raises ValueError.
"""
if not self.is_valid_svg_attribute(elementname, attributename):
raise ValueError("Invalid attribute '%s' for svg-element <%s>." % (attributename, elementname))
def _get_element(self, elementname):
try:
return self.elements[elementname]
except KeyError:
raise KeyError("<%s> is not valid for selected profile: '%s'." % (elementname, self.profilename))
def check_svg_type(self, value, typename='string'):
"""
Check if 'value' matches svg type 'typename'.
Raises TypeError.
"""
if self.typechecker.check(typename, value):
return value
else:
raise TypeError("%s is not of type '%s'." % (value, typename))
def is_valid_svg_type(self, value, typename):
return self.typechecker.check(typename, value)
def is_valid_elementname(self, elementname):
""" True if 'elementname' is a valid svg-element name. """
return elementname in self.elements
def is_valid_svg_attribute(self, elementname, attributename):
""" True if 'attributename' is a valid svg-attribute for svg-element
'elementname'.
"""
element = self._get_element(elementname)
return attributename in element.valid_attributes
def is_valid_children(self, elementname, childrenname):
""" True if svg-element 'childrenname' is a valid children of
svg-element 'elementname'.
"""
element = self._get_element(elementname)
return childrenname in element.valid_children
def check_valid_children(self, elementname, childrenname):
""" Checks if svg-element 'childrenname' is a valid children of
svg-element 'elementname'.
Raises ValueError.
"""
if not self.is_valid_children(elementname, childrenname):
raise ValueError("Invalid children '%s' for svg-element <%s>." % (childrenname, elementname))
def get_coordinate(self, value):
""" Split value in (number, unit) if value has an unit or (number, None).
Raises ValueError.
"""
if value is None:
raise TypeError("Invalid type 'None'.")
if isinstance(value, (int, float)):
result = (value, None)
else:
result = pattern.coordinate.match(value.strip())
if result:
number, tmp, unit = result.groups()
number = float(number)
else:
raise ValueError("'%s' is not a valid svg-coordinate." % value)
result = (number, unit)
if self.typechecker.is_number(result[0]):
return result
else:
version = "SVG %s %s" % self.typechecker.get_version()
raise ValueError("%s is not a valid number for: %s." % (value, version))
get_length = get_coordinate
class Full11Validator(Tiny12Validator):
profilename = "Full 1.1"
def __init__(self, debug=True):
self.debug = debug
self.attributes = full11.attributes
self.elements = full11.elements
self.typechecker = full11.TypeChecker()
|
# -*- coding: utf-8 -*-
## Copyright (c) 2015-2018, Exa Analytics Development Team
## Distributed under the terms of the Apache License 2.0
"""
Tests for :mod:`~exatomic.interfaces.cube`
#############################################
"""
import numpy as np
from unittest import TestCase
from exatomic.base import resource, staticdir
from exatomic.interfaces.cube import Cube, uni_from_cubes
class TestCube(TestCase):
"""Tests cube reading and writing."""
def setUp(self):
self.lg = Cube(resource('mol-carbon-dz-1.cube'))
self.sm1 = Cube(resource('adf-lu-35.cube'))
self.sm2 = Cube(resource('adf-lu-36.cube'))
self.uni = uni_from_cubes(staticdir() + '/cube/', ext='*lu*cube')
def test_parse_atom(self):
self.lg.parse_atom()
self.sm1.parse_atom()
self.sm2.parse_atom()
        self.assertEqual(self.lg.atom.shape[0], 1)
        self.assertEqual(self.sm1.atom.shape[0], 1)
        self.assertEqual(self.sm2.atom.shape[0], 1)
def test_parse_field(self):
self.lg.parse_field()
self.sm1.parse_field()
self.sm2.parse_field()
        self.assertEqual(self.lg.field.shape[0], 1)
        self.assertEqual(self.sm1.field.shape[0], 1)
        self.assertEqual(self.sm2.field.shape[0], 1)
        self.assertEqual(self.lg.field.field_values[0].shape[0], 132651)
        self.assertEqual(self.sm1.field.field_values[0].shape[0], 4913)
        self.assertEqual(self.sm2.field.field_values[0].shape[0], 4913)
def test_to_universe(self):
lg = self.lg.to_universe()
sm1 = self.sm1.to_universe()
sm2 = self.sm2.to_universe()
for uni in [lg, sm1, sm2]:
for attr in ['atom', 'field']:
self.assertTrue(hasattr(uni, attr))
def test_uni_from_cubes_rotate_and_write(self):
        self.assertEqual(self.uni.field.shape[0], 2)
        self.assertEqual(len(self.uni.field.field_values), 2)
        rot = self.uni.field.rotate(0, 1, np.pi / 4)
        self.assertEqual(rot.shape[0], 2)
        f = Cube.from_universe(self.uni, 1)
        self.assertEqual(len(f), 874)
|
"""
Test that stepping works even when the OS Plugin doesn't report
all threads at every stop.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class TestOSPluginStepping(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@skipIfWindows
@skipIf(oslist=["freebsd"], bugnumber="llvm.org/pr48352")
def test_python_os_plugin(self):
"""Test that stepping works when the OS Plugin doesn't report all
threads at every stop"""
self.build()
self.main_file = lldb.SBFileSpec('main.cpp')
self.run_python_os_step_missing_thread(False)
@skipIfWindows
@skipIf(oslist=["freebsd"], bugnumber="llvm.org/pr48352")
def test_python_os_plugin_prune(self):
"""Test that pruning the unreported PlanStacks works"""
self.build()
self.main_file = lldb.SBFileSpec('main.cpp')
self.run_python_os_step_missing_thread(True)
def get_os_thread(self):
return self.process.GetThreadByID(0x111111111)
def is_os_thread(self, thread):
id = thread.GetID()
return id == 0x111111111
def run_python_os_step_missing_thread(self, do_prune):
"""Test that the Python operating system plugin works correctly"""
# Our OS plugin does NOT report all threads:
result = self.dbg.HandleCommand("settings set process.experimental.os-plugin-reports-all-threads false")
python_os_plugin_path = os.path.join(self.getSourceDir(),
"operating_system.py")
(target, self.process, thread, thread_bkpt) = lldbutil.run_to_source_breakpoint(
self, "first stop in thread - do a step out", self.main_file)
main_bkpt = target.BreakpointCreateBySourceRegex('Stop here and do not make a memory thread for thread_1',
self.main_file)
self.assertEqual(main_bkpt.GetNumLocations(), 1, "Main breakpoint has one location")
# There should not be an os thread before we load the plugin:
self.assertFalse(self.get_os_thread().IsValid(), "No OS thread before loading plugin")
# Now load the python OS plug-in which should update the thread list and we should have
# an OS plug-in thread overlaying thread_1 with id 0x111111111
command = "settings set target.process.python-os-plugin-path '%s'" % python_os_plugin_path
self.dbg.HandleCommand(command)
# Verify our OS plug-in threads showed up
os_thread = self.get_os_thread()
self.assertTrue(
os_thread.IsValid(),
"Make sure we added the thread 0x111111111 after we load the python OS plug-in")
# Now we are going to step-out. This should get interrupted by main_bkpt. We've
# set up the OS plugin so at this stop, we have lost the OS thread 0x111111111.
# Make sure both of these are true:
os_thread.StepOut()
stopped_threads = lldbutil.get_threads_stopped_at_breakpoint(self.process, main_bkpt)
self.assertEqual(len(stopped_threads), 1, "Stopped at main_bkpt")
thread = self.process.GetThreadByID(0x111111111)
self.assertFalse(thread.IsValid(), "No thread 0x111111111 on second stop.")
# Make sure we still have the thread plans for this thread:
# First, don't show unreported threads, that should fail:
command = "thread plan list -t 0x111111111"
result = lldb.SBCommandReturnObject()
interp = self.dbg.GetCommandInterpreter()
interp.HandleCommand(command, result)
self.assertFalse(result.Succeeded(), "We found no plans for the unreported thread.")
# Now do it again but with the -u flag:
command = "thread plan list -u -t 0x111111111"
result = lldb.SBCommandReturnObject()
interp.HandleCommand(command, result)
self.assertTrue(result.Succeeded(), "We found plans for the unreported thread.")
if do_prune:
# Prune the thread plan and continue, and we will run to exit.
interp.HandleCommand("thread plan prune 0x111111111", result)
self.assertTrue(result.Succeeded(), "Found the plan for 0x111111111 and pruned it")
# List again, make sure it doesn't work:
command = "thread plan list -u -t 0x111111111"
interp.HandleCommand(command, result)
self.assertFalse(result.Succeeded(), "We still found plans for the unreported thread.")
self.process.Continue()
self.assertEqual(self.process.GetState(), lldb.eStateExited, "We exited.")
else:
# Now we are going to continue, and when we hit the step-out breakpoint, we will
# put the OS plugin thread back, lldb will recover its ThreadPlanStack, and
# we will stop with a "step-out" reason.
self.process.Continue()
os_thread = self.get_os_thread()
self.assertTrue(os_thread.IsValid(), "The OS thread is back after continue")
self.assertIn("step out", os_thread.GetStopDescription(100), "Completed step out plan")
|
#!/usr/bin/python3
# BY NOMO
from netmiko import Netmiko
from getpass import getpass
from datetime import datetime
from pprint import pprint
import re
import os
import sys
import socket
# Vars
config_dir = "/home/reponeg/logs/asa_configs"
# Function for DNS resolution
def hostnameLookup(hostname):
try:
socket.gethostbyname(hostname)
return 1 # If lookup works
except socket.error:
return 0 # If lookup fails
def getShowRun(connection_handle, context, dirname):
output = connection_handle.send_command("changeto context " + context)
sh_run = connection_handle.send_command("show run")
    # Use only the short host name (portion before the first dot, if any)
    hostname_simple = hostname_arg.split(".")[0]
file_path = dirname + "/" + "sh_run_" + hostname_simple +"_"+ context + ".txt"
with open(file_path, "w") as file_handle:
file_handle.write(sh_run)
return 1
# Check arguments for hostname and hostname dns resolution
if len(sys.argv) < 4:
print("\nMissing parameter. Please enter the hostname or IP address:")
print("\nUsage:", sys.argv[0], "<hostname>\n\n")
exit()
elif len(sys.argv) > 4:
print("Too many parameters. Use a single hostname.")
exit()
hostname_arg = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
dns_lookup_result = hostnameLookup(hostname_arg)
if dns_lookup_result == 0:
print("Hostname lookup for %s failed. Please check name and retry." %(hostname_arg) )
exit()
# Device
asa = {
'host': hostname_arg,
'username': username,
'password': password,
'device_type': 'cisco_asa'
}
auth_pending = True
while auth_pending:
try:
conn1 = Netmiko(**asa)
auth_pending = False
except:
print("Authentication failed. This is host " + hostname_arg)
        asa['username'] = input("\nEnter your Username (FOR THIS HOST): ")
asa['password'] = getpass()
try:
conn1 = Netmiko(**asa)
pass
except:
print("Failed to authenticate on " + hostname_arg + "\nTry again.")
pass
# Move to context sys to grab the list of all contexts
command = "changeto context sys"
output = conn1.send_command(command)
command = "show run | i context"
output = conn1.send_command(command).splitlines()
# Get the list
context_list = []
for line in output:
if line.startswith("context "):
context_name = line.replace("context ", "")
context_list.append(context_name)
# Start hopping contexts and retrieving running configs
for context in context_list:
getShowRun(conn1, context, config_dir)
print("Retrieved config for contexts:")
print(context_list)
print("\n")
|
#!/usr/bin/env python
import rospy
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd
from geometry_msgs.msg import TwistStamped
from std_msgs.msg import Bool
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator it's enabled all the time, in the real car that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle-specific values (like vehicle_mass,
wheel_base, etc.) should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish them on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
BrakeCmd, queue_size=1)
# TODO: Create `Controller` object
# self.controller = Controller(<Arguments you wish to provide>)
self.controller = Controller(vehicle_mass=vehicle_mass,
fuel_capacity=fuel_capacity,
brake_deadband=brake_deadband,
decel_limit=decel_limit,
accel_limit=accel_limit,
wheel_radius=wheel_radius,
wheel_base=wheel_base,
steer_ratio=steer_ratio,
max_lat_accel=max_lat_accel,
max_steer_angle=max_steer_angle)
# TODO: Subscribe to all the topics you need to
rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cmd_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_cb)
rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb, queue_size=1)
self.current_vel = None
self.curr_ang_vel = None
self.dbw_enabled = None
self.target_vel = None
self.target_angular_vel = None
self.throttle = self.steering = self.brake = 0
self.loop()
def loop(self):
rate = rospy.Rate(50) # 50Hz
while not rospy.is_shutdown():
# TODO: Get predicted throttle, brake, and steering using `twist_controller`
# You should only publish the control commands if dbw is enabled
# throttle, brake, steering = self.controller.control(<proposed linear velocity>,
# <proposed angular velocity>,
# <current linear velocity>,
# <dbw status>,
# <any other argument you need>)
# if <dbw is enabled>:
# self.publish(throttle, brake, steer)
# If autonomous system is enabled
if self.dbw_enabled and (self.current_vel is not None) and (self.target_vel is not None) and (
self.target_angular_vel is not None):
self.throttle, self.brake, self.steering = self.controller.control(self.current_vel,
self.target_vel,
self.target_angular_vel)
self.publish(self.throttle, self.brake, self.steering)
rate.sleep()
def dbw_enabled_cb(self, msg):
# Get the driving mode
self.controller.reset()
self.dbw_enabled = msg.data
if self.dbw_enabled:
rospy.logwarn('TwistController is online.')
else:
rospy.logwarn('TwistController is offline.')
def twist_cmd_cb(self, msg):
# Get the desired velocity
self.target_vel = msg.twist.linear.x
self.target_angular_vel = msg.twist.angular.z
#rospy.logwarn("target_vel %f"%self.target_vel)
def current_velocity_cb(self, msg):
# Get current velocity
self.current_vel = msg.twist.linear.x
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
if __name__ == '__main__':
DBWNode()
|
import yaml
from models.fasttext import FastText
from models.attention_rnn import AttentionRNN
from models.rcnn import RCNN
from models.textcnn import TextCNN
from models.textrnn import TextRNN
from models.transformer import Transformer
from utils.logger import get_logger
def _load_yaml(path):
    """Load a YAML config file, closing the file handle via a context manager."""
    with open(path) as config_file:
        return yaml.load(config_file, Loader=yaml.FullLoader)
def instantiate_model(model_name, vocab_size, embeddings):
    multi_layer_args = _load_yaml('./configs/multi_layer.yml')
    if model_name == "rcnn":
        model_args = _load_yaml('./configs/rcnn.yml')
        model = RCNN(vocab_size, embeddings, **{**model_args, **multi_layer_args})
    elif model_name == "textcnn":
        model_args = _load_yaml('./configs/textcnn.yml')
        model = TextCNN(vocab_size, embeddings, **{**model_args, **multi_layer_args})
    elif model_name == "textrnn":
        model_args = _load_yaml('./configs/textrnn.yml')
        model = TextRNN(vocab_size, embeddings, **{**model_args, **multi_layer_args})
    elif model_name == "attention_rnn":
        model_args = _load_yaml('./configs/attention_rnn.yml')
        model = AttentionRNN(vocab_size, embeddings, **{**model_args, **multi_layer_args})
    elif model_name == "transformer":
        model_args = _load_yaml('./configs/transformer.yml')
        model = Transformer(vocab_size, embeddings, **{**model_args, **multi_layer_args})
    else:
        # Fall back to FastText for any unrecognized model name
        model_args = _load_yaml('./configs/fasttext.yml')
        model = FastText(vocab_size, embeddings, **{**model_args, **multi_layer_args})
logger = get_logger(__name__)
logger.info("A model of {} is instantiated.".format(model.__class__.__name__))
return model
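# Usage sketch (illustrative): the arguments below are placeholders; the real
# `vocab_size` and `embeddings` come from the project's preprocessing step, and
# each model class decides which embedding formats it accepts.
#
#     model = instantiate_model("textcnn", vocab_size=30000, embeddings=None)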
|
import datetime
import logging
from abc import ABCMeta, abstractmethod
from decimal import Decimal
from celery.result import EagerResult, allow_join_result
from celery.backends.base import DisabledBackend
logger = logging.getLogger(__name__)
PROGRESS_STATE = 'PROGRESS'
class AbstractProgressRecorder(object):
__metaclass__ = ABCMeta
@abstractmethod
def set_progress(self, current, total, description=""):
pass
class ConsoleProgressRecorder(AbstractProgressRecorder):
def set_progress(self, current, total, description=""):
print('processed {} items of {}. {}'.format(current, total, description))
class ProgressRecorder(AbstractProgressRecorder):
def __init__(self, task):
self.task = task
def set_progress(self, current, total, description=""):
percent = 0
if total > 0:
percent = (Decimal(current) / Decimal(total)) * Decimal(100)
percent = float(round(percent, 2))
state = PROGRESS_STATE
meta = {
'pending': False,
'current': current,
'total': total,
'percent': percent,
'description': description
}
self.task.update_state(
state=state,
meta=meta
)
return state, meta
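# Usage sketch (illustrative, assumes a Celery app is configured elsewhere): a
# bound task can report progress through ProgressRecorder like this.
#
#     from celery import shared_task
#
#     @shared_task(bind=True)
#     def process_items(self, items):
#         recorder = ProgressRecorder(self)
#         for i, item in enumerate(items, start=1):
#             ...  # do the actual work for one item
#             recorder.set_progress(i, len(items), description="processing")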
class Progress(object):
def __init__(self, result):
"""
result:
an AsyncResult or an object that mimics it to a degree
"""
self.result = result
def get_info(self):
state = self.result.state
response = {'state': state}
if state in ['SUCCESS', 'FAILURE']:
success = self.result.successful()
with allow_join_result():
response.update({
'complete': True,
'success': success,
'progress': _get_completed_progress(),
'result': self.result.get(self.result.id) if success else str(self.result.info),
})
elif state in ['RETRY', 'REVOKED']:
if state == 'RETRY':
retry = self.result.info
when = str(retry.when) if isinstance(retry.when, datetime.datetime) else str(
datetime.datetime.now() + datetime.timedelta(seconds=retry.when))
result = {'when': when, 'message': retry.message or str(retry.exc)}
else:
result = 'Task ' + str(self.result.info)
response.update({
'complete': True,
'success': False,
'progress': _get_completed_progress(),
'result': result,
})
elif state == 'IGNORED':
response.update({
'complete': True,
'success': None,
'progress': _get_completed_progress(),
'result': str(self.result.info)
})
elif state == PROGRESS_STATE:
response.update({
'complete': False,
'success': None,
'progress': self.result.info,
})
elif state in ['PENDING', 'STARTED']:
response.update({
'complete': False,
'success': None,
'progress': _get_unknown_progress(state),
})
else:
logger.error('Task %s has unknown state %s with metadata %s', self.result.id, state, self.result.info)
response.update({
'complete': True,
'success': False,
'progress': _get_unknown_progress(state),
'result': 'Unknown state {}'.format(state),
})
return response
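# Usage sketch (illustrative): a polling endpoint can serialize `get_info()`.
# The Django import and URL wiring are assumptions, not part of this module.
#
#     from celery.result import AsyncResult
#     from django.http import JsonResponse
#
#     def task_status(request, task_id):
#         return JsonResponse(Progress(AsyncResult(task_id)).get_info())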
class KnownResult(EagerResult):
"""Like EagerResult but supports non-ready states."""
def __init__(self, id, ret_value, state, traceback=None):
"""
ret_value:
result, exception, or progress metadata
"""
# set backend to get state groups (like READY_STATES in ready())
self.backend = DisabledBackend
super().__init__(id, ret_value, state, traceback)
def ready(self):
return super(EagerResult, self).ready()
def __del__(self):
# throws an exception if not overridden
pass
def _get_completed_progress():
return {
'pending': False,
'current': 100,
'total': 100,
'percent': 100,
}
def _get_unknown_progress(state):
return {
'pending': state == 'PENDING',
'current': 0,
'total': 100,
'percent': 0,
}
|
from shamrock.util.ints import uint64
from .constants import ConsensusConstants
testnet_kwargs = {
"SLOT_BLOCKS_TARGET": 32,
"MIN_BLOCKS_PER_CHALLENGE_BLOCK": 16, # Must be less than half of SLOT_BLOCKS_TARGET
"MAX_SUB_SLOT_BLOCKS": 128, # Must be less than half of SUB_EPOCH_BLOCKS
"NUM_SPS_SUB_SLOT": 64, # Must be a power of 2
"SUB_SLOT_ITERS_STARTING": 2 ** 24,
# DIFFICULTY_STARTING is the starting difficulty for the first epoch, which is then further
# multiplied by another factor of DIFFICULTY_CONSTANT_FACTOR, to be used in the VDF iter calculation formula.
"DIFFICULTY_CONSTANT_FACTOR": 2 ** 55,
"DIFFICULTY_STARTING": 7,
"DIFFICULTY_CHANGE_MAX_FACTOR": 3, # The next difficulty is truncated to range [prev / FACTOR, prev * FACTOR]
# These 3 constants must be changed at the same time
"SUB_EPOCH_BLOCKS": 384, # The number of blocks per sub-epoch, mainnet 3600 ~ 2 hours
"EPOCH_BLOCKS": 768, # The number of blocks per epoch, mainnet 43200 ~ 1 days. Must be multiple of SUB_EPOCH_SB
"SIGNIFICANT_BITS": 8, # The number of bits to look at in difficulty and min iters. The rest are zeroed
"DISCRIMINANT_SIZE_BITS": 1024, # Max is 1024 (based on ClassGroupElement int size)
"NUMBER_ZERO_BITS_PLOT_FILTER": 9, # H(plot signature of the challenge) must start with these many zeroes
"MIN_PLOT_SIZE": 32, # 32 for mainnet
"MAX_PLOT_SIZE": 50,
"SUB_SLOT_TIME_TARGET": 64, # The target number of seconds per slot, mainnet 64
"NUM_SP_INTERVALS_EXTRA": 3, # The number of sp intervals to add to the signage point
"MAX_FUTURE_TIME": 5 * 60, # The next block can have a timestamp of at most these many seconds in the future
"NUMBER_OF_TIMESTAMPS": 11, # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
# Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
# We override this value based on the chain being run (testnet0, testnet1, mainnet, etc)
# Default used for tests is std_hash(b'')
"GENESIS_CHALLENGE": bytes.fromhex("e7fe471110c27b12a5f17c8cc150da370396b8d704c6dd521bc7be99d4f358f6"),
    # Forks of shamrock should change this value to provide replay attack protection. This is set to the mainnet genesis challenge
"AGG_SIG_ME_ADDITIONAL_DATA": bytes.fromhex("2a6c4b6a3c1e7e13dcf1c77b4553e4d04f1b916a440f61503deedc0899490529"),
"GENESIS_PRE_FARM_POOL_PUZZLE_HASH": bytes.fromhex(
"d23da14695a188ae5708dd152263c4db883eb27edeb936178d4d988b8f3ce5fc"
),
"GENESIS_PRE_FARM_FARMER_PUZZLE_HASH": bytes.fromhex(
"3d8765d3a597ec1d99663f6c9816d915b9f68613ac94009884c4addaefcce6af"
),
"MAX_VDF_WITNESS_SIZE": 64,
# Size of mempool = 50x the size of block
"MEMPOOL_BLOCK_BUFFER": 50,
# Max coin amount, fits into 64 bits
"MAX_COIN_AMOUNT": uint64((1 << 64) - 1),
# Max block cost in clvm cost units
"MAX_BLOCK_COST_CLVM": 11000000000,
# The cost per byte of generator program
"COST_PER_BYTE": 12000,
"WEIGHT_PROOF_THRESHOLD": 2,
"BLOCKS_CACHE_SIZE": 43200 + (128 * 4),
"WEIGHT_PROOF_RECENT_BLOCKS": 380,
"MAX_BLOCK_COUNT_PER_REQUESTS": 32, # Allow up to 32 blocks per request
#"INITIAL_FREEZE_END_TIMESTAMP": 1627318800, # Mon Jul 26 2021 17:00:00 GMT+0000
"NETWORK_TYPE": 0,
"MAX_GENERATOR_SIZE": 1000000,
"MAX_GENERATOR_REF_LIST_SIZE": 512, # Number of references allowed in the block generator ref list
"POOL_SUB_SLOT_ITERS": 37600000000, # iters limit * NUM_SPS
}
DEFAULT_CONSTANTS = ConsensusConstants(**testnet_kwargs) # type: ignore
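# Usage sketch (illustrative): assuming ConsensusConstants exposes the kwargs
# above as attributes, individual values can be read directly, e.g.
#
#     assert DEFAULT_CONSTANTS.SLOT_BLOCKS_TARGET == 32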
|
"""
This module runs as an independent thread and acts as a wrapper to orchestrate training, testing, etc.
Supervised training is coordinated here.
"""
from queue import Queue
from threading import Thread
from inf import runtime_data
def initialize():
return
class Controller:
def __init__(self):
# setup a new thread here
return
def trainer_mnist(self):
return
def trainer_fashion_mnist(self):
return
def tester_mnist(self):
return
def tester_fashion_mnist(self):
return
|
import re
from os import path
from setuptools import find_namespace_packages, setup
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "src", "covid_health", "__init__.py")) as init:
    __version__ = re.findall(r'__version__ = "([\w\.\-\_]+)"', init.read())[0]
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
with open(path.join(here, "requirements.txt"), encoding="utf-8") as f:
all_reqs = f.read().split("\n")
with open(path.join(here, "requirements-dev.txt"), encoding="utf-8") as f:
dev_reqs = f.read().split("\n")
setup(
name="covid-health",
version=__version__,
description="",
long_description=long_description,
long_description_content_type="text/markdown",
author="Giacomo Barone, Buildnn",
url="https://www.buildnn.com",
license="Copyright © 2020 Giacomo Barone / Buildnn. MIT.",
classifiers=[],
package_dir={"": "src"},
packages=find_namespace_packages(
where="src", include=["*"], exclude=["*.egg-info"]
),
include_package_data=True,
keywords="",
install_requires=all_reqs,
extras_require={"dev": dev_reqs},
# dependency_links=dependency_links,
author_email="giacomo.barone@buildnn.com",
entry_points="""
[console_scripts]
covid-data=covid_health.cli:main
""",
)
|
"""
Very minimal unittests for parts of the readline module.
"""
from contextlib import ExitStack
from errno import EIO
import locale
import os
import selectors
import subprocess
import sys
import tempfile
import unittest
from test.support import import_module, unlink, temp_dir, TESTFN, verbose
from test.support.script_helper import assert_python_ok
# Skip tests if there is no readline module
readline = import_module('readline')
if hasattr(readline, "_READLINE_LIBRARY_VERSION"):
is_editline = ("EditLine wrapper" in readline._READLINE_LIBRARY_VERSION)
else:
is_editline = (readline.__doc__ and "libedit" in readline.__doc__)
def setUpModule():
if verbose:
# Python implementations other than CPython may not have
# these private attributes
if hasattr(readline, "_READLINE_VERSION"):
print(f"readline version: {readline._READLINE_VERSION:#x}")
print(f"readline runtime version: {readline._READLINE_RUNTIME_VERSION:#x}")
if hasattr(readline, "_READLINE_LIBRARY_VERSION"):
print(f"readline library version: {readline._READLINE_LIBRARY_VERSION!r}")
print(f"use libedit emulation? {is_editline}")
@unittest.skipUnless(hasattr(readline, "clear_history"),
"The history update test cannot be run because the "
"clear_history method is not available.")
class TestHistoryManipulation (unittest.TestCase):
"""
These tests were added to check that the libedit emulation on OSX and the
"real" readline have the same interface for history manipulation. That's
why the tests cover only a small subset of the interface.
"""
def testHistoryUpdates(self):
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
readline.replace_history_item(0, "replaced line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "replaced line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_current_history_length(), 2)
readline.remove_history_item(0)
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "second line")
self.assertEqual(readline.get_current_history_length(), 1)
@unittest.skipUnless(hasattr(readline, "append_history_file"),
"append_history not available")
def test_write_read_append(self):
hfile = tempfile.NamedTemporaryFile(delete=False)
hfile.close()
hfilename = hfile.name
self.addCleanup(unlink, hfilename)
# test write-clear-read == nop
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
readline.write_history_file(hfilename)
readline.clear_history()
self.assertEqual(readline.get_current_history_length(), 0)
readline.read_history_file(hfilename)
self.assertEqual(readline.get_current_history_length(), 2)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
# test append
readline.append_history_file(1, hfilename)
readline.clear_history()
readline.read_history_file(hfilename)
self.assertEqual(readline.get_current_history_length(), 3)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_history_item(3), "second line")
# test 'no such file' behaviour
os.unlink(hfilename)
with self.assertRaises(FileNotFoundError):
readline.append_history_file(1, hfilename)
# write_history_file can create the target
readline.write_history_file(hfilename)
def test_nonascii_history(self):
readline.clear_history()
try:
readline.add_history("entrée 1")
except UnicodeEncodeError as err:
self.skipTest("Locale cannot encode test data: " + format(err))
readline.add_history("entrée 2")
readline.replace_history_item(1, "entrée 22")
readline.write_history_file(TESTFN)
self.addCleanup(os.remove, TESTFN)
readline.clear_history()
readline.read_history_file(TESTFN)
if is_editline:
# An add_history() call seems to be required for get_history_
# item() to register items from the file
readline.add_history("dummy")
self.assertEqual(readline.get_history_item(1), "entrée 1")
self.assertEqual(readline.get_history_item(2), "entrée 22")
class TestReadline(unittest.TestCase):
@unittest.skipIf(getattr(readline, '_READLINE_VERSION', 0x0601) < 0x0601
and not is_editline,
"not supported in this library version")
def test_init(self):
# Issue #19884: Ensure that the ANSI sequence "\033[1034h" is not
# written into stdout when the readline module is imported and stdout
# is redirected to a pipe.
rc, stdout, stderr = assert_python_ok('-c', 'import readline',
TERM='xterm-256color')
self.assertEqual(stdout, b'')
auto_history_script = """\
import readline
readline.set_auto_history({})
input()
print("History length:", readline.get_current_history_length())
"""
def test_auto_history_enabled(self):
output = run_pty(self.auto_history_script.format(True))
# bpo-44949: Sometimes, the newline character is not written at the
# end, so don't expect it in the output.
self.assertIn(b"History length: 1", output)
def test_auto_history_disabled(self):
output = run_pty(self.auto_history_script.format(False))
# bpo-44949: Sometimes, the newline character is not written at the
# end, so don't expect it in the output.
self.assertIn(b"History length: 0", output)
@unittest.skipIf(not hasattr(readline,
'set_completion_display_matches_hook'),
"function not reimplemented in pypy")
def test_nonascii(self):
loc = locale.setlocale(locale.LC_CTYPE, None)
if loc in ('C', 'POSIX'):
# bpo-29240: On FreeBSD, if the LC_CTYPE locale is C or POSIX,
# writing and reading non-ASCII bytes into/from a TTY works, but
# readline or ncurses ignores non-ASCII bytes on read.
self.skipTest(f"the LC_CTYPE locale is {loc!r}")
try:
readline.add_history("\xEB\xEF")
except UnicodeEncodeError as err:
self.skipTest("Locale cannot encode test data: " + format(err))
script = r"""import readline
is_editline = readline.__doc__ and "libedit" in readline.__doc__
inserted = "[\xEFnserted]"
macro = "|t\xEB[after]"
set_pre_input_hook = getattr(readline, "set_pre_input_hook", None)
if is_editline or not set_pre_input_hook:
# The insert_line() call via pre_input_hook() does nothing with Editline,
# so include the extra text that would have been inserted here
macro = inserted + macro
if is_editline:
readline.parse_and_bind(r'bind ^B ed-prev-char')
readline.parse_and_bind(r'bind "\t" rl_complete')
readline.parse_and_bind(r'bind -s ^A "{}"'.format(macro))
else:
readline.parse_and_bind(r'Control-b: backward-char')
readline.parse_and_bind(r'"\t": complete')
readline.parse_and_bind(r'set disable-completion off')
readline.parse_and_bind(r'set show-all-if-ambiguous off')
readline.parse_and_bind(r'set show-all-if-unmodified off')
readline.parse_and_bind(r'Control-a: "{}"'.format(macro))
def pre_input_hook():
readline.insert_text(inserted)
readline.redisplay()
if set_pre_input_hook:
set_pre_input_hook(pre_input_hook)
def completer(text, state):
if text == "t\xEB":
if state == 0:
print("text", ascii(text))
print("line", ascii(readline.get_line_buffer()))
print("indexes", readline.get_begidx(), readline.get_endidx())
return "t\xEBnt"
if state == 1:
return "t\xEBxt"
if text == "t\xEBx" and state == 0:
return "t\xEBxt"
return None
readline.set_completer(completer)
def display(substitution, matches, longest_match_length):
print("substitution", ascii(substitution))
print("matches", ascii(matches))
readline.set_completion_display_matches_hook(display)
print("result", ascii(input()))
print("history", ascii(readline.get_history_item(1)))
"""
input = b"\x01" # Ctrl-A, expands to "|t\xEB[after]"
input += b"\x02" * len("[after]") # Move cursor back
input += b"\t\t" # Display possible completions
input += b"x\t" # Complete "t\xEBx" -> "t\xEBxt"
input += b"\r"
output = run_pty(script, input)
self.assertIn(b"text 't\\xeb'\r\n", output)
self.assertIn(b"line '[\\xefnserted]|t\\xeb[after]'\r\n", output)
self.assertIn(b"indexes 11 13\r\n", output)
if not is_editline and hasattr(readline, "set_pre_input_hook"):
self.assertIn(b"substitution 't\\xeb'\r\n", output)
self.assertIn(b"matches ['t\\xebnt', 't\\xebxt']\r\n", output)
expected = br"'[\xefnserted]|t\xebxt[after]'"
self.assertIn(b"result " + expected + b"\r\n", output)
# bpo-45195: Sometimes, the newline character is not written at the
# end, so don't expect it in the output.
self.assertIn(b"history " + expected, output)
# We have 2 reasons to skip this test:
# - readline: history size was added in 6.0
# See https://cnswww.cns.cwru.edu/php/chet/readline/CHANGES
# - editline: history size is broken on OS X 10.11.6.
# Newer versions were not tested yet.
@unittest.skipIf(getattr(readline, "_READLINE_VERSION", 0x601) < 0x600,
"this readline version does not support history-size")
@unittest.skipIf(is_editline,
"editline history size configuration is broken")
def test_history_size(self):
history_size = 10
with temp_dir() as test_dir:
inputrc = os.path.join(test_dir, "inputrc")
with open(inputrc, "wb") as f:
f.write(b"set history-size %d\n" % history_size)
history_file = os.path.join(test_dir, "history")
with open(history_file, "wb") as f:
# history_size * 2 items crashes readline
data = b"".join(b"item %d\n" % i
for i in range(history_size * 2))
f.write(data)
script = """
import os
import readline
history_file = os.environ["HISTORY_FILE"]
readline.read_history_file(history_file)
input()
readline.write_history_file(history_file)
"""
env = dict(os.environ)
env["INPUTRC"] = inputrc
env["HISTORY_FILE"] = history_file
run_pty(script, input=b"last input\r", env=env)
with open(history_file, "rb") as f:
lines = f.readlines()
self.assertEqual(len(lines), history_size)
self.assertEqual(lines[-1].strip(), b"last input")
def run_pty(script, input=b"dummy input\r", env=None):
pty = import_module('pty')
output = bytearray()
[master, slave] = pty.openpty()
args = (sys.executable, '-c', script)
proc = subprocess.Popen(args, stdin=slave, stdout=slave, stderr=slave, env=env)
os.close(slave)
with ExitStack() as cleanup:
cleanup.enter_context(proc)
def terminate(proc):
try:
proc.terminate()
except ProcessLookupError:
# Workaround for Open/Net BSD bug (Issue 16762)
pass
cleanup.callback(terminate, proc)
cleanup.callback(os.close, master)
# Avoid using DefaultSelector and PollSelector. Kqueue() does not
# work with pseudo-terminals on OS X < 10.9 (Issue 20365) and Open
# BSD (Issue 20667). Poll() does not work with OS X 10.6 or 10.4
# either (Issue 20472). Hopefully the file descriptor is low enough
# to use with select().
sel = cleanup.enter_context(selectors.SelectSelector())
sel.register(master, selectors.EVENT_READ | selectors.EVENT_WRITE)
os.set_blocking(master, False)
while True:
for [_, events] in sel.select():
if events & selectors.EVENT_READ:
try:
chunk = os.read(master, 0x10000)
except OSError as err:
# Linux raises EIO when slave is closed (Issue 5380)
if err.errno != EIO:
raise
chunk = b""
if not chunk:
return output
output.extend(chunk)
if events & selectors.EVENT_WRITE:
try:
input = input[os.write(master, input):]
except OSError as err:
# Apparently EIO means the slave was closed
if err.errno != EIO:
raise
input = b"" # Stop writing
if not input:
sel.modify(master, selectors.EVENT_READ)
if __name__ == "__main__":
unittest.main()
|
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
import unit_tests.utils as ut_utils
import zaza.openstack.utilities.openstack_upgrade as openstack_upgrade
class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
async def _arun_action_on_units(self, units, cmd, model_name=None,
raise_on_failure=True):
pass
def setUp(self):
super(TestOpenStackUpgradeUtils, self).setUp()
self.patch_object(
openstack_upgrade.zaza.model,
"async_run_action_on_units")
self.async_run_action_on_units.side_effect = self._arun_action_on_units
self.patch_object(
openstack_upgrade.zaza.model,
"get_units")
self.juju_status = mock.MagicMock()
self.patch_object(
openstack_upgrade.zaza.model,
"get_status",
return_value=self.juju_status)
self.patch_object(
openstack_upgrade.zaza.model,
"set_application_config")
self.patch_object(
openstack_upgrade.zaza.model,
"get_application_config")
def _get_application_config(app, model_name=None):
app_config = {
'ceph-mon': {'verbose': True, 'source': 'old-src'},
'neutron-openvswitch': {'verbose': True},
'ntp': {'verbose': True},
'percona-cluster': {'verbose': True, 'source': 'old-src'},
'cinder': {
'verbose': True,
'openstack-origin': 'old-src',
'action-managed-upgrade': False},
'neutron-api': {
'verbose': True,
'openstack-origin': 'old-src',
'action-managed-upgrade': False},
'nova-compute': {
'verbose': True,
'openstack-origin': 'old-src',
'action-managed-upgrade': False},
}
return app_config[app]
self.get_application_config.side_effect = _get_application_config
self.juju_status.applications = {
'mydb': { # Filter as it is on UPGRADE_EXCLUDE_LIST
'charm': 'cs:percona-cluster'},
            'neutron-openvswitch': { # Filter as it is a subordinate
'charm': 'cs:neutron-openvswitch',
'subordinate-to': 'nova-compute'},
'ntp': { # Filter as it has no source option
'charm': 'cs:ntp'},
'nova-compute': {
'charm': 'cs:nova-compute',
'units': {
'nova-compute/0': {
'subordinates': {
'neutron-openvswitch/2': {
'charm': 'cs:neutron-openvswitch-22'}}}}},
'cinder': {
'charm': 'cs:cinder-23',
'units': {
'cinder/1': {
'subordinates': {
'cinder-hacluster/0': {
'charm': 'cs:hacluster-42'},
'cinder-ceph/3': {
'charm': 'cs:cinder-ceph-2'}}}}}}
def test_pause_units(self):
openstack_upgrade.pause_units(['cinder/1', 'glance/2'])
self.async_run_action_on_units.assert_called_once_with(
['cinder/1', 'glance/2'],
'pause',
model_name=None,
raise_on_failure=True)
def test_resume_units(self):
openstack_upgrade.resume_units(['cinder/1', 'glance/2'])
self.async_run_action_on_units.assert_called_once_with(
['cinder/1', 'glance/2'],
'resume',
model_name=None,
raise_on_failure=True)
def test_action_unit_upgrade(self):
openstack_upgrade.action_unit_upgrade(['cinder/1', 'glance/2'])
self.async_run_action_on_units.assert_called_once_with(
['cinder/1', 'glance/2'],
'openstack-upgrade',
model_name=None,
raise_on_failure=True)
def test_action_upgrade_group(self):
self.patch_object(openstack_upgrade, "pause_units")
self.patch_object(openstack_upgrade, "action_unit_upgrade")
self.patch_object(openstack_upgrade, "resume_units")
mock_nova_compute_0 = mock.MagicMock()
mock_nova_compute_0.entity_id = 'nova-compute/0'
mock_cinder_1 = mock.MagicMock()
mock_cinder_1.entity_id = 'cinder/1'
units = {
'nova-compute': [mock_nova_compute_0],
'cinder': [mock_cinder_1]}
self.get_units.side_effect = lambda app, model_name: units[app]
openstack_upgrade.action_upgrade_group(['nova-compute', 'cinder'])
pause_calls = [
mock.call(['cinder-hacluster/0'], model_name=None),
mock.call(['nova-compute/0', 'cinder/1'], model_name=None)]
self.pause_units.assert_has_calls(pause_calls, any_order=False)
action_unit_upgrade_calls = [
mock.call(['nova-compute/0', 'cinder/1'], model_name=None)]
self.action_unit_upgrade.assert_has_calls(
action_unit_upgrade_calls,
any_order=False)
resume_calls = [
mock.call(['nova-compute/0', 'cinder/1'], model_name=None),
mock.call(['cinder-hacluster/0'], model_name=None)]
self.resume_units.assert_has_calls(resume_calls, any_order=False)
def test_set_upgrade_application_config(self):
openstack_upgrade.set_upgrade_application_config(
['neutron-api', 'cinder'],
'new-src')
set_app_calls = [
mock.call(
'neutron-api',
{
'openstack-origin': 'new-src',
'action-managed-upgrade': 'True'},
model_name=None),
mock.call(
'cinder',
{
'openstack-origin': 'new-src',
'action-managed-upgrade': 'True'},
model_name=None)]
self.set_application_config.assert_has_calls(set_app_calls)
self.set_application_config.reset_mock()
openstack_upgrade.set_upgrade_application_config(
['percona-cluster'],
'new-src',
action_managed=False)
self.set_application_config.assert_called_once_with(
'percona-cluster',
{'source': 'new-src'},
model_name=None)
def test__extract_charm_name_from_url(self):
self.assertEqual(
openstack_upgrade._extract_charm_name_from_url(
'local:bionic/heat-12'),
'heat')
self.assertEqual(
openstack_upgrade._extract_charm_name_from_url(
'cs:bionic/heat-12'),
'heat')
self.assertEqual(
openstack_upgrade._extract_charm_name_from_url('cs:heat'),
'heat')
def test_get_upgrade_candidates(self):
expect = copy.deepcopy(self.juju_status.applications)
del expect['mydb'] # Filter as it is on UPGRADE_EXCLUDE_LIST
del expect['ntp'] # Filter as it has no source option
        del expect['neutron-openvswitch'] # Filter as it is a subordinate
self.assertEqual(
openstack_upgrade.get_upgrade_candidates(),
expect)
def test_get_upgrade_groups(self):
self.assertEqual(
openstack_upgrade.get_upgrade_groups(),
{
'Compute': ['nova-compute'],
'Control Plane': ['cinder'],
'Core Identity': [],
'Storage': [],
'sweep_up': []})
def test_is_action_upgradable(self):
self.assertTrue(
openstack_upgrade.is_action_upgradable('cinder'))
self.assertFalse(
openstack_upgrade.is_action_upgradable('percona-cluster'))
def test_run_action_upgrade(self):
self.patch_object(openstack_upgrade, "set_upgrade_application_config")
self.patch_object(openstack_upgrade, "action_upgrade_group")
openstack_upgrade.run_action_upgrade(
['cinder', 'neutron-api'],
'new-src')
self.set_upgrade_application_config.assert_called_once_with(
['cinder', 'neutron-api'],
'new-src',
model_name=None)
self.action_upgrade_group.assert_called_once_with(
['cinder', 'neutron-api'],
model_name=None)
def test_run_all_in_one_upgrade(self):
self.patch_object(openstack_upgrade, "set_upgrade_application_config")
self.patch_object(
openstack_upgrade.zaza.model,
'block_until_all_units_idle')
openstack_upgrade.run_all_in_one_upgrade(
['percona-cluster'],
'new-src')
self.set_upgrade_application_config.assert_called_once_with(
['percona-cluster'],
'new-src',
action_managed=False,
model_name=None)
self.block_until_all_units_idle.assert_called_once_with()
def test_run_upgrade(self):
self.patch_object(openstack_upgrade, "run_all_in_one_upgrade")
self.patch_object(openstack_upgrade, "run_action_upgrade")
openstack_upgrade.run_upgrade(
['cinder', 'neutron-api', 'ceph-mon'],
'new-src')
self.run_all_in_one_upgrade.assert_called_once_with(
['ceph-mon'],
'new-src',
model_name=None)
self.run_action_upgrade.assert_called_once_with(
['cinder', 'neutron-api'],
'new-src',
model_name=None)
def test_run_upgrade_tests(self):
self.patch_object(openstack_upgrade, "run_upgrade")
self.patch_object(openstack_upgrade, "get_upgrade_groups")
self.get_upgrade_groups.return_value = {
'Compute': ['nova-compute'],
'Control Plane': ['cinder', 'neutron-api'],
'Core Identity': ['keystone'],
'Storage': ['ceph-mon'],
'sweep_up': ['designate']}
openstack_upgrade.run_upgrade_tests('new-src', model_name=None)
run_upgrade_calls = [
mock.call(['keystone'], 'new-src', model_name=None),
mock.call(['ceph-mon'], 'new-src', model_name=None),
mock.call(['cinder', 'neutron-api'], 'new-src', model_name=None),
mock.call(['nova-compute'], 'new-src', model_name=None),
mock.call(['designate'], 'new-src', model_name=None)]
self.run_upgrade.assert_has_calls(run_upgrade_calls, any_order=False)
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
# from .models import User
class SignUpForm(UserCreationForm):
username = forms.CharField(max_length=30, required=True)
first_name = forms.CharField(max_length=30, required=True)
last_name = forms.CharField(max_length=30, required=True)
email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
class Meta:
model = User
fields = ('first_name', 'last_name', 'email', 'password1', 'password2',)
|
# defusedxml
#
# Copyright (c) 2013 by Christian Heimes <christian@python.org>
# Licensed to PSF under a Contributor Agreement.
# See https://www.python.org/psf/license for licensing details.
"""Defuse XML bomb denial of service vulnerabilities
"""
from __future__ import print_function, absolute_import
from .common import (
DefusedXmlException,
DTDForbidden,
EntitiesForbidden,
ExternalReferenceForbidden,
NotSupportedError,
_apply_defusing,
)
def defuse_stdlib():
"""Monkey patch and defuse all stdlib packages
    :warning: The monkey patch is an EXPERIMENTAL feature.
"""
defused = {}
from . import cElementTree
from . import ElementTree
from . import minidom
from . import pulldom
from . import sax
from . import expatbuilder
from . import expatreader
from . import xmlrpc
xmlrpc.monkey_patch()
defused[xmlrpc] = None
for defused_mod in [
cElementTree,
ElementTree,
minidom,
pulldom,
sax,
expatbuilder,
expatreader,
]:
stdlib_mod = _apply_defusing(defused_mod)
defused[defused_mod] = stdlib_mod
return defused
__version__ = "0.6.0rc1"
__all__ = [
"DefusedXmlException",
"DTDForbidden",
"EntitiesForbidden",
"ExternalReferenceForbidden",
"NotSupportedError",
]
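# --- usage sketch (illustrative, not part of the package) -------------------
# Calling defuse_stdlib() patches the stdlib parsers in place, so the ordinary
# xml.etree entry points afterwards reject entity-expansion payloads. This
# assumes a Python/defusedxml combination where defuse_stdlib() succeeds; the
# XML string below is only an example of such a payload.
if __name__ == "__main__":
    import xml.etree.ElementTree as _etree

    defuse_stdlib()
    _bomb = (
        '<?xml version="1.0"?>'
        '<!DOCTYPE lolz [<!ENTITY lol "lol"><!ENTITY lol2 "&lol;&lol;">]>'
        "<lolz>&lol2;</lolz>"
    )
    try:
        _etree.fromstring(_bomb)
    except EntitiesForbidden as exc:
        print("blocked:", exc)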
|
import os
import errno
import traceback
from six.moves import tkinter_messagebox as messagebox
from six import print_
class BrocoliError(Exception):
def __init__(self, exception):
self.exception = exception
def __str__(self):
return type(self.exception).__name__ + ': ' + str(self.exception)
class ConnectionError(BrocoliError):
pass
class NetworkError(BrocoliError):
pass
class FileNotFoundError(BrocoliError):
pass
class CatalogLogicError(BrocoliError):
pass
class ChecksumError(Exception):
pass
def ioerror(no):
return IOError(no, os.strerror(no))
def handle_catalog_exceptions(method):
"""
Method decorator that presents Brocoli exceptions to the user with messages
"""
def method_wrapper(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except ConnectionError as e:
messagebox.showerror('Catalog Connection Error',
('Connection failed: ' +
'{}').format(str(e)))
except FileNotFoundError as e:
messagebox.showerror('File Not Found',
('Catalog file was not found: ' +
'{}').format(str(e)))
except CatalogLogicError as e:
messagebox.showerror('Catalog Logic Error',
('Catalog logic error occurred: ' +
'{}').format(str(e)))
except ChecksumError as e:
messagebox.showerror('Checksum Error',
('Checksum error occurred: ' +
'{}').format(str(e)))
except Exception as e:
messagebox.showerror('Unknown Error',
('Some unknown exception occurred: ' +
'{}').format(str(e)))
print_(traceback.format_exc())
return method_wrapper
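# --- usage sketch (illustrative only; CatalogBrowser is a hypothetical class,
# not part of Brocoli) --------------------------------------------------------
# Methods that talk to the catalog are wrapped with the decorator above so
# failures surface as message boxes instead of tracebacks in the Tk mainloop.
class CatalogBrowser(object):
    @handle_catalog_exceptions
    def refresh(self):
        # a catalog failure raised here becomes the 'Catalog Connection Error'
        # dialog rather than an uncaught exception
        raise ConnectionError(ioerror(errno.ECONNREFUSED))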
|
# The MIT License
#
# Copyright (c) 2017 Tarlan Payments.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
class AuthorizationBuilder(object):
def __init__(self, __client_auth_data_set, __client_mandatory_fields):
from gateway.data_sets.request_parameters import (
RequestParameters,
RequestParametersTypes
)
self.__data_sets = RequestParameters
self.__data_types = RequestParametersTypes
self.__auth_mandatory_fields = __client_mandatory_fields
self.__auth_data_set = __client_auth_data_set
def add_account_guid(self, guid=None):
"""
Tarlan Payments Merchant Account GUID.
Args:
guid (str): Tarlan Payments Merchant Account GUID.
"""
self.__auth_mandatory_fields[self.__data_sets.AUTH_DATA_ACCOUNT_GUID] = self.__data_types.AUTH_DATA_ACCOUNT_GUID
self.__auth_data_set[self.__data_sets.AUTH_DATA_ACCOUNT_GUID] = guid
def add_secret_key(self, value=None):
"""
Tarlan Payments Merchant Password
Args:
value (str): Tarlan Payments Merchant Password
"""
self.__auth_mandatory_fields[self.__data_sets.AUTH_DATA_SECRET_KEY] = self.__data_types.AUTH_DATA_SECRET_KEY
self.__auth_data_set[self.__data_sets.AUTH_DATA_SECRET_KEY] = value
def add_session_id(self, id_value=None):
"""
Tarlan Payments Gateway Session ID
Args:
id_value (str): Tarlan Payments Gateway Session ID
"""
self.__auth_data_set[self.__data_sets.AUTH_DATA_SECRET_KEY] = id_value
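# --- usage sketch (illustrative only; the GUID and password values below are
# placeholders, not real credentials) -----------------------------------------
# The two dicts mirror the __client_auth_data_set / __client_mandatory_fields
# arguments the constructor expects; the builder fills them in place.
if __name__ == "__main__":
    auth_data = {}
    mandatory_fields = {}
    builder = AuthorizationBuilder(auth_data, mandatory_fields)
    builder.add_account_guid("00000000-0000-0000-0000-000000000000")  # placeholder
    builder.add_secret_key("placeholder-password")
    print(auth_data)         # keyed by the RequestParameters constants
    print(mandatory_fields)  # records the expected value types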
|
from baidu_spider import spider_main
rooturl = 'https://baike.baidu.com/item/Python/407313'
obj_spider = spider_main.SpiderMain()
obj_spider.craw(rooturl)
|
import unittest
import os
from robot.running import userkeyword
from robot.running.model import ResourceFile, UserKeyword
from robot.running.userkeyword import UserLibrary
from robot.errors import DataError
from robot.utils.asserts import (assert_equal, assert_none,
assert_raises_with_msg, assert_true)
class UserHandlerStub:
def __init__(self, kwdata, library):
self.name = kwdata.name
self.libname = library
if kwdata.name == 'FAIL':
raise Exception('Expected failure')
def create(self, name):
return self
class EmbeddedArgsHandlerStub:
def __init__(self, kwdata, library, embedded):
self.name = kwdata.name
if kwdata.name != 'Embedded ${arg}':
raise TypeError
def matches(self, name):
return name == self.name
class TestUserLibrary(unittest.TestCase):
def setUp(self):
self._orig_user_handler = userkeyword.UserKeywordHandler
self._orig_embedded_handler = userkeyword.EmbeddedArgumentsHandler
userkeyword.UserKeywordHandler = UserHandlerStub
userkeyword.EmbeddedArgumentsHandler = EmbeddedArgsHandlerStub
def tearDown(self):
userkeyword.UserKeywordHandler = self._orig_user_handler
userkeyword.EmbeddedArgumentsHandler = self._orig_embedded_handler
def test_name_from_resource(self):
for source, exp in [('resources.html', 'resources'),
(os.path.join('..','res','My Res.HTM'), 'My Res'),
(os.path.abspath('my_res.xhtml'), 'my_res')]:
lib = self._get_userlibrary(source=source)
assert_equal(lib.name, exp)
def test_name_from_test_case_file(self):
assert_none(self._get_userlibrary().name)
def test_creating_keyword(self):
lib = self._get_userlibrary('kw 1', 'kw 2')
assert_equal(len(lib.handlers), 2)
assert_true('kw 1' in lib.handlers)
assert_true('kw 2' in lib.handlers)
def test_creating_keyword_when_kw_name_has_embedded_arg(self):
lib = self._get_userlibrary('Embedded ${arg}')
self._lib_has_embedded_arg_keyword(lib)
def test_creating_keywords_when_normal_and_embedded_arg_kws(self):
lib = self._get_userlibrary('kw1', 'Embedded ${arg}', 'kw2')
assert_equal(len(lib.handlers), 3)
assert_true('kw1' in lib.handlers)
assert_true('kw 2' in lib.handlers)
self._lib_has_embedded_arg_keyword(lib)
def test_creating_duplicate_embedded_arg_keyword_in_resource_file(self):
lib = self._get_userlibrary('Embedded ${arg}', 'kw', 'Embedded ${arg}')
assert_equal(len(lib.handlers), 3)
assert_true(not hasattr(lib.handlers['kw'], 'error'))
self._lib_has_embedded_arg_keyword(lib, count=2)
def test_creating_duplicate_keyword_in_resource_file(self):
lib = self._get_userlibrary('kw', 'kw', 'kw 2')
assert_equal(len(lib.handlers), 2)
assert_true('kw' in lib.handlers)
assert_true('kw 2' in lib.handlers)
assert_equal(lib.handlers['kw'].error,
"Keyword with same name defined multiple times.")
def test_creating_duplicate_keyword_in_test_case_file(self):
lib = self._get_userlibrary('MYKW', 'my kw')
assert_equal(len(lib.handlers), 1)
assert_true('mykw' in lib.handlers)
assert_equal(lib.handlers['mykw'].error,
"Keyword with same name defined multiple times.")
def test_handlers_contains(self):
lib = self._get_userlibrary('kw')
assert_true('kw' in lib.handlers)
assert_true('nonex' not in lib.handlers)
def test_handlers_getitem_with_non_existing_keyword(self):
lib = self._get_userlibrary('kw')
assert_raises_with_msg(
DataError,
"Test case file contains no keywords matching name 'non existing'.",
lib.handlers.__getitem__, 'non existing')
def test_handlers_getitem_with_existing_keyword(self):
lib = self._get_userlibrary('kw')
handler = lib.handlers['kw']
assert_true(isinstance(handler, UserHandlerStub))
def _get_userlibrary(self, *keywords, **conf):
resource = ResourceFile(**conf)
resource.keywords = [UserKeyword(name) for name in keywords]
resource_type = UserLibrary.TEST_CASE_FILE_TYPE \
if 'source' not in conf else UserLibrary.RESOURCE_FILE_TYPE
return UserLibrary(resource, resource_type)
def _lib_has_embedded_arg_keyword(self, lib, count=1):
assert_true('Embedded ${arg}' in lib.handlers)
embedded = lib.handlers._embedded
assert_equal(len(embedded), count)
for template in embedded:
assert_equal(template.name, 'Embedded ${arg}')
if __name__ == '__main__':
unittest.main()
|
"""
python-socketio.py
Sample Mcity OCTANE python socketio script
"""
import os
from dotenv import load_dotenv
import socketio
#Load environment variables
load_dotenv()
api_key = os.environ.get('MCITY_OCTANE_KEY', None)
server = os.environ.get('MCITY_OCTANE_SERVER', 'http://localhost:5000')
namespace = "/octane"
#If no API Key provided, exit.
if not api_key:
    print("No API KEY SPECIFIED. EXITING")
    exit()
#Create a SocketIO Python client.
sio = socketio.Client()
# Async client is available also: sio = socketio.AsyncClient()
def send_auth():
"""
Emit an authentication event.
"""
sio.emit('auth', {'x-api-key': api_key}, namespace=namespace)
#Define event callbacks
@sio.on('connect', namespace=namespace)
def on_connect():
"""
Handle connection event and send authentication key
"""
send_auth()
@sio.on('join', namespace=namespace)
def on_join(data):
"""
Event fired when user joins a channel
"""
print('Join received with ', data)
@sio.on('channels', namespace=namespace)
def on_channels(data):
"""
Event fired when a user requests current channel information.
"""
print('Channel information', data)
@sio.on('disconnect', namespace=namespace)
def on_disconnect():
"""
Event fired on disconnect.
"""
print('disconnected from server')
#Make connection.
sio.connect(server, namespaces=[namespace])
sio.wait()
|
dataset_type = 'IcdarDataset'
data_root = 'data/icdar2015'
train = dict(
type=dataset_type,
ann_file=f'{data_root}/instances_training.json',
img_prefix=f'{data_root}/imgs',
pipeline=None)
test = dict(
type=dataset_type,
ann_file=f'{data_root}/instances_test.json',
img_prefix=f'{data_root}/imgs',
pipeline=None)
train_list = [train]
test_list = [test]
|
import gym
import pybullet_envs
from PIL import Image
import argparse
import numpy as np
import torch
import copy
import os
from sklearn.preprocessing import normalize as Normalize
from models import TD3, TD3_adv2
def parse_arguments():
parser = argparse.ArgumentParser("TESTING")
parser.add_argument('-p', "--policy", type=str, default='td3', help="td3/adv")
parser.add_argument('-e', "--env", type=str, default="LunarLanderContinuous-v2", help="env name")
parser.add_argument('-n', "--n-episodes", type=int, default=10, help="number of episodes")
parser.add_argument("--mode", type=str, default='nr', help="nr (default) / pr")
parser.add_argument("--train-seed", type=int, default=1, help="random seed for training")
parser.add_argument("--test-seed", type=int, default=1, help="random seed for testing")
parser.add_argument("--nr-delta", type=float, default=0.0, help="delta for NR-MDP")
parser.add_argument("--pr-prob", type=float, default=0.0, help="prob of PR-MDP")
parser.add_argument("--render", action="store_true", default=False)
return parser.parse_args()
def get_policy(arglist, kwargs, max_action):
# Initialize policy
if arglist.policy == "td3":
# Target policy smoothing is scaled wrt the action scale
kwargs["policy_noise"] = 0.0
kwargs["noise_clip"] = 0.0
kwargs["policy_freq"] = 2
policy = TD3.TD3(**kwargs)
elif arglist.policy == "OurDDPG":
policy = OurDDPG.DDPG(**kwargs)
elif arglist.policy == "DDPG":
policy = DDPG.DDPG(**kwargs)
elif arglist.policy == 'adv':
kwargs['alpha'] = 0.01
kwargs['adv_epsilon'] = 0.01
kwargs['logdir'] = f'./tensorboard/{arglist.policy}_{arglist.env}_{arglist.train_seed}/'
policy = TD3_adv2.TD3(**kwargs)
else:
raise NotImplementedError
return policy
def test(arglist):
env_name = arglist.env
random_seed = arglist.test_seed
n_episodes = arglist.n_episodes
lr = 0.002
max_timesteps = 3000
render = arglist.render
filename = "{}_{}_{}".format(arglist.policy, env_name, arglist.train_seed)
directory = "./train/{}".format(env_name)
env = gym.make(env_name)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
# Set random seed
env.seed(random_seed)
torch.manual_seed(random_seed)
np.random.seed(random_seed)
kwargs = {
"state_dim": state_dim,
"action_dim": action_dim,
"max_action": max_action,
"discount": 0.99,
"tau": 0.005,
"policy_noise": 0.001,
"noise_clip": 1.0,
"policy_freq": 2
}
policy = get_policy(arglist, kwargs, max_action)
policy.load(os.path.join(directory, filename))
total_reward_list = []
for ep in range(1, n_episodes+1):
ep_reward = 0.0
state = env.reset()
for t in range(max_timesteps):
action = policy.select_action(state)
if arglist.mode == 'nr':
# use truncated gaussian noise for both nr-mdp and pr-mdp settings
noise = np.random.normal(0.0, max_action, size=action.shape)
noise = np.clip(noise, -max_action, max_action)
adv_action = (1.0 - arglist.nr_delta) * action + arglist.nr_delta * noise
elif arglist.mode == 'pr':
adv_action = action
if np.random.rand() < arglist.pr_prob:
adv_action = np.random.normal(0.0, action_dim, size=action.shape)
adv_action = np.clip(adv_action, -max_action, max_action)
else:
raise NotImplementedError('invalid mode')
state, reward, done, _ = env.step(adv_action)
ep_reward += reward
if render:
env.render()
if done:
break
print('Episode: {}\tReward: {}'.format(ep, int(ep_reward)))
total_reward_list.append(ep_reward)
ep_reward = 0.0
env.close()
return total_reward_list
if __name__ == '__main__':
args = parse_arguments()
reward_list = test(args)
reward_array = np.array(reward_list, dtype=np.float32)
reward_mean = reward_array.mean()
reward_half_std = reward_array.std() / 2.0
loginfo = 'policy={} env={} load_seed={} random_seed={} mode={} pr-prob={} nr-delta={} result={}±{}'
print(loginfo.format(args.policy, args.env, args.train_seed, args.test_seed, args.mode, args.pr_prob, args.nr_delta, reward_mean, reward_half_std))
|
"""Load dependencies needed to compile p4c as a 3rd-party consumer."""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def p4c_deps():
"""Loads dependencies need to compile p4c."""
# Third party projects can define the target
# @com_github_p4lang_p4c_extension:ir_extensions with a `filegroup`
# containing their custom .def files.
if not native.existing_rule("com_github_p4lang_p4c_extension"):
# By default, no IR extensions.
native.new_local_repository(
name = "com_github_p4lang_p4c_extension",
path = ".",
build_file_content = """
filegroup(
name = "ir_extensions",
srcs = [],
visibility = ["//visibility:public"],
)
""",
)
if not native.existing_rule("com_github_nelhage_rules_boost"):
git_repository(
name = "com_github_nelhage_rules_boost",
# Newest commit on main branch as of May 3, 2021.
commit = "2598b37ce68226fab465c0f0e10988af872b6dc9",
remote = "https://github.com/nelhage/rules_boost",
shallow_since = "1611019749 -0800",
)
if not native.existing_rule("com_github_p4lang_p4runtime"):
# Cannot currently use local_repository due to Bazel limitation,
# see https://github.com/bazelbuild/bazel/issues/11573.
#
# native.local_repository(
# name = "com_github_p4lang_p4runtime",
# path = "@com_github_p4lang_p4c//:control-plane/p4runtime/proto",
# )
#
# We use git_repository as a workaround; the version used here should
# ideally be kept in sync with the submodule control-plane/p4runtime.
git_repository(
name = "com_github_p4lang_p4runtime",
remote = "https://github.com/p4lang/p4runtime",
# Newest commit on main branch as of Jan 22, 2021.
commit = "0d40261b67283999bf0f03bd6b40b5374c7aebd0",
shallow_since = "1611340571 -0800",
# strip_prefix is broken; we use patch_cmds as a workaround,
# see https://github.com/bazelbuild/bazel/issues/10062.
# strip_prefix = "proto",
patch_cmds = ["mv proto/* ."],
)
if not native.existing_rule("com_google_googletest"):
# Cannot currently use local_repository due to Bazel limitation,
# see https://github.com/bazelbuild/bazel/issues/11573.
#
# local_repository(
# name = "com_google_googletest",
# path = "@com_github_p4lang_p4c//:test/frameworks/gtest",
# )
#
# We use http_archive as a workaround; the version used here should
# ideally be kept in sync with the submodule test/frameworks/gtest.
http_archive(
name = "com_google_googletest",
urls = ["https://github.com/google/googletest/archive/release-1.10.0.tar.gz"],
strip_prefix = "googletest-release-1.10.0",
sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb",
)
if not native.existing_rule("com_google_protobuf"):
http_archive(
name = "com_google_protobuf",
url = "https://github.com/protocolbuffers/protobuf/releases/download/v3.13.0/protobuf-all-3.13.0.tar.gz",
strip_prefix = "protobuf-3.13.0",
sha256 = "465fd9367992a9b9c4fba34a549773735da200903678b81b25f367982e8df376",
)
# Dependencies used by the tc backend
if not native.existing_rule("com_google_absl"):
http_archive(
name = "com_google_absl",
# The most recent commit as of 2021-09-02
urls = ["https://github.com/abseil/abseil-cpp/archive/4bb9e39c88854dbf466688177257d11810719853.zip"],
strip_prefix = "abseil-cpp-4bb9e39c88854dbf466688177257d11810719853",
sha256 = "4cad653c8d6a2c0a551bae3114e2208bf80b0e7d54a4f094f3f5e967c1dab45b",
)
if not native.existing_rule("com_github_jbeder_yaml_cpp"):
http_archive(
name = "com_github_jbeder_yaml_cpp",
urls = ["https://github.com/jbeder/yaml-cpp/archive/refs/tags/yaml-cpp-0.7.0.zip"],
strip_prefix = "yaml-cpp-yaml-cpp-0.7.0",
sha256 = "4d5e664a7fb2d7445fc548cc8c0e1aa7b1a496540eb382d137e2cc263e6d3ef5",
)
|
import pandas as pd
import plotly.express as px
url = 'https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-voc.csv'
prov_dict = {
"AB" : "Alberta",
"BC" : "British Columbia",
"CA" : "Canada",
"MB" : "Manitoba",
"NB" : "New Brunswick",
"NL" : "Newfoundland and Labrador",
"NS" : "Nova Scotia",
"NT" : "Northwest Territories",
"NU" : "Nunavut",
"ON" : "Ontario",
"PE" : "Prince Edward Island",
"QC" : "Quebec",
"SK" : "Saskatchewan",
"YK" : "Yukon",
"YT" : "Yukon"
}
colours = ["#012169", "#E03C31", "green", "lightgray"]
def get_province(prov):
    try:
        return prov_dict[prov]
    except KeyError:
        return prov
def get_area(prov):
if prov == 'YK':
return 'YT'
else:
return prov
df = pd.read_csv(url).fillna(0)
dfclean = df[ (df["report_date"] > "2021") & (df["report_date"] < "2023") & (df["b117"] >= 0) & (df["b1351"] >= 0) & (df["p1"] >= 0) ]
dfclean["Province"] = dfclean.apply(lambda r: get_province(r["prov"]), axis=1)
dfclean["Area"] = dfclean.apply(lambda r: get_area(r["prov"]), axis=1)
dfAlpha = dfclean.copy()
dfAlpha["Variant"] = "B.1.1.7 (Alpha)"
dfAlpha["Count"] = dfAlpha["b117"]
dfBeta = dfclean.copy()
dfBeta["Variant"] = "B.1.351 (Beta)"
dfBeta["Count"] = dfBeta["b1351"]
dfGamma = dfclean.copy()
dfGamma["Variant"] = "P.1 (Gamma)"
dfGamma["Count"] = dfGamma["p1"]
dfvoc = dfAlpha.append(dfBeta).append(dfGamma)
dfvocmax = dfvoc.groupby(["Province", "Variant"]).max().reset_index() \
[["Province", "Variant", "Count"]] \
.rename(columns={"Count" : "MaxVocCount"})
dfvoc = pd.merge(dfvoc, dfvocmax, how="left", left_on=["Province", "Variant"], right_on=["Province", "Variant"])
dfvoc = dfvoc.sort_values(by=["Variant", "MaxVocCount", "Province", "report_date"], ascending=[True, False, True, True])
dfvoc["New"] = dfvoc.groupby(["Province", "Variant"])["Count"].diff()
dfprov = dfvoc[dfvoc["Province"] != "Canada"]
figlineprov = px.line(dfprov,
x="report_date", y="Count", color="Variant", facet_col="Province", facet_col_wrap=1,
labels={"report_date" : "Reported date", "Count" : "Cumulative cases", "Province" : "Province/Territory"},
title="Cumulative cases with a variant of concern<br>by reported date by province/territory by variant",
height=5000, template="plotly_white", color_discrete_sequence=colours, facet_row_spacing=0.025
)
figbarprovd = px.bar(dfprov, x="report_date", y="New", color="Variant", facet_col="Province", facet_col_wrap=1,
labels={"report_date" : "Reported date", "New" : "New cases", "Province" : "Province/Territory", "Variant" : "Variant of concern"},
hover_name="Variant",
title="New cases with a variant of concern by reported date<br>by province/territory",
height=5000, template="plotly_white", color_discrete_sequence=colours, facet_row_spacing=0.025
)
dfcan = dfvoc[dfvoc["Province"] == "Canada"]
figlinecan_c = px.line(dfcan,
x="report_date", y="Count", color="Variant",
labels={"report_date" : "Reported date", "Count" : "Cumulative cases"},
title="Cumulative cases in Canada with a variant of concern<br>by reported date by variant",
template="plotly_white", color_discrete_sequence=colours
)
figbarcan_d = px.bar(dfcan, x="report_date", y="New", color="Variant",
labels={"report_date" : "Reported date", "New" : "New cases", "Variant" : "Variant of concern"},
hover_name="Variant",
title="New cases in Canada with a variant of concern by reported date",
template="plotly_white", color_discrete_sequence=colours
)
# Accessibility
date_name = "Date"
def join(df, area, variant):
dfarea = dfclean[dfclean["Area"] == area][["report_date", variant]].rename(columns={"report_date" : date_name, variant : area})
return pd.merge(df, dfarea, how="left", left_on=[date_name], right_on=[date_name])
def create_table(variant):
date_max = dfclean.max()["report_date"]
df_max = dfclean[(dfclean["Area"]!="CA") & (dfclean["report_date"] == date_max)][["Area", variant]].sort_values(by=[variant, "Area"], ascending=[False, True])
areas = df_max["Area"].tolist()
df_variant = pd.DataFrame()
df_variant[date_name] = dfclean[dfclean["Area"]=="CA"]["report_date"]
for area in areas:
df_variant = join(df_variant, area, variant)
df_variant = join(df_variant, "CA", variant)
return df_variant.set_index(date_name).sort_values(by=[date_name], ascending=[False]).round().astype(int)
df_Alpha = create_table("b117")
df_Beta = create_table("b1351")
df_Gamma = create_table("p1")
|
# Random Point in Non-overlapping Rectangles
'''
Given a list of non-overlapping axis-aligned rectangles rects, write a function pick which randomly and uniformly picks an integer point in the space covered by the rectangles.
Note:
An integer point is a point that has integer coordinates.
A point on the perimeter of a rectangle is included in the space covered by the rectangles.
ith rectangle = rects[i] = [x1,y1,x2,y2], where [x1, y1] are the integer coordinates of the bottom-left corner, and [x2, y2] are the integer coordinates of the top-right corner.
length and width of each rectangle does not exceed 2000.
1 <= rects.length <= 100
pick return a point as an array of integer coordinates [p_x, p_y]
pick is called at most 10000 times.
Example 1:
Input:
["Solution","pick","pick","pick"]
[[[[1,1,5,5]]],[],[],[]]
Output:
[null,[4,1],[4,1],[3,3]]
Example 2:
Input:
["Solution","pick","pick","pick","pick","pick"]
[[[[-2,-2,-1,-1],[1,0,3,0]]],[],[],[],[],[]]
Output:
[null,[-1,-2],[2,0],[-2,-1],[3,0],[-2,-2]]
Explanation of Input Syntax:
The input is two lists: the subroutines called and their arguments. Solution's constructor has one argument, the array of rectangles rects.
pick has no arguments. Arguments are always wrapped with a list, even if there aren't any.
'''
import random
from typing import List
class Solution:
def __init__(self, rects: List[List[int]]):
self.rects = rects
self.weights = []
s = 0
for x1, y1, x2, y2 in rects:
w = (x2-x1+1)*(y2-y1+1)
self.weights.append(w)
s+=w
self.weights = [x/s for x in self.weights]
print(self.weights)
def pick(self) -> List[int]:
rectangle = random.choices(population = self.rects, weights = self.weights, k=1)[0]
x1, y1, x2, y2 = rectangle
return [random.randint(x1,x2), random.randint(y1,y2)]
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
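# Quick local check (illustrative): with the single rectangle from Example 1,
# every picked point must land inside [1, 5] x [1, 5]; the exact points vary
# because pick() is randomized.
if __name__ == '__main__':
    obj = Solution([[1, 1, 5, 5]])
    samples = [obj.pick() for _ in range(5)]
    print(samples)
    assert all(1 <= px <= 5 and 1 <= py <= 5 for px, py in samples)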
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension of
N, a hidden layer dimension of H, and performs classification over C classes.
We train the network with a softmax loss function and L2 regularization on the
weight matrices. The network uses a ReLU nonlinearity after the first fully
connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values and
biases are initialized to zero. Weights and biases are stored in the
variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function; has the same keys as self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the class scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, C). #
#############################################################################
out1 = np.maximum(0, X.dot(W1) + b1) # relu, (N, H)
scores = out1.dot(W2) + b2 # (N, C)
#############################################################################
# END OF YOUR CODE #
#############################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier loss. #
#############################################################################
correct_class_score = scores[np.arange(N), y].reshape(N, 1)
exp_sum = np.sum(np.exp(scores), axis=1).reshape(N, 1)
loss = np.sum(np.log(exp_sum) - correct_class_score)
loss /= N
loss += 0.5 * reg * np.sum(W1 * W1)+ 0.5 * reg * np.sum(W2 * W2)
#############################################################################
# END OF YOUR CODE #
#############################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
margin = np.exp(scores) / exp_sum
margin[np.arange(N), y] += -1
margin /= N #(N, C)
dW2 = out1.T.dot(margin) #(H ,C)
dW2 += reg * W2
grads['W2'] = dW2
grads['b2'] = np.sum(margin, axis = 0)
margin1 = margin.dot(W2.T) #(N, H)
margin1[out1 <= 0] = 0
dW1 = X.T.dot(margin1) #(D, H)
dW1 += reg * W1
grads['W1'] = dW1
grads['b1'] = np.sum(margin1, axis = 0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
    - y: A numpy array of shape (N,) giving training labels; y[i] = c means that
X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning rate
after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: Create a random minibatch of training data and labels, storing #
# them in X_batch and y_batch respectively. #
#########################################################################
mask = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[mask]
y_batch = y[mask]
#########################################################################
# END OF YOUR CODE #
#########################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
#########################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using stochastic gradient descent. You'll need to use the gradients #
# stored in the grads dictionary defined above. #
#########################################################################
self.params['W1'] -= learning_rate * grads['W1']
self.params['W2'] -= learning_rate * grads['W2']
self.params['b1'] -= learning_rate * grads['b1']
self.params['b2'] -= learning_rate * grads['b2']
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each of
the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
to have class c, where 0 <= c < C.
"""
y_pred = None
###########################################################################
# TODO: Implement this function; it should be VERY simple! #
###########################################################################
out1 = np.maximum(0, X.dot(self.params['W1']) + self.params['b1']) # relu, (N, H)
y_pred = np.argmax(out1.dot(self.params['W2']) + self.params['b2'],axis = 1) # (N, C)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
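# --- smoke test sketch (illustrative only; shapes and hyperparameters are
# arbitrary synthetic choices, not tied to any assignment data) ----------------
if __name__ == '__main__':
    np.random.seed(0)
    net = TwoLayerNet(input_size=4, hidden_size=10, output_size=5, std=1e-1)
    X = 10 * np.random.randn(100, 4)
    y = np.random.randint(5, size=100)
    X_val = 10 * np.random.randn(20, 4)
    y_val = np.random.randint(5, size=20)
    stats = net.train(X, y, X_val, y_val,
                      learning_rate=1e-1, reg=5e-6,
                      num_iters=200, batch_size=50, verbose=False)
    print('final training loss: %f' % stats['loss_history'][-1])
    print('validation accuracy: %f' % (net.predict(X_val) == y_val).mean())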
|
# coding: utf-8
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mainconfig.settings')
application = get_wsgi_application()
|
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return "%s:%s:%s:%s" % (self.id, self.name, self.header,self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
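# Illustrative sketch of the semantics above: __eq__ treats a missing id as a
# wildcard, so a fixture group created without an id still matches the same
# group once the application assigns one, and id_or_max() sorts id-less groups
# last when ordering by id.
if __name__ == "__main__":
    expected = Group(name="accounting")
    stored = Group(name="accounting", id="42")
    assert expected == stored
    assert sorted([expected, stored], key=Group.id_or_max) == [stored, expected]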
|
"""
@param: n -> int : Upper Limit of the range
"""
def multiples(n: int) -> int:
num: list = []
for i in range(1, n):
if (i % 3 == 0) or (i % 5 == 0):
num.append(i)
return sum(num)
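# Worked example (illustrative): below 10 the multiples of 3 or 5 are
# 3, 5, 6 and 9, so multiples(10) should return 23.
assert multiples(10) == 23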
if __name__ == '__main__':
t: int = int(input())
for _x in range(t):
n: int = int(input())
print(multiples(n))
|
class Solution:
r"""
    Function annotations
    >>> def add(x: int, y: int) -> int:
    ...     return x + y
>>> add.__annotations__
{'x': <class 'int'>, 'y': <class 'int'>, 'return': <class 'int'>}
"""
def __init__(self):
pass
def solve(self):
pass
if __name__ == '__main__':
import doctest
doctest.testmod()
|
# module for the <archdesc/> or collection-level description
import xml.etree.cElementTree as ET
from archdescsimple import archdescsimple
from access_use_restrict import access_use_restrict
import globals
import wx
def archdesc(arch_root, CSheet, version, input_data):
from wx.lib.pubsub import pub
#update GUI progress bar
if "ask_gui" in globals.new_elements:
wx.CallAfter(pub.sendMessage, "update", msg="Writing <archdesc>...")
#collection-level did
if "ask_gui" in globals.new_elements:
wx.CallAfter(pub.sendMessage, "update", msg="Writing collection-level <did>...")
from collection_did import collection_did
cdid_root = arch_root.find('did')
collection_did(cdid_root, CSheet, version)
if "ask_gui" in globals.new_elements:
wx.CallAfter(pub.sendMessage, "update", msg="Writing <archdesc> elements...")
#Access Restrictions Section
if "add_accessrestrict" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
access_use_restrict(arch_root, CSheet.find('Access'), "accessrestrict", "Access", add)
#Accruals Section
if "add_accruals" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "accruals", CSheet.find('Accruals'), CSheet.find('Accruals/Accrual'), add)
#Acquisitions Information Section
if CSheet.find('AcquisitionInfo/Acquis/Event') is None:
pass
else:
if CSheet.find('AcquisitionInfo/Acquis/Event').text:
if arch_root.find('acqinfo') is None:
if "add_acq" in globals.new_elements or "add-all" in globals.add_all:
acq_element = ET.Element('acqinfo')
arch_root.insert(1, acq_element)
count = 0
for acquis in CSheet.find('AcquisitionInfo'):
if acquis.find('Event').text and acquis.find('Date').text:
count = count + 1
if count > 1:
chronlist_element = ET.Element('chronlist')
arch_root.find('acqinfo').append(chronlist_element)
for acquis in CSheet.find('AcquisitionInfo'):
if acquis.find('Event').text:
chronitem_element = ET.Element('chronitem')
chronlist_element.append(chronitem_element)
event_element = ET.Element('event')
chronitem_element.append(event_element)
event_element.text = acquis.find('Event').text
if version == "ead3":
if acquis.find('Date').text:
from date import basic_date
chronitem_element.append(basic_date(acquis.find('Date').text, acquis.find('DateNormal').text, 'inclusive'))
else:
date_element = ET.Element('date')
if acquis.find('Date').text:
chronitem_element.append(date_element)
date_element.text = acquis.find('Date').text
if acquis.find('DateNormal').text:
date_element.set('normal', acquis.find('DateNormal').text)
else:
date_element.set('normal', acquis.find('Date').text)
else:
for acquis in CSheet.find('AcquisitionInfo'):
if acquis.find('Event').text:
p_element = ET.Element('p')
arch_root.find('acqinfo').append(p_element)
p_element.text = acquis.find('Event').text
date_element = ET.Element('date')
if acquis.find('Date').text:
p_element.append(date_element)
date_element.text = acquis.find('Date').text
if acquis.find('DateNormal').text:
date_element.set('normal', acquis.find('DateNormal').text)
else:
date_element.set('normal', acquis.find('Date').text)
else:
old_acquis = arch_root.find('acqinfo').attrib
old_head = arch_root.find('acqinfo/head')
arch_root.find('acqinfo').clear()
if old_acquis is None:
pass
else:
arch_root.find('acqinfo').attrib = old_acquis
if old_head is None:
pass
else:
arch_root.find('acqinfo').append(old_head)
count = 0
for acquis in CSheet.find('AcquisitionInfo'):
if acquis.find('Event').text and acquis.find('Date').text:
count = count + 1
if count > 1:
chronlist_element = ET.Element('chronlist')
arch_root.find('acqinfo').append(chronlist_element)
for acquis in CSheet.find('AcquisitionInfo'):
if acquis.find('Event').text:
chronitem_element = ET.Element('chronitem')
chronlist_element.append(chronitem_element)
event_element = ET.Element('event')
chronitem_element.append(event_element)
event_element.text = acquis.find('Event').text
if version == "ead3":
if acquis.find('Date').text:
from date import basic_date
chronitem_element.append(basic_date(acquis.find('Date').text, acquis.find('DateNormal').text, 'inclusive'))
else:
date_element = ET.Element('date')
if acquis.find('Date').text:
chronitem_element.append(date_element)
date_element.text = acquis.find('Date').text
if acquis.find('DateNormal').text:
date_element.set('normal', acquis.find('DateNormal').text)
else:
date_element.set('normal', acquis.find('Date').text)
else:
for acquis in CSheet.find('AcquisitionInfo'):
if acquis.find('Event').text:
p_element = ET.Element('p')
arch_root.find('acqinfo').append(p_element)
p_element.text = acquis.find('Event').text
date_element = ET.Element('date')
if acquis.find('Date').text:
p_element.append(date_element)
date_element.text = acquis.find('Date').text
if acquis.find('DateNormal').text:
date_element.set('normal', acquis.find('DateNormal').text)
else:
date_element.set('normal', acquis.find('Date').text)
else:
for empty_acquis in arch_root:
if empty_acquis.tag == "acqinfo":
arch_root.remove(empty_acquis)
# Alternate Forms Available Section <altformavail>
if "add_altforms" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "altformavail", CSheet.find('AlternateForms'), CSheet.find('AlternateForms/Alternative'), add)
# Appraisal Section <appraisal>
if "add_appraisal" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "appraisal", CSheet.find('AppraisalInfo'), CSheet.find('AppraisalInfo/Appraisal'), add)
# Arrangement Section <arrangement>
if "add_arrange" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
if arch_root.find('arrangement/list') is None:
arrange_list = False
else:
arrange_list = True
archdescsimple(arch_root, "arrangement", CSheet.find('CollectionArrangement'), CSheet.find('CollectionArrangement/Arrangement'), add)
if arrange_list == True:
if CSheet.find('CollectionMap/Component/ComponentName').text:
if CSheet.find('CollectionMap/Component/ComponentName').text.lower() == "no series" or CSheet.find('CollectionMap/Component/ComponentName').text.lower() == "noseries":
pass
else:
list_element = ET.Element('list')
if arch_root.find('arrangement') is None:
arrangement_element = ET.Element('arrangement')
arr_index = arch_root.getchildren().index(arch_root.find('dsc')) - 1
arch_root.insert(arr_index, arrangement_element)
arrangement_element.append(list_element)
else:
arch_root.find('arrangement').append(list_element)
list_element.set('type', 'simple')
for cmpnt in CSheet.find('CollectionMap'):
if cmpnt.find('ComponentName').text:
item_element = ET.Element('item')
list_element.append(item_element)
if cmpnt.find('ComponentLevel').text == "1":
emph_element = ET.Element('emph')
item_element.append(emph_element)
emph_element.set('render', 'bold')
if cmpnt.find('ComponentNumber').text:
emph_element.text = "Series " + cmpnt.find('ComponentNumber').text + " - " + cmpnt.find('ComponentName').text
else:
emph_element.text = "Series" + " - " + cmpnt.find('ComponentName').text
cmpnt_num = cmpnt.find('ComponentNumber').text
for ComponentSheet in input_data:
if ComponentSheet.find('SeriesNumber') is None:
pass
elif ComponentSheet.find('SeriesNumber').text == cmpnt_num:
cmpnt_info = ComponentSheet
if cmpnt_info.find('SeriesDate').text:
emph_element.tail = ", " + cmpnt_info.find('SeriesDate').text
else:
if cmpnt.find('ComponentNumber').text:
item_element.text = "Subseries " + cmpnt.find('ComponentNumber').text + ": " + cmpnt.find('ComponentName').text
else:
item_element.text = "Subseries" + ": " + cmpnt.find('ComponentName').text
cmpnt_num = cmpnt.find('ComponentNumber').text
for ComponentSheet in input_data:
if ComponentSheet.find('SeriesNumber') is None:
pass
elif ComponentSheet.find('SeriesNumber').text == cmpnt_num:
cmpnt_info = ComponentSheet
if cmpnt_info.find('SeriesDate').text:
emph_element.tail = ", " + cmpnt_info.find('SeriesDate').text
# Bibliography Section <bibliography>
if CSheet.find('PublicationBibliography/Publication/Title').text or CSheet.find('ManuscriptBibliography/Manuscript/UnitTitle').text:
if arch_root.find('bibliography') is None:
if "add_biblio" in globals.new_elements or "add-all" in globals.add_all:
biblio_element = ET.Element('bibliography')
biblio_index = arch_root.getchildren().index(arch_root.find('dsc'))
arch_root.insert(biblio_index, biblio_element)
if CSheet.find('BibliographyNote').text:
p_element = ET.Element('p')
biblio_element.append(p_element)
p_element.text = CSheet.find('BibliographyNote').text
for pub in CSheet.find('PublicationBibliography'):
if pub.find('Author').text or pub.find('Title').text or pub.find('Citation').text:
bibref_element = ET.Element('bibref')
biblio_element.append(bibref_element)
if pub.find('Author').text:
bibref_element.text = pub.find('Author').text + ", "
if pub.find('Title').text:
title_element = ET.Element('title')
bibref_element.append(title_element)
title_element.text = pub.find('Title').text
if pub.find('Citation').text:
title_element.tail = " " + pub.find('Citation').text + ", "
if pub.find('Date').text:
date_element = ET.Element('date')
if version == "ead3":
bibref_element.append(date_element)
date_element.text = pub.find('Date').text
else:
if pub.find('Title').text:
title_element.append(date_element)
date_element.text = pub.find('Date').text
else:
bibref_element.append(date_element)
date_element.text = pub.find('Date').text
if pub.find('NormalDate').text:
date_element.set("normal", pub.find('NormalDate').text)
if pub.find('Reference').text:
ref_element = ET.Element('ref')
bibref_element.append(ref_element)
ref_element.text = pub.find('Reference').text
if pub.find('ReferenceLink').text:
ref_element.set('href', pub.find('ReferenceLink').text)
for man in CSheet.find('ManuscriptBibliography'):
                    if man.find('Collection').text or man.find('UnitTitle').text or man.find('UnitID').text:
archref_element = ET.Element('archref')
arch_root.find('bibliography').append(archref_element)
if man.find('Collection').text:
archref_element.text = man.find('Collection').text + ", "
if man.find('UnitTitle').text:
title_element = ET.Element('title')
archref_element.append(title_element)
title_element.text = man.find('UnitTitle').text
if man.find('UnitID').text:
title_element.tail = " " + man.find('UnitID').text + ", "
if man.find('Date').text:
date_element = ET.Element('date')
if version == "ead3":
archref_element.append(date_element)
date_element.text = man.find('Date').text
else:
if man.find('UnitTitle').text:
title_element.append(date_element)
date_element.text = man.find('Date').text
else:
archref_element.append(date_element)
date_element.text = man.find('Date').text
if man.find('NormalDate').text:
date_element.set("normal", man.find('NormalDate').text)
if man.find('Reference').text:
ref_element = ET.Element('ref')
archref_element.append(ref_element)
ref_element.text = man.find('Reference').text
if man.find('ReferenceLink').text:
ref_element.set('href', man.find('ReferenceLink').text)
else:
old_biblio = arch_root.find('bibliography').attrib
old_head = arch_root.find('bibliography/head')
arch_root.find('bibliography').clear()
if old_biblio is None:
pass
else:
arch_root.find('bibliography').attrib = old_biblio
if old_head is None:
pass
else:
arch_root.find('bibliography').append(old_head)
if CSheet.find('BibliographyNote').text:
p_element = ET.Element('p')
arch_root.find('bibliography').append(p_element)
p_element.text = CSheet.find('BibliographyNote').text
for pub in CSheet.find('PublicationBibliography'):
if pub.find('Author').text or pub.find('Title').text or pub.find('Citation').text:
bibref_element = ET.Element('bibref')
arch_root.find('bibliography').append(bibref_element)
if pub.find('Author').text:
bibref_element.text = pub.find('Author').text + ", "
if pub.find('Title').text:
title_element = ET.Element('title')
bibref_element.append(title_element)
title_element.text = pub.find('Title').text
if pub.find('Citation').text:
title_element.tail = " " + pub.find('Citation').text + ", "
if pub.find('Date').text:
date_element = ET.Element('date')
if version == "ead3":
bibref_element.append(date_element)
date_element.text = pub.find('Date').text
else:
if pub.find('Title').text:
title_element.append(date_element)
date_element.text = pub.find('Date').text
else:
bibref_element.append(date_element)
date_element.text = pub.find('Date').text
if pub.find('NormalDate').text:
date_element.set("normal", pub.find('NormalDate').text)
if pub.find('Reference').text:
ref_element = ET.Element('ref')
bibref_element.append(ref_element)
ref_element.text = pub.find('Reference').text
if pub.find('ReferenceLink').text:
ref_element.set('href', pub.find('ReferenceLink').text)
for man in CSheet.find('ManuscriptBibliography'):
                    if man.find('Collection').text or man.find('UnitTitle').text or man.find('UnitID').text:
archref_element = ET.Element('archref')
arch_root.find('bibliography').append(archref_element)
if man.find('Collection').text:
archref_element.text = man.find('Collection').text + ", "
if man.find('UnitTitle').text:
title_element = ET.Element('title')
archref_element.append(title_element)
title_element.text = man.find('UnitTitle').text
if man.find('UnitID').text:
title_element.tail = " " + man.find('UnitID').text + ", "
if man.find('Date').text:
date_element = ET.Element('date')
if version == "ead3":
archref_element.append(date_element)
date_element.text = man.find('Date').text
else:
if man.find('UnitTitle').text:
title_element.append(date_element)
date_element.text = man.find('Date').text
else:
archref_element.append(date_element)
date_element.text = man.find('Date').text
if man.find('NormalDate').text:
date_element.set("normal", man.find('NormalDate').text)
if man.find('Reference').text:
ref_element = ET.Element('ref')
archref_element.append(ref_element)
ref_element.text = man.find('Reference').text
if man.find('ReferenceLink').text:
ref_element.set('href', man.find('ReferenceLink').text)
else:
old_biblio_list = arch_root.findall('bibliography')
for old_biblio in old_biblio_list:
arch_root.remove(old_biblio)
# Biographical or Administrative History Section<bioghist>
if CSheet.find('HistoricalNote/p') is None:
pass
else:
if CSheet.find('HistoricalNote/p').text:
if arch_root.find('bioghist') is None:
if "add_bio" in globals.new_elements or "add-all" in globals.add_all:
bio_element = ET.Element('bioghist')
bio_index = arch_root.getchildren().index(arch_root.find('dsc'))
arch_root.insert(bio_index, bio_element)
if CSheet.find('HistoricalNoteTitle').text:
head_element = ET.Element('head')
arch_root.find('bioghist').append(head_element)
head_element.text = CSheet.find('HistoricalNoteTitle').text
for para in CSheet.find('HistoricalNote'):
p_element = ET.Element('p')
bio_element.append(p_element)
p_element.text = para.text
else:
arch_root.find('bioghist').clear()
if CSheet.find('HistoricalNoteTitle').text:
head_element = ET.Element('head')
arch_root.find('bioghist').append(head_element)
head_element.text = CSheet.find('HistoricalNoteTitle').text
for para in CSheet.find('HistoricalNote'):
p_element = ET.Element('p')
arch_root.find('bioghist').append(p_element)
p_element.text = para.text
else:
old_hist_list = arch_root.findall('bioghist')
for old_hist in old_hist_list:
arch_root.remove(old_hist)
# Controlled Access Headings <controlaccess>
old_access = arch_root.find('controlaccess')
if CSheet.find('ControlledAccess/AccessPoint/Part') is None or CSheet.find('ControlledAccess/AccessPoint/ElementName') is None:
pass
else:
if CSheet.find('ControlledAccess/AccessPoint/Part').text and CSheet.find('ControlledAccess/AccessPoint/ElementName').text:
if arch_root.find('controlaccess') is None:
if "add_controlaccess" in globals.new_elements or "add-all" in globals.add_all:
access_element = ET.Element('controlaccess')
access_index = arch_root.getchildren().index(arch_root.find('dsc'))
arch_root.insert(access_index, access_element)
for access in CSheet.find('ControlledAccess'):
if access.find('UnitID').text:
pass
else:
if access.find('Part').text and access.find('ElementName').text:
new_element = ET.Element(access.find('ElementName').text)
access_element.append(new_element)
if version == "ead2002":
new_element.text = access.find('Part').text
else:
part_element = ET.Element('part')
new_element.append(part_element)
part_element.text = access.find('Part').text
if access.find('MARCEncoding').text:
new_element.set('encodinganalog', access.find('MARCEncoding').text)
if access.find('Identifier').text:
if version == "ead3":
new_element.set('identifier', access.find('Identifier').text)
else:
new_element.set('id', access.find('Identifier').text)
if access.find('Relator').text:
if version == "ead3":
new_element.set('relator', access.find('Relator').text)
else:
new_element.set('role', access.find('Relator').text)
if access.find('Normal').text:
new_element.set('normal', access.find('Normal').text)
if access.find('Source').text:
new_element.set('source', access.find('Source').text)
else:
if access.find('Part').text or access.find('ElementName').text:
from messages import error
error("All Access Headings must have both an Element Name and a Heading, headings without these fields will not be encoded.", False)
else:
old_access = arch_root.find('controlaccess').attrib
old_access_list = arch_root.findall('controlaccess')
for old_access_ele in old_access_list:
arch_root.remove(old_access_ele)
access_element = ET.Element('controlaccess')
access_index = arch_root.getchildren().index(arch_root.find('dsc'))
arch_root.insert(access_index, access_element)
if old_access is None:
pass
else:
access_element.attrib = old_access
for access in CSheet.find('ControlledAccess'):
if access.find('UnitID').text:
pass
else:
if access.find('Part').text and access.find('ElementName').text:
new_element = ET.Element(access.find('ElementName').text)
access_element.append(new_element)
if version == "ead2002":
new_element.text = access.find('Part').text
else:
part_element = ET.Element('part')
new_element.append(part_element)
part_element.text = access.find('Part').text
if access.find('MARCEncoding').text:
new_element.set('encodinganalog', access.find('MARCEncoding').text)
if access.find('Identifier').text:
if version == "ead3":
new_element.set('identifier', access.find('Identifier').text)
else:
new_element.set('id', access.find('Identifier').text)
if access.find('Relator').text:
if version == "ead3":
new_element.set('relator', access.find('Relator').text)
else:
new_element.set('role', access.find('Relator').text)
if access.find('Normal').text:
new_element.set('normal', access.find('Normal').text)
if access.find('Source').text:
new_element.set('source', access.find('Source').text)
else:
if access.find('Part').text or access.find('ElementName').text:
from messages import error
error("All Access Headings must have both an Element Name and a Heading, headings without these fields will not be encoded.", False)
else:
old_ca_list = arch_root.findall('controlaccess')
for old_ca in old_ca_list:
arch_root.remove(old_ca)
# Custodial History section <custodhist>
if "add_custhistory" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "custodhist", CSheet.find('CustodialHistory'), CSheet.find('CustodialHistory/Event'), add)
# Legal Status <legalstatus>
if "add_legalstatus" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "legalstatus", CSheet.find('LegalStatus'), CSheet.find('LegalStatus/Status'), add)
# Location of Originals when collection contains photocopies, etc. <originalsloc>
if "add_originalsloc" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "originalsloc", CSheet.find('LocationOriginals'), CSheet.find('LocationOriginals/Location'), add)
# Other Finding Aids <otherfindaid>
if "add_otherfa" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "otherfindaid", CSheet.find('OtherFindingAids'), CSheet.find('OtherFindingAids/Other'), add)
# Physical or technical details or requirements <phystech>
if "add_phystech" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "phystech", CSheet.find('PhysicalTechnical'), CSheet.find('PhysicalTechnical/Details'), add)
# Preferred Citation <prefercite>
if "add_prefcite" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "prefercite", CSheet.find('PreferredCitation'), CSheet.find('PreferredCitation/Example'), add)
# Processing Information <processinfo>
if "add_processinfo" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "processinfo", CSheet.find('ProcessingInformation'), CSheet.find('ProcessingInformation/Details'), add)
# Related Material <relatedmaterial>
if CSheet.find('RelatedPublications/Publication/Title') is None or CSheet.find('RelatedManuscripts/Manuscript/UnitTitle') is None:
pass
else:
if CSheet.find('RelatedPublications/Publication/Title').text or CSheet.find('RelatedManuscripts/Manuscript/UnitTitle').text:
if arch_root.find('relatedmaterial') is None:
if "add_related" in globals.new_elements or "add-all" in globals.add_all:
related_element = ET.Element('relatedmaterial')
related_index = arch_root.getchildren().index(arch_root.find('dsc'))
arch_root.insert(related_index, related_element)
if CSheet.find('RelatedMaterialNotes') is None:
pass
else:
for note in CSheet.find('RelatedMaterialNotes'):
if note.text:
p_element = ET.Element('p')
related_element.append(p_element)
p_element.text = note.text
for related in CSheet.find('RelatedPublications'):
if related.find('UnitID') is None:
pass
else:
if related.find('UnitID').text:
pass
else:
if related.find('Author').text or related.find('Title').text or related.find('Citation').text:
bibref_element = ET.Element('bibref')
related_element.append(bibref_element)
if related.find('Author').text:
bibref_element.text = related.find('Author').text + ", "
if related.find('Title').text:
title_element = ET.Element('title')
bibref_element.append(title_element)
title_element.text = related.find('Title').text
if related.find('Citation').text:
title_element.tail = " " + related.find('Citation').text + ", "
if related.find('Date').text:
date_element = ET.Element('date')
bibref_element.append(date_element)
date_element.text = related.find('Date').text
if related.find('NormalDate').text:
date_element.set("normal", related.find('NormalDate').text)
if related.find('Reference').text:
ref_element = ET.Element('ref')
bibref_element.append(ref_element)
ref_element.text = related.find('Reference').text
if related.find('ReferenceLink').text:
ref_element.set('href', related.find('ReferenceLink').text)
for relatedman in CSheet.find('RelatedManuscripts'):
                                if relatedman.find('UnitID') is None:
pass
else:
                                    if relatedman.find('UnitID').text:
pass
else:
if relatedman.find('Collection').text or relatedman.find('UnitTitle').text or relatedman.find('MaterialID').text:
archref_element = ET.Element('archref')
arch_root.find('relatedmaterial').append(archref_element)
if relatedman.find('Collection').text:
archref_element.text = relatedman.find('Collection').text + ", "
if relatedman.find('UnitTitle').text:
title_element = ET.Element('title')
archref_element.append(title_element)
title_element.text = relatedman.find('UnitTitle').text
if relatedman.find('MaterialID').text:
title_element.tail = " " + relatedman.find('MaterialID').text + ", "
if relatedman.find('Date').text:
date_element = ET.Element('date')
archref_element.append(date_element)
date_element.text = relatedman.find('Date').text
if relatedman.find('NormalDate').text:
date_element.set("normal", relatedman.find('NormalDate').text)
if relatedman.find('Reference').text:
ref_element = ET.Element('ref')
archref_element.append(ref_element)
ref_element.text = relatedman.find('Reference').text
if relatedman.find('ReferenceLink').text:
ref_element.set('href', relatedman.find('ReferenceLink').text)
else:
old_related = arch_root.find('relatedmaterial').attrib
old_head = arch_root.find('relatedmaterial/head')
arch_root.find('relatedmaterial').clear()
if old_related is None:
pass
else:
arch_root.find('relatedmaterial').attrib = old_related
if old_head is None:
pass
else:
arch_root.find('relatedmaterial').append(old_head)
for note in CSheet.find('RelatedMaterialNotes'):
if note.text:
p_element = ET.Element('p')
arch_root.find('relatedmaterial').append(p_element)
p_element.text = note.text
for related in CSheet.find('RelatedPublications'):
if related.find('UnitID') is None:
pass
else:
if related.find('UnitID').text:
pass
else:
if related.find('Author').text or related.find('Title').text or related.find('Citation').text:
bibref_element = ET.Element('bibref')
arch_root.find('relatedmaterial').append(bibref_element)
if related.find('Author').text:
bibref_element.text = related.find('Author').text + ", "
if related.find('Title').text:
title_element = ET.Element('title')
bibref_element.append(title_element)
title_element.text = related.find('Title').text
if related.find('Citation').text:
title_element.tail = " " + related.find('Citation').text + ", "
if related.find('Date').text:
date_element = ET.Element('date')
bibref_element.append(date_element)
date_element.text = related.find('Date').text
if related.find('NormalDate').text:
date_element.set("normal", related.find('NormalDate').text)
if related.find('Reference').text:
ref_element = ET.Element('ref')
bibref_element.append(ref_element)
ref_element.text = related.find('Reference').text
if related.find('ReferenceLink').text:
ref_element.set('href', related.find('ReferenceLink').text)
for relatedman in CSheet.find('RelatedManuscripts'):
                            if relatedman.find('UnitID') is None:
pass
else:
                                if relatedman.find('UnitID').text:
pass
else:
if relatedman.find('Collection').text or relatedman.find('UnitTitle').text or relatedman.find('MaterialID').text:
archref_element = ET.Element('archref')
arch_root.find('relatedmaterial').append(archref_element)
if relatedman.find('Collection').text:
archref_element.text = relatedman.find('Collection').text + ", "
if relatedman.find('UnitTitle').text:
title_element = ET.Element('title')
archref_element.append(title_element)
title_element.text = relatedman.find('UnitTitle').text
if relatedman.find('MaterialID').text:
title_element.tail = " " + relatedman.find('MaterialID').text + ", "
if relatedman.find('Date').text:
date_element = ET.Element('date')
archref_element.append(date_element)
date_element.text = relatedman.find('Date').text
if relatedman.find('NormalDate').text:
date_element.set("normal", relatedman.find('NormalDate').text)
if relatedman.find('Reference').text:
ref_element = ET.Element('ref')
archref_element.append(ref_element)
ref_element.text = relatedman.find('Reference').text
if relatedman.find('ReferenceLink').text:
ref_element.set('href', relatedman.find('ReferenceLink').text)
else:
old_related_list = arch_root.findall('relatedmaterial')
for old_related in old_related_list:
arch_root.remove(old_related)
#relations
from relations import relations
if version == "ead3":
relations(arch_root, CSheet.find('Relations'))
# Scope and Content Note <scopecontent>
if "add_scope" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "scopecontent", CSheet.find('ScopeContent'), CSheet.find('ScopeContent/p'), add)
# Separated Materials <separatedmaterial>
if "add_sepmat" in globals.new_elements or "add-all" in globals.add_all:
add = True
else:
add = False
archdescsimple(arch_root, "separatedmaterial", CSheet.find('SeparatedMaterial'), CSheet.find('SeparatedMaterial/Material'), add)
# Use Restrictions <userestrict>
if "add_userestrict" in globals.new_elements or "add-all" in globals.add_all:
                        add = True
else:
                        add = False
access_use_restrict(arch_root, CSheet.find('UseRestrictions'), "userestrict", "Use", add)
#dsc
from dsc import dsc
dsc(arch_root.find('dsc'), input_data, version)
##################################################################################################################
#archdesc elements matched to lower levels:
##################################################################################################################
from wx.lib.pubsub import pub
if "ask_gui" in globals.new_elements:
wx.CallAfter(pub.sendMessage, "update", msg="Writing <archdesc> elements to lower levels...")
#Access and Use Restricitons matched to lower levels:
from access_use_restrict import access_use_lower
if CSheet.find('CollectionID').text and CSheet.find('IDModel/CollectionSeparator').text:
collectionID = CSheet.find('CollectionID').text + CSheet.find('IDModel/CollectionSeparator').text
else:
if CSheet.find('CollectionID').text:
collectionID = CSheet.find('CollectionID').text
else:
collectionID = ""
series_separator = CSheet.find('IDModel/SeriesSeparator').text
access_use_lower(arch_root, CSheet.find('Access'), "accessrestrict", collectionID, series_separator)
access_use_lower(arch_root, CSheet.find('UseRestrictions'), "userestrict", collectionID, series_separator)
#Acquisitions matched to lower levels:
from archdesc_lower import acquisitions_lower
acquisitions_lower(arch_root, CSheet.find('AcquisitionInfo'), version, "acqinfo", collectionID, series_separator)
#Controlled Access Headings matched to lower levels:
from archdesc_lower import controlaccess_lower
controlaccess_lower(arch_root, CSheet.find('ControlledAccess'), version, "controlaccess", collectionID, series_separator)
#Related Material matched to lower levels:
from archdesc_lower import relatedmaterial_lower
relatedmaterial_lower(arch_root, CSheet.find('RelatedPublications'), CSheet.find('RelatedManuscripts'), version, "relatedmaterial", collectionID, series_separator)
#Relations matched to lower levels:
if version == "ead3":
from relations import relations_lower
relations_lower(arch_root, CSheet.find('Relations'), version, "relations", collectionID, series_separator)
#Simple archdesc elements matched to lower levels:
from archdescsimple import archdescsimple_lower
if CSheet.find('CollectionID').text and CSheet.find('IDModel/CollectionSeparator').text:
collectionID = CSheet.find('CollectionID').text + CSheet.find('IDModel/CollectionSeparator').text
else:
if CSheet.find('CollectionID').text:
collectionID = CSheet.find('CollectionID').text
else:
collectionID = ""
series_separator = CSheet.find('IDModel/SeriesSeparator').text
archdescsimple_lower(arch_root, CSheet.find('Accruals'), "accruals", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('AlternateForms'), "altformavail", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('AppraisalInfo'), "appraisal", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('LowerLevelHist'), "bioghist", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('CollectionArrangement'), "arrangement", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('CustodialHistory'), "custodhist", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('LegalStatus'), "legalstatus", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('LocationOriginals'), "originalsloc", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('OtherFindingAids'), "otherfindaid", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('PhysicalTechnical'), "phystech", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('ProcessingInformation'), "processinfo", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('LowerLevelScope'), "scopecontent", collectionID, series_separator)
archdescsimple_lower(arch_root, CSheet.find('SeparatedMaterial'), "separatedmaterial", collectionID, series_separator)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-17 05:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stream', '0016_auto_20161012_0838'),
]
operations = [
migrations.AddField(
model_name='stream',
name='active',
field=models.BooleanField(default=False),
),
]
|
import copy
from gym import Wrapper
from pythogic.base.Symbol import Symbol
from pythogic.base.Alphabet import Alphabet
from pythogic.base.Formula import AtomicFormula, PathExpressionEventually, PathExpressionSequence, And, Not, \
LogicalTrue, PathExpressionStar
from pythogic.base.utils import _to_pythomata_dfa
from pythogic.ldlf_empty_traces.LDLf_EmptyTraces import LDLf_EmptyTraces
import numpy as np
from pythomata.base.Simulator import Simulator
from pythomata.base.utils import Sink
class BreakoutRABUWrapper(Wrapper):
"""Env wrapper for bottom-up rows deletion"""
def __init__(self, env):
super().__init__(env)
self.row_symbols = [Symbol(r) for r in ["r0", "r1", "r2"]]
self.dfa = self._build_automata()
self.goal_reward = 1000
self.transition_reward = 100
self.simulator = Simulator(self.dfa)
self.last_status = None
    def reset(self):
        obs = self.env.reset()
        self.simulator.reset()
        return obs
def step(self, action):
obs, reward, done, _ = self.env.step(action)
if done:
# when we lose a life
return obs, reward, done, _
# overwrite old reward
# reward = 0
f = self.state2propositional_formula()
old_state = self.simulator.cur_state
self.simulator.make_transition(f)
new_state = self.simulator.cur_state
if new_state==Sink():
done = True
reward = -1000
elif new_state in self.dfa.accepting_states:
reward = 1000
elif old_state!=new_state:
reward = self.transition_reward
return obs, reward, done or self.env.unwrapped.state.terminal, _
def state2propositional_formula(self):
e = self.unwrapped
matrix = e.state.bricks.bricks_status_matrix
row_status = np.all(matrix==0.0, axis=1)
result = set()
for rs, sym in zip(row_status, reversed(self.row_symbols)):
if rs:
result.add(sym)
return frozenset(result)
def _build_automata(self):
rows = self.row_symbols
atoms = [AtomicFormula(r) for r in rows]
alphabet = Alphabet(set(rows))
ldlf = LDLf_EmptyTraces(alphabet)
f = PathExpressionEventually(
PathExpressionSequence.chain([
PathExpressionStar(And.chain([Not(atoms[0]), Not(atoms[1]), Not(atoms[2])])),
PathExpressionStar(And.chain([atoms[0], Not(atoms[1]), Not(atoms[2])])),
# Not(atoms[3]), Not(atoms[4]), Not(atoms[5])]),
PathExpressionStar(And.chain([atoms[0], atoms[1], Not(atoms[2])])),
# Not(atoms[3]), Not(atoms[4]), Not(atoms[5])]),
# And.chain([atoms[0], atoms[1], atoms[2]]), # Not(atoms[3]), Not(atoms[4]), Not(atoms[5])]),
# And.chain([atoms[0], atoms[1], atoms[2], atoms[3], Not(atoms[4]), Not(atoms[5])]),
# And.chain([atoms[0], atoms[1], atoms[2], atoms[3], atoms[4], Not(atoms[5])]),
# And.chain([atoms[0], atoms[1], atoms[2], atoms[3], atoms[4], atoms[5] ])
]),
And.chain([atoms[0], atoms[1], atoms[2]])
)
nfa = ldlf.to_nfa(f)
dfa = _to_pythomata_dfa(nfa)
return dfa
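# Minimal usage sketch, kept as comments: it assumes the custom Breakout-style
# environment this wrapper targets (one whose unwrapped state exposes
# `bricks.bricks_status_matrix`). "BreakoutCustom-v0" below is a hypothetical
# placeholder id, not a registered environment.
#
#     import gym
#     env = BreakoutRABUWrapper(gym.make("BreakoutCustom-v0"))
#     obs = env.reset()
#     done = False
#     while not done:
#         obs, reward, done, info = env.step(env.action_space.sample())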
|
from django.contrib import admin
from krankit.polls.models import Question, Choice, ChoiceVote
admin.site.register(Question)
admin.site.register(Choice)
admin.site.register(ChoiceVote)
|
import src.tnet as tnet
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import math
plt.style.use(['science','ieee', 'high-vis'])
def txt2list(fname):
return [line for line in open(fname)]
def read_result(fname):
df = pd.read_csv(fname)
results = df.T.values.tolist()
return results
def read_parameters(fname):
dic = {}
for line in open(fname, 'r').readlines():
p,v = line.split()
dic[p] = v
return dic
def plot_topology(netname):
netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters(net_name=netname,
experiment_name=netname + 'topo_plot')
tNet = tnet.tNet(netFile=netFile, gFile=gFile, fcoeffs=fcoeffs)
tNet.read_node_coordinates('data/pos/'+netname+'.txt')
fig, ax = tnet.plot_network(tNet.G, width=0.3)
return fig, ax
def plot_convergance(fname_sys, fname_usr):
return 1
def plot_costPenRate(fname, ax, parameters, k):
j, cavsCost, noncavsCost, totCost, cavsFlow, nonCavsFlow, pedestrianFlow, rebalancingFlow, bikeFlow, subwayFlow = read_result(fname)
if k == 'A':
for i in range(len(cavsCost)):
cavsCost[i] = max(noncavsCost[i], cavsCost[i])
totCost[i] = max(noncavsCost[i], totCost[i])
j = [round(.1 * i, 1) for i in range(11)]
lstyle = ['-', '--', ':']
i = 0
alg = 'CARS'+parameters['n:']
ax.plot(j, noncavsCost, label='Private', linestyle=lstyle[i], linewidth=2, marker='x')
ax.plot(j, cavsCost, label='AMoDs', linestyle=lstyle[i], linewidth=2, marker="^")
ax.plot(j, totCost, label='Total', linestyle=lstyle[i], linewidth=2, marker='o')
ax.legend()
ax.set_xlabel('Penetration Rate')
ax.set_ylabel('Avg. Travel Time (min)')
ax.set_xlim((0, 1))
    ax.legend(framealpha=0.8, fontsize='small', frameon=True, facecolor='w', fancybox=False)
#ax.legend.get_frame().set_linewidth(0.2)
return ax
def plot_flowPenRate(fname, ax, parameters):
n, cavsCost, noncavsCost, totCost, cavsFlow, nonCavsFlow, pedestrianFlow, rebalancingFlow, bikeFlow, subwayFlow = read_result(fname)
width = 0.9
x_name = [round(.1 * i, 1) for i in range(11)]
x = list(range(len(x_name)))
p1 = ax.bar(x, nonCavsFlow, width, label='Private')
p2 = ax.bar(x, cavsFlow, width,
bottom=nonCavsFlow, label='AMoD')
p3 = ax.bar(x, rebalancingFlow, width,
bottom=[cavsFlow[i] + nonCavsFlow[i] for i in range(len(cavsFlow))], label='Rebalancing')
if sum(subwayFlow)>10:
p6 = ax.bar(x, subwayFlow, width,
bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] + pedestrianFlow[i] + bikeFlow[i] for i in
range(len(cavsFlow))], label='Subway')
if sum(pedestrianFlow)>10:
p4 = ax.bar(x, pedestrianFlow, width,
bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] for i in range(len(cavsFlow))], label='Pedestrian')
if sum(bikeFlow)>10:
p5 = ax.bar(x, bikeFlow, width,
bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] + pedestrianFlow[i] for i in
range(len(cavsFlow))], label='Biking')
ax.set_ylabel('Miles per mode of transport')
ax.set_xlabel('Penetration rate')
ax.set_xticks(x)
ax.set_xticklabels(x_name)
    ax.legend(framealpha=0.8, fontsize='small', frameon=True, loc=3, facecolor='w', fancybox=False)
#ax.legend.get_frame().set_linewidth(0.2)
return ax
'''
dire = '2021-01-08_11:51:44_penRate_NYC_1.5ASB_Reb_True'
fname = 'results/' + dire + '/results.csv'
parameters = read_parameters('results/' + dire + '/parameters.txt' )
#print(read_result(fname))
fig, ax = plt.subplots(1 ,figsize=(2.5,2))
plot_costPenRate(fname, ax, parameters)
plt.savefig('a.pdf')
fig, ax = plt.subplots(1 ,figsize=(3.6,2))
plot_flowPenRate(fname, ax, parameters)
plt.savefig('b.pdf')
'''
# comparison
def plot_comparison(fnames, out):
fig, ax = plt.subplots(ncols=2,
nrows=len(fnames),
# width_ratios=[1,2],
gridspec_kw={'width_ratios':[1,2]},
figsize=(3.6*1.7, 1.7*len(fnames)),
#sharex=True,
sharey=False)
j = 0
for f in fnames:
fname = 'results/' + f + '/results.csv'
parameters = read_parameters('results/' + f + '/parameters.txt' )
if out =='1c':
plot_costPenRate(fname, ax[j,0], parameters, 'A')
else:
plot_costPenRate(fname, ax[j,0], parameters, 'B')
plot_flowPenRate(fname, ax[j,1], parameters)
j +=1
#plt.legend(frameon=True, fancybox=False)
plt.tight_layout()
plt.savefig(out+'.pdf')
#plt.show()
one = '2021-01-08_11/50/19_penRate_NYC_1.0A_Reb_True'.replace('/', ':')
two = '2021-01-08_11/50/08_penRate_NYC_1.5A_Reb_True'.replace('/', ':')
three = '2021-01-08_11/51/44_penRate_NYC_2.0A_Reb_True'.replace('/', ':')
four = '2021-01-08_11/51/44_penRate_NYC_4.0A_Reb_True'.replace('/', ':')
fnames = [one, two, three, four]
plot_comparison(fnames,'1c')
one = '2021-01-08_11/50/19_penRate_NYC_1.0AS_Reb_True'.replace('/', ':')
two = '2021-01-08_11/50/08_penRate_NYC_1.5AS_Reb_True'.replace('/', ':')
three = '2021-01-08_11/51/44_penRate_NYC_2.0AS_Reb_True'.replace('/', ':')
four = '2021-01-08_11/51/43_penRate_NYC_4.0AS_Reb_True'.replace('/', ':')
fnames = [one, two, three, four]
plot_comparison(fnames,'1_5c')
one = '2021-01-08_11/50/08_penRate_NYC_1.0ASP_Reb_True'.replace('/', ':')
two = '2021-01-08_11/51/48_penRate_NYC_1.5ASP_Reb_True'.replace('/', ':')
three = '2021-01-08_11/51/44_penRate_NYC_2.0ASP_Reb_True'.replace('/', ':')
four = '2021-01-08_11/52/40_penRate_NYC_4.0ASP_Reb_True'.replace('/', ':')
fnames = [one, two, three, four]
plot_comparison(fnames,'2c')
one = '2021-01-08_11/50/08_penRate_NYC_1.0ASPB_Reb_True'.replace('/', ':')
two = '2021-01-08_11/51/44_penRate_NYC_1.5ASPB_Reb_True'.replace('/', ':')
three = '2021-01-12_00:58:41_penRate_NYC_2.0ASPB_Reb_True'.replace('/', ':')
four = '2021-01-14_02:00:28_penRate_NYC_4.0ASPB_Reb_True'.replace('/', ':')
fnames = [one, two, three, four]
plot_comparison(fnames,'4c')
one = '2021-01-08_11/51/44_penRate_NYC_2.0A_Reb_True'.replace('/', ':')
two = '2021-01-08_11/51/44_penRate_NYC_2.0AS_Reb_True'.replace('/', ':')
three = '2021-01-08_11/51/44_penRate_NYC_2.0ASP_Reb_True'.replace('/', ':')
four = '2021-01-12_00:58:41_penRate_NYC_2.0ASPB_Reb_True'.replace('/', ':')
fnames = [one, two, three, four]
plot_comparison(fnames,'4c')
|
class BinTree:
def __init__(self):
'''Container for structuring and handling all nodes used in an option-like asset.
        Creates a generic binomial option tree by planting an option at its root.
'''
class Binodes:
def __init__(self, u_node, d_node):
            # Child nodes one step up and one step down in the binomial lattice.
            self.u_node = u_node
            self.d_node = d_node
|
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w&u3p_ohh-&rs3i-)qwcgf55d6td29($1zch4(tudt$97foaj*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
|
import random
import torch
import sys
from contextlib import closing
from torch.multiprocessing import Pool
from random import randint
from exploration_strategies.OUNoise import OrnsteinUhlenbeckActionNoise
class Parallel_Experience_Generator(object):
""" Plays n episode in parallel using a fixed agent. """
def __init__(self, environment, policy, seed, hyperparameters, action_size, use_GPU=False, action_choice_output_columns=None):
self.use_GPU = use_GPU
self.environment = environment
self.policy = policy
self.action_choice_output_columns = action_choice_output_columns
self.hyperparameters = hyperparameters
self.noise = OrnsteinUhlenbeckActionNoise(mu=[0 for _ in range(self.environment.action_shape[1])],
sigma=0.15,
theta=.01,
dt=1e-2,
seed=seed)
def play_n_episodes(self, n):
"""Plays n episodes in parallel using the fixed policy and returns the data"""
with closing(Pool(processes=n)) as pool:
results = pool.map(self, range(n))
pool.terminate()
states_for_all_episodes = [episode[0] for episode in results]
actions_for_all_episodes = [episode[1] for episode in results]
rewards_for_all_episodes = [episode[2] for episode in results]
return states_for_all_episodes, actions_for_all_episodes, rewards_for_all_episodes
def play_1_episode(self, epsilon_exploration):
"""Plays 1 episode using the fixed policy and returns the data"""
state = self.reset_game()
done = False
episode_states = []
episode_actions = []
episode_rewards = []
while not done:
action = self.pick_action(self.policy, state)
next_state, reward, done, _ = self.environment.step(action)
episode_states.append(state)
episode_actions.append(action)
episode_rewards.append(reward)
state = next_state
return episode_states, episode_actions, episode_rewards
def reset_game(self):
"""Resets the game environment so it is ready to play a new episode"""
seed = randint(0, sys.maxsize)
torch.manual_seed(seed) # Need to do this otherwise each worker generates same experience
state = self.environment.reset()
return state
def pick_action(self, policy, state):
state = torch.from_numpy(state).float().unsqueeze(0)
actor_output = policy(state)
if self.action_choice_output_columns is not None:
actor_output = actor_output[:, self.action_choice_output_columns]
        action_distribution = self.create_distributions(actor_output, self.environment.action_size)
action = action_distribution.sample().cpu()
action += torch.Tensor(self.noise())
return action.detach().numpy()
@staticmethod
def create_distributions(policy_output, number_of_actions):
means = policy_output[:, :number_of_actions].squeeze(0)
stds = policy_output[:, number_of_actions:].squeeze(0)
action_distribution = torch.distributions.normal.Normal(means.squeeze(0), torch.abs(stds))
return action_distribution
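# Minimal usage sketch, kept as comments: `env` and `policy` are illustrative
# names, not defined in this excerpt. `env` is assumed to be a Gym-style
# environment exposing `action_shape` and `action_size`; `policy` is assumed to
# be a torch module mapping a state to concatenated action means and stds.
#
#     generator = Parallel_Experience_Generator(env, policy, seed=1,
#                                               hyperparameters={},
#                                               action_size=env.action_size)
#     states, actions, rewards = generator.play_1_episode(epsilon_exploration=None)
#
# `play_n_episodes` maps the generator object itself over a worker pool, so it
# additionally relies on a `__call__` hook that is not shown in this excerpt.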
|
import copy
import os
import re
from typing import Any, Dict, List, Optional, Set, Tuple
from unittest import mock
import ujson
from django.conf import settings
from django.test import TestCase, override_settings
from zerver.lib import bugdown, mdiff
from zerver.lib.actions import (
do_add_alert_words,
do_remove_realm_emoji,
do_set_realm_property,
do_set_user_display_setting,
)
from zerver.lib.alert_words import get_alert_word_automaton
from zerver.lib.create_user import create_user
from zerver.lib.emoji import get_emoji_url
from zerver.lib.exceptions import BugdownRenderingException
from zerver.lib.mention import possible_mentions, possible_user_group_mentions
from zerver.lib.message import render_markdown
from zerver.lib.request import JsonableError
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_runner import slow
from zerver.lib.tex import render_tex
from zerver.lib.user_groups import create_user_group
from zerver.models import (
MAX_MESSAGE_LENGTH,
Message,
Realm,
RealmEmoji,
RealmFilter,
Stream,
UserGroup,
UserMessage,
UserProfile,
flush_per_request_caches,
flush_realm_filter,
get_client,
get_realm,
get_stream,
realm_filters_for_realm,
realm_in_local_realm_filters_cache,
)
class FencedBlockPreprocessorTest(TestCase):
def test_simple_quoting(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
markdown = [
'~~~ quote',
'hi',
'bye',
'',
'',
]
expected = [
'',
'> hi',
'> bye',
'',
'',
'',
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def test_serial_quoting(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
markdown = [
'~~~ quote',
'hi',
'~~~',
'',
'~~~ quote',
'bye',
'',
'',
]
expected = [
'',
'> hi',
'',
'',
'',
'> bye',
'',
'',
'',
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def test_serial_code(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code # type: ignore[assignment] # mypy doesn't allow monkey-patching functions
processor.placeholder = lambda s: '**' + s.strip('\n') + '**' # type: ignore[assignment] # https://github.com/python/mypy/issues/708
markdown = [
'``` .py',
'hello()',
'```',
'',
'```vb.net',
'goodbye()',
'```',
'',
'```c#',
'weirdchar()',
'```',
'',
'```',
'no-highlight()',
'```',
'',
]
expected = [
'',
'**py:hello()**',
'',
'',
'',
'**vb.net:goodbye()**',
'',
'',
'',
'**c#:weirdchar()**',
'',
'',
'',
'**:no-highlight()**',
'',
'',
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def test_nested_code(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code # type: ignore[assignment] # mypy doesn't allow monkey-patching functions
processor.placeholder = lambda s: '**' + s.strip('\n') + '**' # type: ignore[assignment] # https://github.com/python/mypy/issues/708
markdown = [
'~~~ quote',
'hi',
'``` .py',
'hello()',
'```',
'',
'',
]
expected = [
'',
'> hi',
'',
'> **py:hello()**',
'',
'',
'',
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def bugdown_convert(content: str) -> str:
return bugdown.convert(
content=content,
message_realm=get_realm('zulip'),
)
class BugdownMiscTest(ZulipTestCase):
def test_diffs_work_as_expected(self) -> None:
str1 = "<p>The quick brown fox jumps over the lazy dog. Animal stories are fun, yeah</p>"
str2 = "<p>The fast fox jumps over the lazy dogs and cats. Animal stories are fun</p>"
expected_diff = "\u001b[34m-\u001b[0m <p>The \u001b[33mquick brown\u001b[0m fox jumps over the lazy dog. Animal stories are fun\u001b[31m, yeah\u001b[0m</p>\n\u001b[34m+\u001b[0m <p>The \u001b[33mfast\u001b[0m fox jumps over the lazy dog\u001b[32ms and cats\u001b[0m. Animal stories are fun</p>\n"
self.assertEqual(mdiff.diff_strings(str1, str2), expected_diff)
def test_get_possible_mentions_info(self) -> None:
realm = get_realm('zulip')
def make_user(email: str, full_name: str) -> UserProfile:
return create_user(
email=email,
password='whatever',
realm=realm,
full_name=full_name,
short_name='whatever',
)
fred1 = make_user('fred1@example.com', 'Fred Flintstone')
fred1.is_active = False
fred1.save()
fred2 = make_user('fred2@example.com', 'Fred Flintstone')
fred3 = make_user('fred3@example.com', 'Fred Flintstone')
fred3.is_active = False
fred3.save()
fred4 = make_user('fred4@example.com', 'Fred Flintstone')
lst = bugdown.get_possible_mentions_info(realm.id, {'Fred Flintstone', 'cordelia LEAR', 'Not A User'})
set_of_names = set(map(lambda x: x['full_name'].lower(), lst))
self.assertEqual(set_of_names, {'fred flintstone', 'cordelia lear'})
by_id = {
row['id']: row
for row in lst
}
self.assertEqual(by_id.get(fred2.id), dict(
email=fred2.email,
full_name='Fred Flintstone',
id=fred2.id,
))
self.assertEqual(by_id.get(fred4.id), dict(
email=fred4.email,
full_name='Fred Flintstone',
id=fred4.id,
))
def test_mention_data(self) -> None:
realm = get_realm('zulip')
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
content = '@**King Hamlet** @**Cordelia lear**'
mention_data = bugdown.MentionData(realm.id, content)
self.assertEqual(mention_data.get_user_ids(), {hamlet.id, cordelia.id})
self.assertEqual(mention_data.get_user_by_id(hamlet.id), dict(
email=hamlet.email,
full_name=hamlet.full_name,
id=hamlet.id,
))
user = mention_data.get_user_by_name('king hamLET')
assert(user is not None)
self.assertEqual(user['email'], hamlet.email)
self.assertFalse(mention_data.message_has_wildcards())
content = '@**King Hamlet** @**Cordelia lear** @**all**'
mention_data = bugdown.MentionData(realm.id, content)
self.assertTrue(mention_data.message_has_wildcards())
def test_invalid_katex_path(self) -> None:
with self.settings(DEPLOY_ROOT="/nonexistent"):
with mock.patch('logging.error') as mock_logger:
render_tex("random text")
mock_logger.assert_called_with("Cannot find KaTeX for latex rendering!")
class BugdownListPreprocessorTest(ZulipTestCase):
# We test that the preprocessor inserts blank lines at correct places.
# We use <> to indicate that we need to insert a blank line here.
def split_message(self, msg: str) -> Tuple[List[str], List[str]]:
original = msg.replace('<>', '').split('\n')
expected = re.split(r'\n|<>', msg)
return original, expected
def test_basic_list(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
original, expected = self.split_message('List without a gap\n<>* One\n* Two')
self.assertEqual(preprocessor.run(original), expected)
def test_list_after_quotes(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
original, expected = self.split_message('```quote\nSomething\n```\n\nList without a gap\n<>* One\n* Two')
self.assertEqual(preprocessor.run(original), expected)
def test_list_in_code(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
original, expected = self.split_message('```\nList without a gap\n* One\n* Two\n```')
self.assertEqual(preprocessor.run(original), expected)
def test_complex_nesting_with_different_fences(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
msg = """```quote
In quote. We should convert a list here:<>
* one
* two
~~~
This is a nested code fence, do not make changes here:
* one
* two
````quote
Quote in code fence. Should not convert:
* one
* two
````
~~~
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
original, expected = self.split_message(msg)
self.assertEqual(preprocessor.run(original), expected)
def test_complex_nesting_with_same_fence(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
msg = """```quote
In quote. We should convert a list here:<>
* one
* two
```python
This is a nested code fence, do not make changes here:
* one
* two
```quote
Quote in code fence. Should not convert:
* one
* two
```
```
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
original, expected = self.split_message(msg)
self.assertEqual(preprocessor.run(original), expected)
class BugdownTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
bugdown.clear_state_for_testing()
def assertEqual(self, first: Any, second: Any, msg: str = "") -> None:
if isinstance(first, str) and isinstance(second, str):
if first != second:
raise AssertionError("Actual and expected outputs do not match; showing diff.\n" +
mdiff.diff_strings(first, second) + msg)
else:
super().assertEqual(first, second)
def load_bugdown_tests(self) -> Tuple[Dict[str, Any], List[List[str]]]:
test_fixtures = {}
with open(os.path.join(os.path.dirname(__file__), 'fixtures/markdown_test_cases.json')) as f:
data = ujson.load(f)
for test in data['regular_tests']:
test_fixtures[test['name']] = test
return test_fixtures, data['linkify_tests']
def test_bugdown_no_ignores(self) -> None:
# We do not want any ignored tests to be committed and merged.
format_tests, linkify_tests = self.load_bugdown_tests()
for name, test in format_tests.items():
message = f'Test "{name}" shouldn\'t be ignored.'
is_ignored = test.get('ignore', False)
self.assertFalse(is_ignored, message)
@slow("Aggregate of runs dozens of individual markdown tests")
def test_bugdown_fixtures(self) -> None:
format_tests, linkify_tests = self.load_bugdown_tests()
valid_keys = {"name", "input", "expected_output",
"backend_only_rendering",
"marked_expected_output", "text_content",
"translate_emoticons", "ignore"}
for name, test in format_tests.items():
with self.subTest(markdown_test_case=name):
# Check that there aren't any unexpected keys as those are often typos
self.assertEqual(len(set(test.keys()) - valid_keys), 0)
# Ignore tests if specified
if test.get('ignore', False):
continue # nocoverage
if test.get('translate_emoticons', False):
# Create a userprofile and send message with it.
user_profile = self.example_user('othello')
do_set_user_display_setting(user_profile, 'translate_emoticons', True)
msg = Message(sender=user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, test['input'])
else:
converted = bugdown_convert(test['input'])
self.assertEqual(converted, test['expected_output'])
def replaced(payload: str, url: str, phrase: str='') -> str:
if url[:4] == 'http':
href = url
elif '@' in url:
href = 'mailto:' + url
else:
href = 'http://' + url
return payload % (f"<a href=\"{href}\">{url}</a>",)
print("Running Bugdown Linkify tests")
with mock.patch('zerver.lib.url_preview.preview.link_embed_data_from_cache', return_value=None):
for inline_url, reference, url in linkify_tests:
try:
match = replaced(reference, url, phrase=inline_url)
except TypeError:
match = reference
converted = bugdown_convert(inline_url)
self.assertEqual(match, converted)
def test_inline_file(self) -> None:
msg = 'Check out this file file:///Volumes/myserver/Users/Shared/pi.py'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Check out this file <a href="file:///Volumes/myserver/Users/Shared/pi.py">file:///Volumes/myserver/Users/Shared/pi.py</a></p>')
bugdown.clear_state_for_testing()
with self.settings(ENABLE_FILE_LINKS=False):
realm = Realm.objects.create(string_id='file_links_test')
bugdown.maybe_update_markdown_engines(realm.id, False)
converted = bugdown.convert(msg, message_realm=realm)
self.assertEqual(converted, '<p>Check out this file file:///Volumes/myserver/Users/Shared/pi.py</p>')
def test_inline_bitcoin(self) -> None:
msg = 'To bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa or not to bitcoin'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>To <a href="bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa">bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa</a> or not to bitcoin</p>')
def test_inline_youtube(self) -> None:
msg = 'Check out the debate: http://www.youtube.com/watch?v=hx1mjT73xYE'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Check out the debate: <a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg"></a></div>')
msg = 'http://www.youtube.com/watch?v=hx1mjT73xYE'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg"></a></div>')
msg = 'https://youtu.be/hx1mjT73xYE'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://youtu.be/hx1mjT73xYE">https://youtu.be/hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="https://youtu.be/hx1mjT73xYE"><img src="https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg"></a></div>')
msg = 'https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo'
not_converted = bugdown_convert(msg)
self.assertEqual(not_converted, '<p><a href="https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>')
msg = 'https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>\n<div class="youtube-video message_inline_image"><a data-id="O5nskjZ_GoI" href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"><img src="https://i.ytimg.com/vi/O5nskjZ_GoI/default.jpg"></a></div>')
msg = 'http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw">http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw</a></p>\n<div class="youtube-video message_inline_image"><a data-id="nOJgD4fcZhI" href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"><img src="https://i.ytimg.com/vi/nOJgD4fcZhI/default.jpg"></a></div>')
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_inline_vimeo(self) -> None:
msg = 'Check out the debate: https://vimeo.com/246979354'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Check out the debate: <a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>')
msg = 'https://vimeo.com/246979354'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>')
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_thumbnail_url(self) -> None:
realm = get_realm("zephyr")
msg = '[foobar](/user_uploads/{realm_id}/50/w2G6ok9kr8AMCQCTNAUOFMln/IMG_0677.JPG)'
msg = msg.format(realm_id=realm.id)
thumbnail_img = '<img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&size=thumbnail"><'
thumbnail_img = thumbnail_img.format(realm_id=realm.id)
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
msg = 'https://www.google.com/images/srpr/logo4w.png'
thumbnail_img = '<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail">'
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
msg = 'www.google.com/images/srpr/logo4w.png'
thumbnail_img = '<img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail">'
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
msg = 'https://www.google.com/images/srpr/logo4w.png'
thumbnail_img = '<div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img src="https://www.google.com/images/srpr/logo4w.png"></a></div>'
with self.settings(THUMBNAIL_IMAGES=False):
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
# Any url which is not an external link and doesn't start with
# /user_uploads/ is not thumbnailed
msg = '[foobar](/static/images/cute/turtle.png)'
thumbnail_img = '<div class="message_inline_image"><a href="/static/images/cute/turtle.png" title="foobar"><img src="/static/images/cute/turtle.png"></a></div>'
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
msg = '[foobar](/user_avatars/{realm_id}/emoji/images/50.png)'
msg = msg.format(realm_id=realm.id)
thumbnail_img = '<div class="message_inline_image"><a href="/user_avatars/{realm_id}/emoji/images/50.png" title="foobar"><img src="/user_avatars/{realm_id}/emoji/images/50.png"></a></div>'
thumbnail_img = thumbnail_img.format(realm_id=realm.id)
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
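# Disabling inline_image_preview on the realm should suppress previews even
# when the server-wide INLINE_IMAGE_PREVIEW setting is enabled.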
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview(self) -> None:
with_preview = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=thumbnail"></a></div>'
without_preview = '<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>'
content = 'http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, with_preview)
realm = msg.get_realm()
setattr(realm, 'inline_image_preview', False)
realm.save()
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, without_preview)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_quoted_blocks(self) -> None:
content = 'http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg'
expected = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
content = '>http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!'
expected = '<blockquote>\n<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
content = '>* http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!'
expected = '<blockquote>\n<ul>\n<li><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></li>\n</ul>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
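# Image previews should be appended after the paragraph, in the order the
# links appear; links inside blockquotes are not previewed.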
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview_order(self) -> None:
realm = get_realm("zulip")
content = 'http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg'
expected = '<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg</a></p>\n<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
content = 'http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\n\n>http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\n\n* http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg\n* https://www.google.com/images/srpr/logo4w.png'
expected = '<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=thumbnail"></a></div><blockquote>\n<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a></p>\n</blockquote>\n<ul>\n<li><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=thumbnail"></a></div></li>\n<li><div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail"></a></div></li>\n</ul>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
content = 'Test 1\n[21136101110_1dde1c1a7e_o.jpg](/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg) \n\nNext Image\n[IMG_20161116_023910.jpg](/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg) \n\nAnother Screenshot\n[Screenshot-from-2016-06-01-16-22-42.png](/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png)'
content = content.format(realm_id=realm.id)
expected = '<p>Test 1<br>\n<a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg">21136101110_1dde1c1a7e_o.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg" title="21136101110_1dde1c1a7e_o.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&size=thumbnail"></a></div><p>Next Image<br>\n<a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg">IMG_20161116_023910.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg" title="IMG_20161116_023910.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&size=thumbnail"></a></div><p>Another Screenshot<br>\n<a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png">Screenshot-from-2016-06-01-16-22-42.png</a></p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png" title="Screenshot-from-2016-06-01-16-22-42.png"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&size=thumbnail"></a></div>'
expected = expected.format(realm_id=realm.id)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
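# Wikipedia "File:" page URLs should be rewritten to the corresponding
# Special:FilePath URL so that the actual image file gets previewed.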
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_corrected_image_source(self) -> None:
# testing only wikipedia because linx.li urls can be expected to expire
content = 'https://en.wikipedia.org/wiki/File:Wright_of_Derby,_The_Orrery.jpg'
expected = '<div class="message_inline_image"><a href="https://en.wikipedia.org/wiki/Special:FilePath/File:Wright_of_Derby,_The_Orrery.jpg"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=full" src="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
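# image_preview_enabled() should respect the server setting when called with
# no arguments, the realm setting when given a message, and always return
# False when no_previews=True.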
@override_settings(INLINE_IMAGE_PREVIEW=False)
def test_image_preview_enabled(self) -> None:
ret = bugdown.image_preview_enabled()
self.assertEqual(ret, False)
settings.INLINE_IMAGE_PREVIEW = True
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm = message.get_realm()
ret = bugdown.image_preview_enabled()
self.assertEqual(ret, True)
ret = bugdown.image_preview_enabled(no_previews=True)
self.assertEqual(ret, False)
ret = bugdown.image_preview_enabled(message, realm)
self.assertEqual(ret, True)
ret = bugdown.image_preview_enabled(message)
self.assertEqual(ret, True)
ret = bugdown.image_preview_enabled(message, realm,
no_previews=True)
self.assertEqual(ret, False)
ret = bugdown.image_preview_enabled(message, no_previews=True)
self.assertEqual(ret, False)
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_url_embed_preview_enabled(self) -> None:
sender_user_profile = self.example_user('othello')
message = copy.deepcopy(Message(sender=sender_user_profile, sending_client=get_client("test")))
realm = message.get_realm()
realm.inline_url_embed_preview = True # off by default
realm.save(update_fields=['inline_url_embed_preview'])
ret = bugdown.url_embed_preview_enabled()
self.assertEqual(ret, False)
settings.INLINE_URL_EMBED_PREVIEW = True
ret = bugdown.url_embed_preview_enabled()
self.assertEqual(ret, True)
ret = bugdown.url_embed_preview_enabled(no_previews=True)
self.assertEqual(ret, False)
ret = bugdown.url_embed_preview_enabled(message, realm)
self.assertEqual(ret, True)
ret = bugdown.url_embed_preview_enabled(message)
self.assertEqual(ret, True)
ret = bugdown.url_embed_preview_enabled(message, no_previews=True)
self.assertEqual(ret, False)
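# Dropbox previews rely on Open Graph metadata fetched from the shared page;
# fetch_open_graph_image is mocked below so these tests don't hit the network.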
def test_inline_dropbox(self) -> None:
msg = 'Look at how hilarious our old office was: https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG'
image_info = {'image': 'https://photos-4.dropbox.com/t/2/AABIre1oReJgPYuc_53iv0IHq1vUzRaDg2rrCfTpiWMccQ/12/129/jpeg/1024x1024/2/_/0/4/IMG_0923.JPG/CIEBIAEgAiAHKAIoBw/ymdijjcg67hv2ta/AABz2uuED1ox3vpWWvMpBxu6a/IMG_0923.JPG', 'desc': 'Shared with Dropbox', 'title': 'IMG_0923.JPG'}
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Look at how hilarious our old office was: <a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG">https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" title="IMG_0923.JPG"><img src="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG?dl=1"></a></div>')
msg = 'Look at my hilarious drawing folder: https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl='
image_info = {'image': 'https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png', 'desc': 'Shared with Dropbox', 'title': 'Saves'}
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Look at my hilarious drawing folder: <a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=">https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=</a></p>\n<div class="message_inline_ref"><a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" title="Saves"><img src="https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png"></a><div><div class="message_inline_image_title">Saves</div><desc class="message_inline_image_desc"></desc></div></div>')
def test_inline_dropbox_preview(self) -> None:
# Test photo album previews
msg = 'https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5'
image_info = {'image': 'https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0', 'desc': 'Shared with Dropbox', 'title': '1 photo'}
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5">https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" title="1 photo"><img src="https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0"></a></div>')
def test_inline_dropbox_negative(self) -> None:
# Make sure we're not overzealous in our conversion:
msg = 'Look at the new dropbox logo: https://www.dropbox.com/static/images/home_logo.png'
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=None):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Look at the new dropbox logo: <a href="https://www.dropbox.com/static/images/home_logo.png">https://www.dropbox.com/static/images/home_logo.png</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/static/images/home_logo.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&size=thumbnail"></a></div>')
def test_inline_dropbox_bad(self) -> None:
# Don't fail on bad dropbox links
msg = "https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM"
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=None):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM">https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM</a></p>')
def test_inline_github_preview(self) -> None:
# Test GitHub blob and asset image previews
msg = 'Test: https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Test: <a href="https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png">https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png</a></p>\n<div class="message_inline_image"><a href="https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmaster%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=full" src="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmaster%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=thumbnail"></a></div>')
msg = 'Test: https://developer.github.com/assets/images/hero-circuit-bg.png'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Test: <a href="https://developer.github.com/assets/images/hero-circuit-bg.png">https://developer.github.com/assets/images/hero-circuit-bg.png</a></p>\n<div class="message_inline_image"><a href="https://developer.github.com/assets/images/hero-circuit-bg.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=full" src="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=thumbnail"></a></div>')
def test_twitter_id_extraction(self) -> None:
self.assertEqual(bugdown.get_tweet_id('http://twitter.com/#!/VizzQuotes/status/409030735191097344'), '409030735191097344')
self.assertEqual(bugdown.get_tweet_id('http://twitter.com/VizzQuotes/status/409030735191097344'), '409030735191097344')
self.assertEqual(bugdown.get_tweet_id('http://twitter.com/VizzQuotes/statuses/409030735191097344'), '409030735191097344')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/wdaher/status/1017581858'), '1017581858')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/wdaher/status/1017581858/'), '1017581858')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/windyoona/status/410766290349879296/photo/1'), '410766290349879296')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/windyoona/status/410766290349879296/'), '410766290349879296')
def test_inline_interesting_links(self) -> None:
def make_link(url: str) -> str:
return f'<a href="{url}">{url}</a>'
normal_tweet_html = ('<a href="https://twitter.com/Twitter"'
'>@Twitter</a> '
'meets @seepicturely at #tcdisrupt cc.'
'<a href="https://twitter.com/boscomonkey"'
'>@boscomonkey</a> '
'<a href="https://twitter.com/episod"'
'>@episod</a> '
'<a href="http://t.co/6J2EgYM"'
'>http://instagr.am/p/MuW67/</a>')
mention_in_link_tweet_html = """<a href="http://t.co/@foo">http://foo.com</a>"""
media_tweet_html = ('<a href="http://t.co/xo7pAhK6n3">'
'http://twitter.com/NEVNBoston/status/421654515616849920/photo/1</a>')
emoji_in_tweet_html = """Zulip is <span aria-label=\"100\" class="emoji emoji-1f4af" role=\"img\" title="100">:100:</span>% open-source!"""
def make_inline_twitter_preview(url: str, tweet_html: str, image_html: str='') -> str:
## As of right now, all previews are mocked to be the exact same tweet
return ('<div class="inline-preview-twitter">'
'<div class="twitter-tweet">'
f'<a href="{url}">'
'<img class="twitter-avatar"'
' src="https://external-content.zulipcdn.net/external_content/1f7cd2436976d410eab8189ebceda87ae0b34ead/687474703a2f2f7062732e7477696d672e63'
'6f6d2f70726f66696c655f696d616765732f313338303931323137332f53637265656e5f73686f745f323031312d30362d30335f61745f372e33352e33'
'365f504d5f6e6f726d616c2e706e67">'
'</a>'
f'<p>{tweet_html}</p>'
'<span>- Eoin McMillan (@imeoin)</span>'
f'{image_html}'
'</div>'
'</div>')
msg = 'http://www.twitter.com'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com')))
msg = 'http://www.twitter.com/wdaher/'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com/wdaher/')))
msg = 'http://www.twitter.com/wdaher/status/3'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com/wdaher/status/3')))
# id too long
msg = 'http://www.twitter.com/wdaher/status/2879779692873154569'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com/wdaher/status/2879779692873154569')))
# id too large (i.e. tweet doesn't exist)
msg = 'http://www.twitter.com/wdaher/status/999999999999999999'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com/wdaher/status/999999999999999999')))
msg = 'http://www.twitter.com/wdaher/status/287977969287315456'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://www.twitter.com/wdaher/status/287977969287315456'),
make_inline_twitter_preview('http://www.twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
msg = 'https://www.twitter.com/wdaher/status/287977969287315456'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('https://www.twitter.com/wdaher/status/287977969287315456'),
make_inline_twitter_preview('https://www.twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
msg = 'http://twitter.com/wdaher/status/287977969287315456'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315456'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
# Repeated links will only be converted once
msg = ('http://twitter.com/wdaher/status/287977969287315456 '
'http://twitter.com/wdaher/status/287977969287315457 '
'http://twitter.com/wdaher/status/287977969287315457 '
'http://twitter.com/wdaher/status/287977969287315457')
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{} {} {} {}</p>\n{}{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315456'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315457', normal_tweet_html)))
# A max of 3 will be converted
msg = ('http://twitter.com/wdaher/status/287977969287315456 '
'http://twitter.com/wdaher/status/287977969287315457 '
'https://twitter.com/wdaher/status/287977969287315456 '
'http://twitter.com/wdaher/status/287977969287315460')
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{} {} {} {}</p>\n{}{}{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315456'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_link('https://twitter.com/wdaher/status/287977969287315456'),
make_link('http://twitter.com/wdaher/status/287977969287315460'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315457', normal_tweet_html),
make_inline_twitter_preview('https://twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
# Tweet has a mention in a URL, only the URL is linked
msg = 'http://twitter.com/wdaher/status/287977969287315458'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315458'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315458', mention_in_link_tweet_html)))
# Tweet with an image
msg = 'http://twitter.com/wdaher/status/287977969287315459'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315459'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315459',
media_tweet_html,
('<div class="twitter-image">'
'<a href="http://t.co/xo7pAhK6n3">'
'<img src="https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg:small">'
'</a>'
'</div>'))))
msg = 'http://twitter.com/wdaher/status/287977969287315460'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315460'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315460', emoji_in_tweet_html)))
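# fetch_tweet_data() should return None rather than raising when Twitter API
# credentials are not configured outside the test suite.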
def test_fetch_tweet_data_settings_validation(self) -> None:
with self.settings(TEST_SUITE=False, TWITTER_CONSUMER_KEY=None):
self.assertIs(None, bugdown.fetch_tweet_data('287977969287315459'))
def test_content_has_emoji(self) -> None:
self.assertFalse(bugdown.content_has_emoji_syntax('boring'))
self.assertFalse(bugdown.content_has_emoji_syntax('hello: world'))
self.assertFalse(bugdown.content_has_emoji_syntax(':foobar'))
self.assertFalse(bugdown.content_has_emoji_syntax('::: hello :::'))
self.assertTrue(bugdown.content_has_emoji_syntax('foo :whatever:'))
self.assertTrue(bugdown.content_has_emoji_syntax('\n:whatever:'))
self.assertTrue(bugdown.content_has_emoji_syntax(':smile: ::::::'))
def test_realm_emoji(self) -> None:
def emoji_img(name: str, file_name: str, realm_id: int) -> str:
return '<img alt="{}" class="emoji" src="{}" title="{}">'.format(
name, get_emoji_url(file_name, realm_id), name[1:-1].replace("_", " "))
realm = get_realm('zulip')
# Needs an actual Message object because that's how bugdown obtains the realm
msg = Message(sender=self.example_user('hamlet'))
converted = bugdown.convert(":green_tick:", message_realm=realm, message=msg)
realm_emoji = RealmEmoji.objects.filter(realm=realm,
name='green_tick',
deactivated=False).get()
self.assertEqual(converted, '<p>{}</p>'.format(emoji_img(':green_tick:', realm_emoji.file_name, realm.id)))
# Deactivate realm emoji.
do_remove_realm_emoji(realm, 'green_tick')
converted = bugdown.convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted, '<p>:green_tick:</p>')
def test_deactivated_realm_emoji(self) -> None:
# Deactivate realm emoji.
realm = get_realm('zulip')
do_remove_realm_emoji(realm, 'green_tick')
msg = Message(sender=self.example_user('hamlet'))
converted = bugdown.convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted, '<p>:green_tick:</p>')
def test_unicode_emoji(self) -> None:
msg = '\u2615' # ☕
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><span aria-label=\"coffee\" class="emoji emoji-2615" role=\"img\" title="coffee">:coffee:</span></p>')
msg = '\u2615\u2615' # ☕☕
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><span aria-label=\"coffee\" class="emoji emoji-2615" role=\"img\" title="coffee">:coffee:</span><span aria-label=\"coffee\" class="emoji emoji-2615" role=\"img\" title="coffee">:coffee:</span></p>')
def test_no_translate_emoticons_if_off(self) -> None:
user_profile = self.example_user('othello')
do_set_user_display_setting(user_profile, 'translate_emoticons', False)
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = ':)'
expected = '<p>:)</p>'
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
def test_same_markup(self) -> None:
msg = '\u2615' # ☕
unicode_converted = bugdown_convert(msg)
msg = ':coffee:'  # ☕
converted = bugdown_convert(msg)
self.assertEqual(converted, unicode_converted)
def test_links_in_topic_name(self) -> None:
realm = get_realm('zulip')
msg = Message(sender=self.example_user('othello'))
msg.set_topic_name("https://google.com/hello-world")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['https://google.com/hello-world'])
msg.set_topic_name("http://google.com/hello-world")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['http://google.com/hello-world'])
msg.set_topic_name("Without scheme google.com/hello-world")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['https://google.com/hello-world'])
msg.set_topic_name("Without scheme random.words/hello-world")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, [])
msg.set_topic_name("Try out http://ftp.debian.org, https://google.com/ and https://google.in/.")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['http://ftp.debian.org', 'https://google.com/', 'https://google.in/'])
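# Realm linkifiers (RealmFilter) turn text matching `pattern` into links built
# from `url_format_string`, both in message bodies and in topic names.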
def test_realm_patterns(self) -> None:
realm = get_realm('zulip')
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
realm_filter.__str__(),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
msg = Message(sender=self.example_user('othello'))
msg.set_topic_name("#444")
flush_per_request_caches()
content = "We should fix #224 and #115, but not issue#124 or #1124z or [trac #15](https://trac.example.com/ticket/16) today."
converted = bugdown.convert(content, message_realm=realm, message=msg)
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted, '<p>We should fix <a href="https://trac.example.com/ticket/224">#224</a> and <a href="https://trac.example.com/ticket/115">#115</a>, but not issue#124 or #1124z or <a href="https://trac.example.com/ticket/16">trac #15</a> today.</p>')
self.assertEqual(converted_topic, ['https://trac.example.com/ticket/444'])
msg.set_topic_name("#444 https://google.com")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['https://trac.example.com/ticket/444', 'https://google.com'])
RealmFilter(realm=realm, pattern=r'#(?P<id>[a-zA-Z]+-[0-9]+)',
url_format_string=r'https://trac.example.com/ticket/%(id)s').save()
msg = Message(sender=self.example_user('hamlet'))
content = '#ZUL-123 was fixed and code was deployed to production, also #zul-321 was deployed to staging'
converted = bugdown.convert(content, message_realm=realm, message=msg)
self.assertEqual(converted, '<p><a href="https://trac.example.com/ticket/ZUL-123">#ZUL-123</a> was fixed and code was deployed to production, also <a href="https://trac.example.com/ticket/zul-321">#zul-321</a> was deployed to staging</p>')
def assert_conversion(content: str, convert: bool=True) -> None:
converted = bugdown.convert(content, message_realm=realm, message=msg)
converted_topic = bugdown.topic_links(realm.id, content)
if convert:
self.assertTrue('trac.example.com' in converted)
self.assertEqual(len(converted_topic), 1)
self.assertTrue('trac.example.com' in converted_topic[0])
else:
self.assertTrue('trac.example.com' not in converted)
self.assertEqual(len(converted_topic), 0)
assert_conversion('Hello #123 World')
assert_conversion('Hello #123World', False)
assert_conversion('Hello#123 World', False)
assert_conversion('Hello#123World', False)
# Ideally, these should be converted, but bugdown doesn't yet
# correctly handle word-boundary detection in languages that
# don't use whitespace to separate words.
assert_conversion('チケットは#123です', False)
assert_conversion('チケットは #123です', False)
assert_conversion('チケットは#123 です', False)
assert_conversion('チケットは #123 です')
assert_conversion('(#123)')
assert_conversion('#123>')
assert_conversion('"#123"')
assert_conversion('#123@')
assert_conversion(')#123(', False)
assert_conversion('##123', False)
# Nested realm patterns should avoid double matching
RealmFilter(realm=realm, pattern=r'hello#(?P<id>[0-9]+)',
url_format_string=r'https://trac.example.com/hello/%(id)s').save()
converted_topic = bugdown.topic_links(realm.id, 'hello#123 #234')
self.assertEqual(converted_topic, ['https://trac.example.com/ticket/234', 'https://trac.example.com/hello/123'])
def test_maybe_update_markdown_engines(self) -> None:
realm = get_realm('zulip')
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
bugdown.realm_filter_data = {}
bugdown.maybe_update_markdown_engines(None, False)
all_filters = bugdown.realm_filter_data
zulip_filters = all_filters[realm.id]
self.assertEqual(len(zulip_filters), 1)
self.assertEqual(zulip_filters[0],
('#(?P<id>[0-9]{2,8})', 'https://trac.example.com/ticket/%(id)s', realm_filter.id))
def test_flush_realm_filter(self) -> None:
realm = get_realm('zulip')
def flush() -> None:
'''
flush_realm_filter is a post-save hook, so calling it
directly for testing is kind of awkward
'''
class Instance:
realm_id: Optional[int] = None
instance = Instance()
instance.realm_id = realm.id
flush_realm_filter(sender=None, instance=instance)
def save_new_realm_filter() -> None:
realm_filter = RealmFilter(realm=realm,
pattern=r"whatever",
url_format_string='whatever')
realm_filter.save()
# start fresh for our realm
flush()
self.assertFalse(realm_in_local_realm_filters_cache(realm.id))
# call this just for side effects of populating the cache
realm_filters_for_realm(realm.id)
self.assertTrue(realm_in_local_realm_filters_cache(realm.id))
# Saving a new RealmFilter should have the side effect of
# flushing the cache.
save_new_realm_filter()
self.assertFalse(realm_in_local_realm_filters_cache(realm.id))
# and flush it one more time, to make sure we don't get a KeyError
flush()
self.assertFalse(realm_in_local_realm_filters_cache(realm.id))
def test_realm_patterns_negative(self) -> None:
realm = get_realm('zulip')
RealmFilter(realm=realm, pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=r"https://trac.example.com/ticket/%(id)s").save()
boring_msg = Message(sender=self.example_user('othello'))
boring_msg.set_topic_name("no match here")
converted_boring_topic = bugdown.topic_links(realm.id, boring_msg.topic_name())
self.assertEqual(converted_boring_topic, [])
def test_is_status_message(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = '/me makes a list\n* one\n* two'
rendered_content = render_markdown(msg, content)
self.assertEqual(
rendered_content,
'<p>/me makes a list</p>\n<ul>\n<li>one</li>\n<li>two</li>\n</ul>',
)
self.assertTrue(Message.is_status_message(content, rendered_content))
content = '/me takes a walk'
rendered_content = render_markdown(msg, content)
self.assertEqual(
rendered_content,
'<p>/me takes a walk</p>',
)
self.assertTrue(Message.is_status_message(content, rendered_content))
content = '/me writes a second line\nline'
rendered_content = render_markdown(msg, content)
self.assertEqual(
rendered_content,
'<p>/me writes a second line<br>\nline</p>',
)
self.assertTrue(Message.is_status_message(content, rendered_content))
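# Rendering with realm_alert_words_automaton should record on the message
# which users' alert words appear in the content.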
def test_alert_words(self) -> None:
user_profile = self.example_user('othello')
do_add_alert_words(user_profile, ["ALERTWORD", "scaryword"])
msg = Message(sender=user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = "We have an ALERTWORD day today!"
self.assertEqual(render(msg, content), "<p>We have an ALERTWORD day today!</p>")
self.assertEqual(msg.user_ids_with_alert_words, {user_profile.id})
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "We have a NOTHINGWORD day today!"
self.assertEqual(render(msg, content), "<p>We have a NOTHINGWORD day today!</p>")
self.assertEqual(msg.user_ids_with_alert_words, set())
def test_alert_words_returns_user_ids_with_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': ['how'], 'cordelia': ['this possible'],
'iago': ['hello'], 'prospero': ['hello'],
'othello': ['how are you'], 'aaron': ['hey'],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = "hello how is this possible how are you doing today"
render(msg, content)
expected_user_ids: Set[int] = {
user_profiles['hamlet'].id, user_profiles['cordelia'].id, user_profiles['iago'].id,
user_profiles['prospero'].id, user_profiles['othello'].id,
}
# All users except aaron have their alert word appear in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_1(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': ['provisioning', 'Prod deployment'],
'cordelia': ['test', 'Prod'],
'iago': ['prod'], 'prospero': ['deployment'],
'othello': ['last'],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """Hello, everyone. Prod deployment has been completed
And this is a new line
to test out how markdown converts this into a line-ending-split array
and this is a new line
last"""
render(msg, content)
expected_user_ids: Set[int] = {
user_profiles['hamlet'].id,
user_profiles['cordelia'].id,
user_profiles['iago'].id,
user_profiles['prospero'].id,
user_profiles['othello'].id,
}
# All users have their alert word appear in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_in_french(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': ['réglementaire', 'une politique', 'une merveille'],
'cordelia': ['énormément', 'Prod'],
'iago': ['prod'], 'prospero': ['deployment'],
'othello': ['last'],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """This is to test out alert words work in languages with accented characters too
bonjour est (énormément) ce a quoi ressemble le français
et j'espère qu'il n'y n' réglementaire a pas de mots d'alerte dans ce texte français
"""
render(msg, content)
expected_user_ids: Set[int] = {user_profiles['hamlet'].id, user_profiles['cordelia'].id}
# Only hamlet and cordelia have their alert-words appear in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_empty_user_ids_with_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': [], 'cordelia': [], 'iago': [], 'prospero': [],
'othello': [], 'aaron': [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """hello how is this possible how are you doing today
This is to test that no user_ids with alert words are recorded as participating
in the sending of the message
"""
render(msg, content)
expected_user_ids: Set[int] = set()
# None of the users have their alert-words appear in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def get_mock_alert_words(self, num_words: int, word_length: int) -> List[str]:
alert_words: List[str] = ['x' * word_length] * num_words
return alert_words
def test_alert_words_with_empty_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': [],
'cordelia': [],
'iago': [],
'othello': [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """This is to test a empty alert words i.e. no user has any alert-words set"""
render(msg, content)
expected_user_ids: Set[int] = set()
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_with_huge_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': ['issue124'],
'cordelia': self.get_mock_alert_words(500, 10),
'iago': self.get_mock_alert_words(500, 10),
'othello': self.get_mock_alert_words(500, 10),
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """The code above will print 10 random values of numbers between 1 and 100.
The second line, for x in range(10), determines how many values will be printed (when you use
range(x), the number that you use in place of x will be the amount of values that you'll have
printed. if you want 20 values, use range(20). use range(5) if you only want 5 values returned,
etc.). I was talking about the issue124 on github. Then the third line: print random.randint(1,101) will automatically select a random integer
between 1 and 100 for you. The process is fairly simple
"""
render(msg, content)
expected_user_ids: Set[int] = {user_profiles['hamlet'].id}
# Only hamlet has alert-word 'issue124' present in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
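# The realm's default_code_block_language should only apply to fenced code
# blocks that don't specify a language themselves.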
def test_default_code_block_language(self) -> None:
realm = get_realm('zulip')
self.assertEqual(realm.default_code_block_language, None)
text = "```{}\nconsole.log('Hello World');\n```\n"
# Render without default language
msg_with_js = bugdown_convert(text.format('js'))
msg_with_python = bugdown_convert(text.format('python'))
msg_without_language = bugdown_convert(text.format(''))
msg_with_quote = bugdown_convert(text.format('quote'))
msg_with_math = bugdown_convert(text.format('math'))
# Render with default=javascript
do_set_realm_property(realm, 'default_code_block_language', 'javascript')
msg_without_language_default_js = bugdown_convert(text.format(''))
msg_with_python_default_js = bugdown_convert(text.format('python'))
# Render with default=python
do_set_realm_property(realm, 'default_code_block_language', 'python')
msg_without_language_default_py = bugdown_convert(text.format(''))
msg_with_none_default_py = bugdown_convert(text.format('none'))
# Render with default=quote
do_set_realm_property(realm, 'default_code_block_language', 'quote')
msg_without_language_default_quote = bugdown_convert(text.format(''))
# Render with default=math
do_set_realm_property(realm, 'default_code_block_language', 'math')
msg_without_language_default_math = bugdown_convert(text.format(''))
# Render without default language
do_set_realm_property(realm, 'default_code_block_language', None)
msg_without_language_final = bugdown_convert(text.format(''))
self.assertTrue(msg_with_js == msg_without_language_default_js)
self.assertTrue(msg_with_python == msg_with_python_default_js == msg_without_language_default_py)
self.assertTrue(msg_with_quote == msg_without_language_default_quote)
self.assertTrue(msg_with_math == msg_without_language_default_math)
self.assertTrue(msg_without_language == msg_with_none_default_py == msg_without_language_final)
# Test checking inside nested quotes
nested_text = "````quote\n\n{}\n\n{}````".format(text.format('js'), text.format(''))
do_set_realm_property(realm, 'default_code_block_language', 'javascript')
rendered = bugdown_convert(nested_text)
with_language, without_language = re.findall(r'<pre>(.*?)$', rendered, re.MULTILINE)
self.assertTrue(with_language == without_language)
do_set_realm_property(realm, 'default_code_block_language', None)
rendered = bugdown_convert(nested_text)
with_language, without_language = re.findall(r'<pre>(.*?)$', rendered, re.MULTILINE)
self.assertFalse(with_language == without_language)
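# Wildcard mentions (@**all**, @**everyone**, @**stream**) render with
# data-user-id="*" and set mentions_wildcard on the message.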
def test_mention_wildcard(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**all** test"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" data-user-id="*">'
'@all'
'</span> test</p>')
self.assertTrue(msg.mentions_wildcard)
def test_mention_everyone(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**everyone** test"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" data-user-id="*">'
'@everyone'
'</span> test</p>')
self.assertTrue(msg.mentions_wildcard)
def test_mention_stream(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**stream** test"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" data-user-id="*">'
'@stream'
'</span> test</p>')
self.assertTrue(msg.mentions_wildcard)
def test_mention_at_wildcard(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@all test"
self.assertEqual(render_markdown(msg, content),
'<p>@all test</p>')
self.assertFalse(msg.mentions_wildcard)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_at_everyone(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@everyone test"
self.assertEqual(render_markdown(msg, content),
'<p>@everyone test</p>')
self.assertFalse(msg.mentions_wildcard)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_word_starting_with_at_wildcard(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "test @alleycat.com test"
self.assertEqual(render_markdown(msg, content),
'<p>test @alleycat.com test</p>')
self.assertFalse(msg.mentions_wildcard)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_at_normal_user(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@aaron test"
self.assertEqual(render_markdown(msg, content),
'<p>@aaron test</p>')
self.assertFalse(msg.mentions_wildcard)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_single(self) -> None:
sender_user_profile = self.example_user('othello')
user_profile = self.example_user('hamlet')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@**King Hamlet**"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
'@King Hamlet</span></p>')
self.assertEqual(msg.mentions_user_ids, {user_profile.id})
def test_mention_silent(self) -> None:
sender_user_profile = self.example_user('othello')
user_profile = self.example_user('hamlet')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@_**King Hamlet**"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention silent" '
f'data-user-id="{user_id}">'
'King Hamlet</span></p>')
self.assertEqual(msg.mentions_user_ids, set())
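# possible_mentions() should return the set of mentioned names along with
# whether a wildcard mention is present.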
def test_possible_mentions(self) -> None:
def assert_mentions(content: str, names: Set[str], has_wildcards: bool=False) -> None:
self.assertEqual(possible_mentions(content), (names, has_wildcards))
assert_mentions('', set())
assert_mentions('boring', set())
assert_mentions('@**all**', set(), True)
assert_mentions('smush@**steve**smush', set())
assert_mentions(
'Hello @**King Hamlet** and @**Cordelia Lear**\n@**Foo van Barson|1234** @**all**',
{'King Hamlet', 'Cordelia Lear', 'Foo van Barson|1234'}, True,
)
def test_mention_multiple(self) -> None:
sender_user_profile = self.example_user('othello')
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "@**King Hamlet** and @**Cordelia Lear**, check this out"
self.assertEqual(render_markdown(msg, content),
'<p>'
'<span class="user-mention" '
f'data-user-id="{hamlet.id}">@King Hamlet</span> and '
'<span class="user-mention" '
f'data-user-id="{cordelia.id}">@Cordelia Lear</span>, '
'check this out</p>')
self.assertEqual(msg.mentions_user_ids, {hamlet.id, cordelia.id})
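# Mentions inside quoted blocks should render as silent mentions and must not
# be recorded in mentions_user_ids.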
def test_mention_in_quotes(self) -> None:
othello = self.example_user('othello')
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
msg = Message(sender=othello, sending_client=get_client("test"))
content = "> @**King Hamlet** and @**Othello, the Moor of Venice**\n\n @**King Hamlet** and @**Cordelia Lear**"
self.assertEqual(render_markdown(msg, content),
'<blockquote>\n<p>'
f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
' and '
f'<span class="user-mention silent" data-user-id="{othello.id}">Othello, the Moor of Venice</span>'
'</p>\n</blockquote>\n'
'<p>'
f'<span class="user-mention" data-user-id="{hamlet.id}">@King Hamlet</span>'
' and '
f'<span class="user-mention" data-user-id="{cordelia.id}">@Cordelia Lear</span>'
'</p>')
self.assertEqual(msg.mentions_user_ids, {hamlet.id, cordelia.id})
# Both fenced quote and > quote should be identical for both silent and regular syntax.
expected = ('<blockquote>\n<p>'
f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
'</p>\n</blockquote>')
content = "```quote\n@**King Hamlet**\n```"
self.assertEqual(render_markdown(msg, content), expected)
self.assertEqual(msg.mentions_user_ids, set())
content = "> @**King Hamlet**"
self.assertEqual(render_markdown(msg, content), expected)
self.assertEqual(msg.mentions_user_ids, set())
content = "```quote\n@_**King Hamlet**\n```"
self.assertEqual(render_markdown(msg, content), expected)
self.assertEqual(msg.mentions_user_ids, set())
content = "> @_**King Hamlet**"
self.assertEqual(render_markdown(msg, content), expected)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_duplicate_full_name(self) -> None:
realm = get_realm('zulip')
def make_user(email: str, full_name: str) -> UserProfile:
return create_user(
email=email,
password='whatever',
realm=realm,
full_name=full_name,
short_name='whatever',
)
sender_user_profile = self.example_user('othello')
twin1 = make_user('twin1@example.com', 'Mark Twin')
twin2 = make_user('twin2@example.com', 'Mark Twin')
cordelia = self.example_user('cordelia')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = f"@**Mark Twin|{twin1.id}**, @**Mark Twin|{twin2.id}** and @**Cordelia Lear**, hi."
self.assertEqual(render_markdown(msg, content),
'<p>'
'<span class="user-mention" '
f'data-user-id="{twin1.id}">@Mark Twin</span>, '
'<span class="user-mention" '
f'data-user-id="{twin2.id}">@Mark Twin</span> and '
'<span class="user-mention" '
f'data-user-id="{cordelia.id}">@Cordelia Lear</span>, '
'hi.</p>')
self.assertEqual(msg.mentions_user_ids, {twin1.id, twin2.id, cordelia.id})
def test_mention_invalid(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @**Nonexistent User**"
self.assertEqual(render_markdown(msg, content),
'<p>Hey @<strong>Nonexistent User</strong></p>')
self.assertEqual(msg.mentions_user_ids, set())
def test_user_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user('othello')
realm = get_realm('zulip')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
realm_filter.__str__(),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
# Create a user that potentially interferes with the pattern.
test_user = create_user(email='atomic@example.com',
password='whatever',
realm=realm,
full_name='Atomic #123',
short_name='whatever')
content = "@**Atomic #123**"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" '
f'data-user-id="{test_user.id}">'
'@Atomic #123</span></p>')
self.assertEqual(msg.mentions_user_ids, {test_user.id})
content = "@_**Atomic #123**"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention silent" '
f'data-user-id="{test_user.id}">'
'Atomic #123</span></p>')
self.assertEqual(msg.mentions_user_ids, set())
def create_user_group_for_test(self, user_group_name: str) -> UserGroup:
othello = self.example_user('othello')
return create_user_group(user_group_name, [othello], get_realm('zulip'))
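# User group mentions (@*group*) render with data-user-group-id and are
# tracked separately in mentions_user_group_ids.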
def test_user_group_mention_single(self) -> None:
sender_user_profile = self.example_user('othello')
user_profile = self.example_user('hamlet')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
user_group = self.create_user_group_for_test('support')
content = "@**King Hamlet** @*support*"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
'@King Hamlet</span> '
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
'@support</span></p>')
self.assertEqual(msg.mentions_user_ids, {user_profile.id})
self.assertEqual(msg.mentions_user_group_ids, {user_group.id})
def test_user_group_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user('othello')
realm = get_realm('zulip')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_profile = self.example_user('hamlet')
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
realm_filter.__str__(),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
# Create a user-group that potentially interferes with the pattern.
user_id = user_profile.id
user_group = self.create_user_group_for_test('support #123')
content = "@**King Hamlet** @*support #123*"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
'@King Hamlet</span> '
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
'@support #123</span></p>')
self.assertEqual(msg.mentions_user_ids, {user_profile.id})
self.assertEqual(msg.mentions_user_group_ids, {user_group.id})
def test_possible_user_group_mentions(self) -> None:
def assert_mentions(content: str, names: Set[str]) -> None:
self.assertEqual(possible_user_group_mentions(content), names)
assert_mentions('', set())
assert_mentions('boring', set())
assert_mentions('@**all**', set())
assert_mentions('smush@*steve*smush', set())
assert_mentions(
'@*support* Hello @**King Hamlet** and @**Cordelia Lear**\n'
'@**Foo van Barson** @**all**', {'support'},
)
assert_mentions(
'Attention @*support*, @*frontend* and @*backend*\ngroups.',
{'support', 'frontend', 'backend'},
)
def test_user_group_mention_multiple(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
support = self.create_user_group_for_test('support')
backend = self.create_user_group_for_test('backend')
content = "@*support* and @*backend*, check this out"
self.assertEqual(render_markdown(msg, content),
'<p>'
'<span class="user-group-mention" '
f'data-user-group-id="{support.id}">'
'@support</span> '
'and '
'<span class="user-group-mention" '
f'data-user-group-id="{backend.id}">'
'@backend</span>, '
'check this out'
'</p>')
self.assertEqual(msg.mentions_user_group_ids, {support.id, backend.id})
def test_user_group_mention_edit(self) -> None:
sender_user_profile = self.example_user('hamlet')
user_profile = self.example_user('othello')
self.create_user_group_for_test('support')
self.login('hamlet')
msg_id = self.send_stream_message(sender_user_profile,
"Denmark",
topic_name="editing",
content='test')
def update_message_and_check_flag(content: str, mentioned: bool) -> None:
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id, 'content': content,
})
self.assert_json_success(result)
um = UserMessage.objects.get(
user_profile_id=user_profile.id,
message_id=msg_id,
)
if mentioned:
self.assertIn('mentioned', um.flags_list())
else:
self.assertNotIn('mentioned', um.flags_list())
update_message_and_check_flag("@*support*", True)
update_message_and_check_flag("@*support-invalid* edited", False)
update_message_and_check_flag("@*support* edited", True)
update_message_and_check_flag("edited", False)
update_message_and_check_flag("@*support*", True)
def test_user_group_mention_invalid(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @*Nonexistent group*"
self.assertEqual(render_markdown(msg, content),
'<p>Hey @<em>Nonexistent group</em></p>')
self.assertEqual(msg.mentions_user_group_ids, set())
def test_stream_single(self) -> None:
denmark = get_stream('Denmark', get_realm('zulip'))
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark**"
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
d=denmark,
))
def test_stream_multiple(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm = get_realm('zulip')
denmark = get_stream('Denmark', realm)
scotland = get_stream('Scotland', realm)
content = "Look to #**Denmark** and #**Scotland**, there something"
self.assertEqual(render_markdown(msg, content),
'<p>Look to '
'<a class="stream" '
'data-stream-id="{denmark.id}" '
'href="/#narrow/stream/{denmark.id}-Denmark">#{denmark.name}</a> and '
'<a class="stream" '
'data-stream-id="{scotland.id}" '
'href="/#narrow/stream/{scotland.id}-Scotland">#{scotland.name}</a>, '
'there something</p>'.format(denmark=denmark, scotland=scotland))
def test_stream_case_sensitivity(self) -> None:
realm = get_realm('zulip')
case_sens = Stream.objects.create(name='CaseSens', realm=realm)
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**CaseSens**"
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream" data-stream-id="{s.id}" href="/#narrow/stream/{s.id}-{s.name}">#{s.name}</a></p>'.format(
s=case_sens,
))
def test_stream_case_sensitivity_nonmatching(self) -> None:
"""#StreamName requires the stream be spelled with the correct case
currently. If we change that in the future, we'll need to change this
test."""
realm = get_realm('zulip')
Stream.objects.create(name='CaseSens', realm=realm)
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**casesens**"
self.assertEqual(
render_markdown(msg, content),
'<p>#<strong>casesens</strong></p>')
def test_topic_single(self) -> None:
denmark = get_stream('Denmark', get_realm('zulip'))
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>some topic**"
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/some.20topic">#{d.name} > some topic</a></p>'.format(
d=denmark,
))
def test_topic_atomic_string(self) -> None:
realm = get_realm('zulip')
# Create a linkifier.
sender_user_profile = self.example_user('othello')
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
realm_filter.__str__(),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
# Create a topic link that potentially interferes with the pattern.
denmark = get_stream('Denmark', realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>#1234**"
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/.231234">#{d.name} > #1234</a></p>'.format(
d=denmark,
))
def test_topic_multiple(self) -> None:
denmark = get_stream('Denmark', get_realm('zulip'))
scotland = get_stream('Scotland', get_realm('zulip'))
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "This has two links: #**Denmark>some topic** and #**Scotland>other topic**."
self.assertEqual(
render_markdown(msg, content),
'<p>This has two links: '
'<a class="stream-topic" data-stream-id="{denmark.id}" '
'href="/#narrow/stream/{denmark.id}-{denmark.name}/topic/some.20topic">'
'#{denmark.name} > some topic</a>'
' and '
'<a class="stream-topic" data-stream-id="{scotland.id}" '
'href="/#narrow/stream/{scotland.id}-{scotland.name}/topic/other.20topic">'
'#{scotland.name} > other topic</a>'
'.</p>'.format(denmark=denmark, scotland=scotland))
def test_possible_stream_names(self) -> None:
content = '''#**test here**
This mentions #**Denmark** too.
#**garçon** #**천국** @**Ignore Person**
'''
self.assertEqual(
bugdown.possible_linked_stream_names(content),
{'test here', 'Denmark', 'garçon', '천국'},
)
def test_stream_unicode(self) -> None:
realm = get_realm('zulip')
uni = Stream.objects.create(name='привет', realm=realm)
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**привет**"
quoted_name = '.D0.BF.D1.80.D0.B8.D0.B2.D0.B5.D1.82'
href = f'/#narrow/stream/{uni.id}-{quoted_name}'
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=uni,
href=href,
))
def test_stream_atomic_string(self) -> None:
realm = get_realm('zulip')
# Create a linkifier.
sender_user_profile = self.example_user('othello')
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
realm_filter.__str__(),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
# Create a stream that potentially interferes with the pattern.
stream = Stream.objects.create(name='Stream #1234', realm=realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Stream #1234**"
href = f'/#narrow/stream/{stream.id}-Stream-.231234'
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=stream,
href=href,
))
def test_stream_invalid(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "There #**Nonexistentstream**"
self.assertEqual(render_markdown(msg, content),
'<p>There #<strong>Nonexistentstream</strong></p>')
self.assertEqual(msg.mentions_user_ids, set())
def test_image_preview_title(self) -> None:
msg = '[My favorite image](https://example.com/testimage.png)'
converted = bugdown_convert(msg)
self.assertEqual(
converted,
'<p>'
'<a href="https://example.com/testimage.png">My favorite image</a>'
'</p>\n'
'<div class="message_inline_image">'
'<a href="https://example.com/testimage.png" title="My favorite image">'
'<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=full" src="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=thumbnail">'
'</a>'
'</div>',
)
def test_mit_rendering(self) -> None:
"""Test the markdown configs for the MIT Zephyr mirroring system;
verifies almost all inline patterns are disabled, but
inline_interesting_links is still enabled"""
msg = "**test**"
realm = get_realm("zephyr")
client = get_client("zephyr_mirror")
message = Message(sending_client=client,
sender=self.mit_user("sipbtest"))
converted = bugdown.convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted,
"<p>**test**</p>",
)
msg = "* test"
converted = bugdown.convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted,
"<p>* test</p>",
)
msg = "https://lists.debian.org/debian-ctte/2014/02/msg00173.html"
converted = bugdown.convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted,
'<p><a href="https://lists.debian.org/debian-ctte/2014/02/msg00173.html">https://lists.debian.org/debian-ctte/2014/02/msg00173.html</a></p>',
)
def test_url_to_a(self) -> None:
url = 'javascript://example.com/invalidURL'
converted = bugdown.url_to_a(db_data=None, url=url, text=url)
self.assertEqual(
converted,
'javascript://example.com/invalidURL',
)
def test_disabled_code_block_processor(self) -> None:
msg = "Hello,\n\n" + \
" I am writing this message to test something. I am writing this message to test something."
converted = bugdown_convert(msg)
expected_output = '<p>Hello,</p>\n' + \
'<div class="codehilite"><pre><span></span><code>I am writing this message to test something. I am writing this message to test something.\n' + \
'</code></pre></div>'
self.assertEqual(converted, expected_output)
realm = Realm.objects.create(string_id='code_block_processor_test')
bugdown.maybe_update_markdown_engines(realm.id, True)
converted = bugdown.convert(msg, message_realm=realm, email_gateway=True)
expected_output = '<p>Hello,</p>\n' + \
'<p>I am writing this message to test something. I am writing this message to test something.</p>'
self.assertEqual(converted, expected_output)
def test_normal_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://example.com/#settings/"
self.assertEqual(
bugdown.convert(msg, message_realm=realm, message=message),
'<p><a href="http://example.com/#settings/">http://example.com/#settings/</a></p>',
)
def test_relative_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://zulip.testserver/#narrow/stream/999-hello"
self.assertEqual(
bugdown.convert(msg, message_realm=realm, message=message),
'<p><a href="#narrow/stream/999-hello">http://zulip.testserver/#narrow/stream/999-hello</a></p>',
)
def test_relative_link_streams_page(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://zulip.testserver/#streams/all"
self.assertEqual(
bugdown.convert(msg, message_realm=realm, message=message),
'<p><a href="#streams/all">http://zulip.testserver/#streams/all</a></p>',
)
def test_md_relative_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "[hello](http://zulip.testserver/#narrow/stream/999-hello)"
self.assertEqual(
bugdown.convert(msg, message_realm=realm, message=message),
'<p><a href="#narrow/stream/999-hello">hello</a></p>',
)
class BugdownApiTests(ZulipTestCase):
def test_render_message_api(self) -> None:
content = 'That is a **bold** statement'
result = self.api_post(
self.example_user("othello"),
'/api/v1/messages/render',
dict(content=content),
)
self.assert_json_success(result)
self.assertEqual(result.json()['rendered'],
'<p>That is a <strong>bold</strong> statement</p>')
def test_render_mention_stream_api(self) -> None:
"""Determines whether we're correctly passing the realm context"""
content = 'This mentions #**Denmark** and @**King Hamlet**.'
result = self.api_post(
self.example_user("othello"),
'/api/v1/messages/render',
dict(content=content),
)
self.assert_json_success(result)
user_id = self.example_user('hamlet').id
stream_id = get_stream('Denmark', get_realm('zulip')).id
self.assertEqual(result.json()['rendered'],
f'<p>This mentions <a class="stream" data-stream-id="{stream_id}" href="/#narrow/stream/{stream_id}-Denmark">#Denmark</a> and <span class="user-mention" data-user-id="{user_id}">@King Hamlet</span>.</p>')
class BugdownErrorTests(ZulipTestCase):
def test_bugdown_error_handling(self) -> None:
with self.simulated_markdown_failure():
with self.assertRaises(BugdownRenderingException):
bugdown_convert('')
def test_send_message_errors(self) -> None:
message = 'whatever'
with self.simulated_markdown_failure():
# We don't use assertRaisesRegex because it seems to not
# handle i18n properly here on some systems.
with self.assertRaises(JsonableError):
self.send_stream_message(self.example_user("othello"), "Denmark", message)
def test_ultra_long_rendering(self) -> None:
"""A rendered message with an ultra-long lenght (> 10 * MAX_MESSAGE_LENGTH)
throws an exception"""
msg = 'mock rendered message\n' * MAX_MESSAGE_LENGTH
with mock.patch('zerver.lib.bugdown.timeout', return_value=msg), \
mock.patch('zerver.lib.bugdown.bugdown_logger'):
with self.assertRaises(BugdownRenderingException):
bugdown_convert(msg)
def test_curl_code_block_validation(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
processor.run_content_validators = True
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code # type: ignore[assignment] # mypy doesn't allow monkey-patching functions
processor.placeholder = lambda s: '**' + s.strip('\n') + '**' # type: ignore[assignment] # https://github.com/python/mypy/issues/708
markdown = [
'``` curl',
'curl {{ api_url }}/v1/register',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY',
' -d "queue_id=1375801870:2942"',
'```',
]
with self.assertRaises(BugdownRenderingException):
processor.run(markdown)
def test_curl_code_block_without_validation(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code # type: ignore[assignment] # mypy doesn't allow monkey-patching functions
processor.placeholder = lambda s: '**' + s.strip('\n') + '**' # type: ignore[assignment] # https://github.com/python/mypy/issues/708
markdown = [
'``` curl',
'curl {{ api_url }}/v1/register',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY',
' -d "queue_id=1375801870:2942"',
'```',
]
expected = [
'',
'**curl:curl {{ api_url }}/v1/register',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY',
' -d "queue_id=1375801870:2942"**',
'',
'',
]
result = processor.run(markdown)
self.assertEqual(result, expected)
class BugdownAvatarTestCase(ZulipTestCase):
def test_possible_avatar_emails(self) -> None:
content = '''
hello !avatar(foo@example.com) my email is ignore@ignore.com
!gravatar(bar@yo.tv)
smushing!avatar(hamlet@example.org) is allowed
'''
self.assertEqual(
bugdown.possible_avatar_emails(content),
{'foo@example.com', 'bar@yo.tv', 'hamlet@example.org'},
)
def test_avatar_with_id(self) -> None:
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_profile = self.example_user('hamlet')
msg = f'!avatar({user_profile.email})'
converted = bugdown.convert(msg, message=message)
values = {'email': user_profile.email, 'id': user_profile.id}
self.assertEqual(
converted,
'<p><img alt="{email}" class="message_body_gravatar" src="/avatar/{id}?s=30" title="{email}"></p>'.format(**values))
def test_avatar_of_unregistered_user(self) -> None:
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
email = 'fakeuser@example.com'
msg = f'!avatar({email})'
converted = bugdown.convert(msg, message=message)
self.assertEqual(
converted,
'<p><img alt="{0}" class="message_body_gravatar" src="/avatar/{0}?s=30" title="{0}"></p>'.format(email))
|
from .types import *
from .row import *
from .column import *
from .utils import scope
from .session import *
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWriteSheetFormatPr(unittest.TestCase):
"""
Test the Worksheet _write_sheet_format_pr() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_format_pr(self):
"""Test the _write_sheet_format_pr() method"""
self.worksheet._write_sheet_format_pr()
exp = """<sheetFormatPr defaultRowHeight="15"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v1.proto.resources import location_view_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_location__view__pb2
from google.ads.google_ads.v1.proto.services import location_view_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_location__view__service__pb2
class LocationViewServiceStub(object):
"""Service to fetch location views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetLocationView = channel.unary_unary(
'/google.ads.googleads.v1.services.LocationViewService/GetLocationView',
request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_location__view__service__pb2.GetLocationViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_location__view__pb2.LocationView.FromString,
)
class LocationViewServiceServicer(object):
"""Service to fetch location views.
"""
def GetLocationView(self, request, context):
"""Returns the requested location view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LocationViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetLocationView': grpc.unary_unary_rpc_method_handler(
servicer.GetLocationView,
request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_location__view__service__pb2.GetLocationViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_location__view__pb2.LocationView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v1.services.LocationViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
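# A minimal client-side sketch (not part of the generated code; the channel
# address and resource name below are placeholders, and real Google Ads calls
# require an authenticated channel rather than an insecure one):
#
#     channel = grpc.insecure_channel('localhost:50051')
#     stub = LocationViewServiceStub(channel)
#     request = google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_location__view__service__pb2.GetLocationViewRequest(
#         resource_name='customers/1234567890/locationViews/111222333')
#     location_view = stub.GetLocationView(request)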
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, frappe and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestTest(unittest.TestCase):
pass
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
if os.environ.get('READTHEDOCS', None) == 'True':
# Run sphinx-apidoc automatically in readthedocs
# Taken from this: https://lists.torproject.org/pipermail/tor-commits/2012-September/046695.html
os.system('sphinx-apidoc -o api -T ../mitopipeline --separate')
sys.path.insert(0, os.path.abspath(os.path.pardir))
# -- Project information -----------------------------------------------------
project = u'mitopipeline'
copyright = u'2019, Timothy Kuo'
author = u'Timothy Kuo'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store', 'README.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mitopipelinedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mitopipeline.tex', u'mitopipeline Documentation',
u'Timothy Kuo', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mitopipeline', u'mitopipeline Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mitopipeline', u'mitopipeline Documentation',
author, 'mitopipeline', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
import sqlite3
from db import db
class StoreModel(db.Model):
__tablename__ = 'stores'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
items = db.relationship('ItemModel', lazy='dynamic')
def __init__(self, _id, name):
self.id = _id
self.name = name
def json_items(self):
return {'id': self.id, 'name': self.name, 'item': [item.json() for item in self.items.all()]}
def json(self):
return {'id': self.id, 'name': self.name}
@classmethod
def find_by_name(cls, name):
return cls.query.filter_by(name=name).first() # SELECT * from __tablename__ WHERE name=name LIMIT 1
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
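# A minimal usage sketch (assumes a Flask-SQLAlchemy `db` bound to an active
# application context and that the tables already exist, e.g. via
# `db.create_all()`; the store id and name are made up):
#
#     store = StoreModel(1, 'corner-shop')
#     store.save_to_db()
#     found = StoreModel.find_by_name('corner-shop')
#     print(found.json())        # {'id': 1, 'name': 'corner-shop'}
#     print(found.json_items())  # same dict plus the related items, if any
#     found.delete_from_db()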
|
import sys
import json
import datetime
from terminalplot import plot
from balsam.launcher.dag import BalsamJob
now = '_'.join(str(datetime.datetime.now(datetime.timezone.utc)).split(" "))
def max_list(l):
rl = [l[0]]
mx = l[0]
for i in range(1, len(l)):
mx = max(mx, l[i])
rl.append(mx)
return rl
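# Example: max_list computes the running maximum of a list, e.g.
# max_list([3, 1, 4, 1, 5]) -> [3, 3, 4, 4, 5].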
def rm_none(l):
return list(filter(lambda e: e != None, list(l)))
def process_data(workflow):
data = BalsamJob.objects.filter(workflow=workflow).values_list('data__reward', flat=True)
print(f'data len: {len(data)}')
raw_rewards = list(filter(lambda e: e != None, rm_none(data)))
if len(raw_rewards) == 0:
print(f'no rewards for : {workflow}')
return -1
plot([i for i in range(len(raw_rewards))], raw_rewards)
max_rewards = max_list(raw_rewards)
plot([i for i in range(len(max_rewards))], max_rewards)
data = BalsamJob.objects.filter(workflow=workflow).values_list('data__arch_seq', flat=True)
arch_seq = rm_none(data)
data = BalsamJob.objects.filter(workflow=workflow).values_list('data__id_worker', flat=True)
w = rm_none(data)
filename = f'wf-{workflow}_{now}'
print(f'filename: {filename}')
with open('data/'+filename+'.json', "w") as f:
data = dict(
fig=filename,
raw_rewards=raw_rewards,
max_rewards=max_rewards,
arch_seq=arch_seq,
id_worker=w
)
json.dump(data, f)
return 0
for wf in sys.argv[1:]:
process_data(wf)
|
from . import axes_size as Size
from .axes_divider import Divider, SubplotDivider, LocatableAxes, \
make_axes_locatable
from .axes_grid import Grid, ImageGrid, AxesGrid
#from axes_divider import make_axes_locatable
from matplotlib.cbook import warn_deprecated
warn_deprecated(since='2.1',
name='mpl_toolkits.axes_grid',
alternative='mpl_toolkits.axes_grid1 and'
' mpl_toolkits.axisartist, which provide'
' the same functionality',
obj_type='module')
|
import intcode
INPUT_FILE = 'day005.in'
def part1(filename):
source = intcode.load_from_file(filename)
i, o = [], []
# AC Unit input value
i.append(1)
modified = intcode.run_intcode(source, i, o)
return modified[0], i, o
def part2(filename):
source = intcode.load_from_file(filename)
i, o = [], []
# Thermal Radiator Controller
i.append(5)
modified = intcode.run_intcode(source, i, o)
return modified[0], i, o
if __name__ == '__main__':
result_code, instream, outstream = part1(INPUT_FILE)
print('Part1: ', result_code, 'IN', instream, 'OUT', outstream)
result_code, instream, outstream = part2(INPUT_FILE)
print('Part2: ', result_code, 'IN', instream, 'OUT', outstream)
|
# -*- coding: utf-8 -*-
"""Utilities for working with VPC subnets."""
from . import client as boto3client
def create(profile, cidr_block, vpc, availability_zone=None):
"""Create a subnet in a VPC.
Args:
profile
A profile to connect to AWS with.
cidr_block
The network range for the subnet, in CIDR notation.
For instance, "10.0.0.0/24".
vpc
The ID of the VPC you want to create the subnet in.
availability_zone
The name of the availability zone to create the subnet in.
If None, Amazon will pick one for you.
Returns:
The JSON response returned by boto3.
"""
client = boto3client.get("ec2", profile)
params = {}
params["CidrBlock"] = cidr_block
params["VpcId"] = vpc
if availability_zone:
params["AvailabilityZone"] = availability_zone
return client.create_subnet(**params)
def delete(profile, subnet):
"""Delete a subnet from a VPC.
Args:
profile
A profile to connect to AWS with.
subnet
The ID of the subnet you want to delete.
"""
client = boto3client.get("ec2", profile)
params = {}
params["SubnetId"] = subnet
return client.delete_subnet(**params)
def get(profile, filters=None):
"""Get a list of all subnets.
Args:
profile
A profile to connect to AWS with.
filters
Filters to apply to the request.
Returns:
The JSON response returned by boto3.
"""
client = boto3client.get("ec2", profile)
params = {}
if filters:
params["Filters"] = filters
return client.describe_subnets(**params)
def enable_public_ips(profile, subnet):
"""Set the subnet to give instances public IPs by default.
Args:
profile
A profile to connect to AWS with.
subnet
The ID of the subnet.
Returns:
The JSON response returned by boto3.
"""
client = boto3client.get("ec2", profile)
params = {}
params["SubnetId"] = subnet
params["MapPublicIpOnLaunch"] = {"Value": True}
return client.modify_subnet_attribute(**params)
def disable_public_ips(profile, subnet):
"""Set the subnet not to give instances public IPs by default.
Args:
profile
A profile to connect to AWS with.
subnet
The ID of the subnet.
Returns:
The JSON response returned by boto3.
"""
client = boto3client.get("ec2", profile)
params = {}
params["SubnetId"] = subnet
params["MapPublicIpOnLaunch"] = {"Value": False}
return client.modify_subnet_attribute(**params)
def tag(profile, subnet, key, value):
"""Add a tag to a subnet.
Args:
profile
A profile to connect to AWS with.
subnet
The ID of the subnet you want to tag.
key
The key/name of the tag.
value
The value of the tag.
Returns:
The response returned by boto3.
"""
client = boto3client.get("ec2", profile)
params = {}
params["Resources"] = [subnet]
params["Tags"] = [{"Key": key, "Value": value}]
return client.create_tags(**params)
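# A minimal usage sketch (the `profile` placeholder stands for whatever object
# the local `boto3client.get()` helper expects, and the VPC ID is made up; the
# response shape follows boto3's documented EC2 create_subnet output):
#
#     response = create(profile, cidr_block="10.0.1.0/24", vpc="vpc-0123456789abcdef0")
#     subnet_id = response["Subnet"]["SubnetId"]
#     tag(profile, subnet_id, key="Name", value="public-subnet-a")
#     enable_public_ips(profile, subnet_id)
#     delete(profile, subnet_id)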
|
#!/usr/bin/env python3
# Copyright 2016 The Dart project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
import time
import utils
HOST_OS = utils.GuessOS()
HOST_ARCH = utils.GuessArchitecture()
SCRIPT_DIR = os.path.dirname(sys.argv[0])
DART_ROOT = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
AVAILABLE_ARCHS = utils.ARCH_FAMILY.keys()
GN = os.path.join(DART_ROOT, 'buildtools', 'gn')
# Environment variables for default settings.
DART_USE_TOOLCHAIN = "DART_USE_TOOLCHAIN" # Use instead of --toolchain-prefix
DART_USE_SYSROOT = "DART_USE_SYSROOT" # Use instead of --target-sysroot
DART_USE_CRASHPAD = "DART_USE_CRASHPAD" # Use instead of --use-crashpad
# use instead of --platform-sdk
DART_MAKE_PLATFORM_SDK = "DART_MAKE_PLATFORM_SDK"
DART_GN_ARGS = "DART_GN_ARGS"
def ToolchainPrefix(args):
if args.toolchain_prefix:
return args.toolchain_prefix
return os.environ.get(DART_USE_TOOLCHAIN)
def TargetSysroot(args):
if args.target_sysroot:
return args.target_sysroot
return os.environ.get(DART_USE_SYSROOT)
def MakePlatformSDK():
return DART_MAKE_PLATFORM_SDK in os.environ
def GetGNArgs(args):
if args.gn_args != None:
return args.gn_args
args = os.environ.get(DART_GN_ARGS) or ""
return args.split()
def GetOutDir(mode, arch, target_os, sanitizer):
return utils.GetBuildRoot(HOST_OS, mode, arch, target_os, sanitizer)
def ToCommandLine(gn_args):
def merge(key, value):
if type(value) is bool:
return '%s=%s' % (key, 'true' if value else 'false')
elif type(value) is int:
return '%s=%d' % (key, value)
return '%s="%s"' % (key, value)
return [merge(x, y) for x, y in gn_args.items()]
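# Example: ToCommandLine({'is_debug': True, 'arm_version': 7, 'target_os': 'linux'})
# returns ['is_debug=true', 'arm_version=7', 'target_os="linux"'].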
def HostCpuForArch(arch):
if arch in ['ia32', 'arm', 'armv6', 'simarm', 'simarmv6', 'simarm_x64']:
return 'x86'
if arch in [
'x64', 'arm64', 'simarm64', 'arm_x64', 'x64c', 'arm64c', 'simarm64c'
]:
return 'x64'
# The C compiler's target.
def TargetCpuForArch(arch, target_os):
if arch in ['ia32', 'simarm', 'simarmv6']:
return 'x86'
if arch in ['x64', 'simarm64', 'simarm_x64', 'x64c', 'simarm64c']:
return 'x64'
if arch == 'arm_x64':
return 'arm'
if arch == 'arm64c':
return 'arm64'
return arch
# The Dart compiler's target.
def DartTargetCpuForArch(arch):
if arch in ['ia32']:
return 'ia32'
if arch in ['x64', 'x64c']:
return 'x64'
if arch in ['arm', 'simarm', 'simarm_x64', 'arm_x64']:
return 'arm'
if arch in ['armv6', 'simarmv6']:
return 'armv6'
if arch in ['arm64', 'simarm64', 'arm64c', 'simarm64c']:
return 'arm64'
return arch
def IsCompressedPointerArch(arch):
return arch in ['x64c', 'arm64c', 'simarm64c']
def HostOsForGn(host_os):
if host_os.startswith('macos'):
return 'mac'
if host_os.startswith('win'):
return 'win'
return host_os
# Where string_map is formatted as X1=Y1,X2=Y2 etc.
# If key is X1, returns Y1.
def ParseStringMap(key, string_map):
for m in string_map.split(','):
l = m.split('=')
if l[0] == key:
return l[1]
return None
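# Example: ParseStringMap('arm', 'arm=/opt/arm-prefix-,x64=/opt/x64-prefix-')
# returns '/opt/arm-prefix-' (the paths here are purely illustrative).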
def UseSysroot(args, gn_args):
# Don't try to use a Linux sysroot if we aren't on Linux.
if gn_args['target_os'] != 'linux' and HOST_OS != 'linux':
return False
# Don't use the sysroot if we're given another sysroot.
if TargetSysroot(args):
return False
    # Our Debian Jessie sysroot doesn't work with GCC 9
if not gn_args['is_clang']:
return False
    # Our Debian Jessie sysroot has incorrect annotations on realloc.
if gn_args['is_ubsan']:
return False
# Otherwise use the sysroot.
return True
def ToGnArgs(args, mode, arch, target_os, sanitizer, verify_sdk_hash):
gn_args = {}
host_os = HostOsForGn(HOST_OS)
if target_os == 'host':
gn_args['target_os'] = host_os
else:
gn_args['target_os'] = target_os
gn_args['host_cpu'] = HostCpuForArch(arch)
gn_args['target_cpu'] = TargetCpuForArch(arch, target_os)
gn_args['dart_target_arch'] = DartTargetCpuForArch(arch)
gn_args['dart_use_compressed_pointers'] = IsCompressedPointerArch(arch)
# Configure Crashpad library if it is used.
gn_args['dart_use_crashpad'] = (args.use_crashpad or
DART_USE_CRASHPAD in os.environ)
if gn_args['dart_use_crashpad']:
# Tell Crashpad's BUILD files which checkout layout to use.
gn_args['crashpad_dependencies'] = 'dart'
if DartTargetCpuForArch(arch) != HostCpuForArch(arch):
# Training an app-jit snapshot under a simulator is slow. Use script
# snapshots instead.
gn_args['dart_snapshot_kind'] = 'kernel'
else:
gn_args['dart_snapshot_kind'] = 'app-jit'
# We only want the fallback root certs in the standalone VM on
# Linux and Windows.
if gn_args['target_os'] in ['linux', 'win']:
gn_args['dart_use_fallback_root_certificates'] = True
# Use tcmalloc only when targeting Linux and when not using ASAN.
gn_args['dart_use_tcmalloc'] = ((gn_args['target_os'] == 'linux') and
sanitizer == 'none')
if gn_args['target_os'] == 'linux':
if gn_args['target_cpu'] == 'arm':
# Default to -mfloat-abi=hard and -mfpu=neon for arm on Linux as we're
# specifying a gnueabihf compiler in //build/toolchain/linux/BUILD.gn.
floatabi = 'hard' if args.arm_float_abi == '' else args.arm_float_abi
gn_args['arm_version'] = 7
gn_args['arm_float_abi'] = floatabi
gn_args['arm_use_neon'] = True
elif gn_args['target_cpu'] == 'armv6':
floatabi = 'softfp' if args.arm_float_abi == '' else args.arm_float_abi
gn_args['target_cpu'] = 'arm'
gn_args['arm_version'] = 6
gn_args['arm_float_abi'] = floatabi
gn_args['is_debug'] = mode == 'debug'
gn_args['is_release'] = mode == 'release'
gn_args['is_product'] = mode == 'product'
gn_args['dart_debug'] = mode == 'debug'
# This setting is only meaningful for Flutter. Standalone builds of the VM
# should leave this set to 'develop', which causes the build to defer to
# 'is_debug', 'is_release' and 'is_product'.
if mode == 'product':
gn_args['dart_runtime_mode'] = 'release'
else:
gn_args['dart_runtime_mode'] = 'develop'
gn_args['exclude_kernel_service'] = args.exclude_kernel_service
gn_args['is_clang'] = args.clang
enable_code_coverage = args.code_coverage and gn_args['is_clang']
gn_args['dart_vm_code_coverage'] = enable_code_coverage
gn_args['is_asan'] = sanitizer == 'asan'
gn_args['is_lsan'] = sanitizer == 'lsan'
gn_args['is_msan'] = sanitizer == 'msan'
gn_args['is_tsan'] = sanitizer == 'tsan'
gn_args['is_ubsan'] = sanitizer == 'ubsan'
gn_args['is_qemu'] = args.use_qemu
if not args.platform_sdk:
gn_args['dart_platform_sdk'] = args.platform_sdk
# We don't support stripping on Windows
if host_os != 'win':
gn_args['dart_stripped_binary'] = 'exe.stripped/dart'
gn_args['dart_precompiled_runtime_stripped_binary'] = (
'exe.stripped/dart_precompiled_runtime_product')
gn_args['gen_snapshot_stripped_binary'] = (
'exe.stripped/gen_snapshot_product')
# Setup the user-defined sysroot.
if UseSysroot(args, gn_args):
gn_args['dart_use_debian_sysroot'] = True
else:
sysroot = TargetSysroot(args)
if sysroot:
gn_args['target_sysroot'] = ParseStringMap(arch, sysroot)
toolchain = ToolchainPrefix(args)
if toolchain:
gn_args['toolchain_prefix'] = ParseStringMap(arch, toolchain)
goma_dir = os.environ.get('GOMA_DIR')
# Search for goma in depot_tools in path
goma_depot_tools_dir = None
for path in os.environ.get('PATH', '').split(os.pathsep):
if os.path.basename(path) == 'depot_tools':
cipd_bin = os.path.join(path, '.cipd_bin')
if os.path.isfile(os.path.join(cipd_bin, 'gomacc')):
goma_depot_tools_dir = cipd_bin
break
# Otherwise use goma from home directory.
# TODO(whesse): Remove support for goma installed in home directory.
# Goma will only be distributed through depot_tools.
goma_home_dir = os.path.join(os.getenv('HOME', ''), 'goma')
if args.goma and goma_dir:
gn_args['use_goma'] = True
gn_args['goma_dir'] = goma_dir
elif args.goma and goma_depot_tools_dir:
gn_args['use_goma'] = True
gn_args['goma_dir'] = goma_depot_tools_dir
elif args.goma and os.path.exists(goma_home_dir):
gn_args['use_goma'] = True
gn_args['goma_dir'] = goma_home_dir
else:
gn_args['use_goma'] = False
gn_args['goma_dir'] = None
if gn_args['target_os'] == 'mac' and gn_args['use_goma']:
gn_args['mac_use_goma_rbe'] = True
# Code coverage requires -O0 to be set.
if enable_code_coverage:
gn_args['dart_debug_optimization_level'] = 0
gn_args['debug_optimization_level'] = 0
elif args.debug_opt_level:
gn_args['dart_debug_optimization_level'] = args.debug_opt_level
gn_args['debug_optimization_level'] = args.debug_opt_level
gn_args['verify_sdk_hash'] = verify_sdk_hash
return gn_args
def ProcessOsOption(os_name):
if os_name == 'host':
return HOST_OS
return os_name
def ProcessOptions(args):
if args.arch == 'all':
args.arch = 'ia32,x64,simarm,simarm64,x64c,simarm64c'
if args.mode == 'all':
args.mode = 'debug,release,product'
if args.os == 'all':
args.os = 'host,android,fuchsia'
if args.sanitizer == 'all':
args.sanitizer = 'none,asan,lsan,msan,tsan,ubsan'
args.mode = args.mode.split(',')
args.arch = args.arch.split(',')
args.os = args.os.split(',')
args.sanitizer = args.sanitizer.split(',')
for mode in args.mode:
if not mode in ['debug', 'release', 'product']:
print("Unknown mode %s" % mode)
return False
for i, arch in enumerate(args.arch):
if not arch in AVAILABLE_ARCHS:
# Normalise to lower case form to make it less case-picky.
arch_lower = arch.lower()
if arch_lower in AVAILABLE_ARCHS:
args.arch[i] = arch_lower
continue
print("Unknown arch %s" % arch)
return False
oses = [ProcessOsOption(os_name) for os_name in args.os]
for os_name in oses:
if not os_name in [
'android', 'freebsd', 'linux', 'macos', 'win32', 'fuchsia'
]:
print("Unknown os %s" % os_name)
return False
if os_name == 'android':
if not HOST_OS in ['linux', 'macos']:
print(
"Cross-compilation to %s is not supported on host os %s." %
(os_name, HOST_OS))
return False
if not arch in [
'ia32', 'x64', 'arm', 'arm_x64', 'armv6', 'arm64', 'x64c',
'arm64c'
]:
print(
"Cross-compilation to %s is not supported for architecture %s."
% (os_name, arch))
return False
elif os_name == 'fuchsia':
if HOST_OS != 'linux':
print(
"Cross-compilation to %s is not supported on host os %s." %
(os_name, HOST_OS))
return False
if not arch in ['x64', 'arm64', 'x64c', 'arm64c']:
print(
"Cross-compilation to %s is not supported for architecture %s."
% (os_name, arch))
return False
elif os_name != HOST_OS:
print("Unsupported target os %s" % os_name)
return False
if HOST_OS != 'win' and args.use_crashpad:
print("Crashpad is only supported on Windows")
return False
return True
def os_has_ide(host_os):
return host_os.startswith('win') or host_os.startswith('mac')
def ide_switch(host_os):
if host_os.startswith('win'):
return '--ide=vs'
elif host_os.startswith('mac'):
return '--ide=xcode'
else:
return '--ide=json'
def AddCommonGnOptionArgs(parser):
"""Adds arguments that will change the default GN arguments."""
parser.add_argument('--goma', help='Use goma', action='store_true')
parser.add_argument('--no-goma',
help='Disable goma',
dest='goma',
action='store_false')
parser.set_defaults(goma=True)
parser.add_argument('--verify-sdk-hash',
help='Enable SDK hash checks (default)',
dest='verify_sdk_hash',
action='store_true')
parser.add_argument('-nvh',
'--no-verify-sdk-hash',
help='Disable SDK hash checks',
dest='verify_sdk_hash',
action='store_false')
parser.set_defaults(verify_sdk_hash=True)
parser.add_argument('--clang', help='Use Clang', action='store_true')
parser.add_argument('--no-clang',
help='Disable Clang',
dest='clang',
action='store_false')
parser.set_defaults(clang=True)
parser.add_argument(
'--platform-sdk',
help='Directs the create_sdk target to create a smaller "Platform" SDK',
default=MakePlatformSDK(),
action='store_true')
parser.add_argument('--use-crashpad',
default=False,
dest='use_crashpad',
action='store_true')
parser.add_argument('--use-qemu',
default=False,
dest='use_qemu',
action='store_true')
parser.add_argument('--exclude-kernel-service',
help='Exclude the kernel service.',
default=False,
dest='exclude_kernel_service',
action='store_true')
parser.add_argument('--arm-float-abi',
type=str,
help='The ARM float ABI (soft, softfp, hard)',
metavar='[soft,softfp,hard]',
default='')
parser.add_argument('--code-coverage',
help='Enable code coverage for the standalone VM',
default=False,
dest="code_coverage",
action='store_true')
parser.add_argument('--debug-opt-level',
'-d',
help='The optimization level to use for debug builds',
type=str)
parser.add_argument('--gn-args',
help='Set extra GN args',
dest='gn_args',
action='append')
parser.add_argument(
'--toolchain-prefix',
'-t',
type=str,
help='Comma-separated list of arch=/path/to/toolchain-prefix mappings')
parser.add_argument('--ide',
help='Generate an IDE file.',
default=os_has_ide(HOST_OS),
action='store_true')
parser.add_argument('--export-compile-commands',
help='Export compile_commands.json database file.',
default=False,
action='store_true')
parser.add_argument(
'--target-sysroot',
'-s',
type=str,
help='Comma-separated list of arch=/path/to/sysroot mappings')
def AddCommonConfigurationArgs(parser):
"""Adds arguments that influence which configuration will be built."""
parser.add_argument("-a",
"--arch",
type=str,
help='Target architectures (comma-separated).',
metavar='[all,' + ','.join(AVAILABLE_ARCHS) + ']',
default=utils.GuessArchitecture())
parser.add_argument('--mode',
'-m',
type=str,
help='Build variants (comma-separated).',
metavar='[all,debug,release,product]',
default='debug')
parser.add_argument('--os',
type=str,
help='Target OSs (comma-separated).',
metavar='[all,host,android,fuchsia]',
default='host')
parser.add_argument('--sanitizer',
type=str,
help='Build variants (comma-separated).',
metavar='[all,none,asan,lsan,msan,tsan,ubsan]',
default='none')
def AddOtherArgs(parser):
"""Adds miscellaneous arguments to the parser."""
parser.add_argument("-v",
"--verbose",
help='Verbose output.',
default=False,
action="store_true")
def parse_args(args):
args = args[1:]
parser = argparse.ArgumentParser(
description='A script to run `gn gen`.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
config_group = parser.add_argument_group('Configuration Related Arguments')
AddCommonConfigurationArgs(config_group)
gn_group = parser.add_argument_group('GN Related Arguments')
AddCommonGnOptionArgs(gn_group)
other_group = parser.add_argument_group('Other Arguments')
AddOtherArgs(other_group)
options = parser.parse_args(args)
if not ProcessOptions(options):
parser.print_help()
return None
return options
def BuildGnCommand(args, mode, arch, target_os, sanitizer, out_dir):
gn = os.path.join(DART_ROOT, 'buildtools',
'gn.exe' if utils.IsWindows() else 'gn')
if not os.path.isfile(gn):
raise Exception("Couldn't find the gn binary at path: " + gn)
# TODO(infra): Re-enable --check. Many targets fail to use
# public_deps to re-expose header files to their dependents.
# See dartbug.com/32364
command = [gn, 'gen', out_dir]
gn_args = ToCommandLine(
ToGnArgs(args, mode, arch, target_os, sanitizer, args.verify_sdk_hash))
gn_args += GetGNArgs(args)
if args.ide:
command.append(ide_switch(HOST_OS))
if args.export_compile_commands:
command.append('--export-compile-commands')
command.append('--args=%s' % ' '.join(gn_args))
return command
def RunGnOnConfiguredConfigurations(args):
commands = []
for target_os in args.os:
for mode in args.mode:
for arch in args.arch:
for sanitizer in args.sanitizer:
out_dir = GetOutDir(mode, arch, target_os, sanitizer)
commands.append(
BuildGnCommand(args, mode, arch, target_os, sanitizer,
out_dir))
if args.verbose:
print("gn gen --check in %s" % out_dir)
active_commands = []
def cleanup(command):
print("Command failed: " + ' '.join(command))
for (_, process) in active_commands:
process.terminate()
for command in commands:
try:
process = subprocess.Popen(command, cwd=DART_ROOT)
active_commands.append([command, process])
except Exception as e:
print('Error: %s' % e)
cleanup(command)
return 1
while active_commands:
time.sleep(0.1)
for active_command in active_commands:
(command, process) = active_command
if process.poll() is not None:
active_commands.remove(active_command)
if process.returncode != 0:
cleanup(command)
return 1
return 0
def Main(argv):
starttime = time.time()
args = parse_args(argv)
if args is None:
return 1
result = RunGnOnConfiguredConfigurations(args)
if args.verbose:
endtime = time.time()
print("GN Time: %.3f seconds" % (endtime - starttime))
return result
if __name__ == '__main__':
sys.exit(Main(sys.argv))
|
from test_support import verbose, TestFailed, TestSkipped
import nis
print 'nis.maps()'
try:
maps = nis.maps()
except nis.error, msg:
# NIS is probably not active, so this test isn't useful
if verbose:
raise TestFailed, msg
# only do this if running under the regression suite
raise TestSkipped, msg
done = 0
for nismap in maps:
if verbose:
print nismap
mapping = nis.cat(nismap)
for k, v in mapping.items():
if verbose:
print ' ', k, v
if not k:
continue
if nis.match(k, nismap) <> v:
print "NIS match failed for key `%s' in map `%s'" % (k, nismap)
else:
# just test the one key, otherwise this test could take a
# very long time
done = 1
break
if done:
break
|
import os
from tkinter import *
import db_save
filepath = os.path.dirname(__file__)
icon_eq = os.path.join(filepath, "data\\pics\\pleczak.ico")
tlos = os.path.join(filepath, "data\\pics\\hg.png")
tloe = os.path.join(filepath, "data\\pics\\hp.png")
def informacja(tresc, zrodlo_pliku):
eq = "☆ Otrzymujesz " + tresc + " ☆"
tytul_okna = "Ekwipunek/Statystyki"
root = Tk()
root.iconbitmap(icon_eq)
root.geometry("350x150")
root.title(tytul_okna)
tlo = PhotoImage(file=tloe)
label_image = Label(root, image=tlo)
label_image.place(x=0, y=0, relwidth=1, relheight=1)
text3_lbl = Label(root, text=eq, foreground="#FFFFFF", bg="#000000")
text3_lbl.pack()
przedmiot = PhotoImage(file=zrodlo_pliku)
graph_lbl = Label(root, image=przedmiot, relief="flat", bd=0)
graph_lbl.pack()
root.mainloop()
def zapiski(osoba, tabela):
""" Pobiera informacje o osobie """
tytul_okna = "Zapiski na temat " + osoba
dane = db_save.pobierz_dane(tabela)
root = Tk()
root.iconbitmap(icon_eq)
root.geometry("400x400")
root.title(tytul_okna)
tlo = PhotoImage(file=tlos)
label_image = Label(root, image=tlo)
label_image.place(x=0, y=0, relwidth=1, relheight=1)
label_head = Label(root, width=0, foreground="#000000", bg="#000000")
label_head.pack()
for x in dane:
tex = x[0] + " : " + x[1]
label = Label(root, text=tex, foreground="#FFFFFF", bg="#000000") # set your text
label.pack()
root.mainloop()
|
import random, re, time, uuid
from dtest import Tester, debug
from pytools import since
from pyassertions import assert_invalid
from cassandra import InvalidRequest
from cassandra.query import BatchStatement, SimpleStatement
from cassandra.protocol import ConfigurationException
class TestSecondaryIndexes(Tester):
def bug3367_test(self):
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
columns = {"password": "varchar", "gender": "varchar", "session_token": "varchar", "state": "varchar", "birth_year": "bigint"}
self.create_cf(cursor, 'users', columns=columns)
# insert data
cursor.execute("INSERT INTO users (KEY, password, gender, state, birth_year) VALUES ('user1', 'ch@ngem3a', 'f', 'TX', 1968);")
cursor.execute("INSERT INTO users (KEY, password, gender, state, birth_year) VALUES ('user2', 'ch@ngem3b', 'm', 'CA', 1971);")
# create index
cursor.execute("CREATE INDEX gender_key ON users (gender);")
cursor.execute("CREATE INDEX state_key ON users (state);")
cursor.execute("CREATE INDEX birth_year_key ON users (birth_year);")
# insert data
cursor.execute("INSERT INTO users (KEY, password, gender, state, birth_year) VALUES ('user3', 'ch@ngem3c', 'f', 'FL', 1978);")
cursor.execute("INSERT INTO users (KEY, password, gender, state, birth_year) VALUES ('user4', 'ch@ngem3d', 'm', 'TX', 1974);")
result = cursor.execute("SELECT * FROM users;")
        assert len(result) == 4, "Expecting 4 users, got " + str(result)
        result = cursor.execute("SELECT * FROM users WHERE state='TX';")
        assert len(result) == 2, "Expecting 2 users, got " + str(result)
        result = cursor.execute("SELECT * FROM users WHERE state='CA';")
        assert len(result) == 1, "Expecting 1 user, got " + str(result)
@since('2.1')
def test_low_cardinality_indexes(self):
"""
Checks that low-cardinality secondary index subqueries are executed
concurrently
"""
cluster = self.cluster
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
conn = self.patient_cql_connection(node1, version='3.0.0')
cursor = conn
cursor.max_trace_wait = 120
cursor.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': '1'};")
cursor.execute("CREATE TABLE ks.cf (a text PRIMARY KEY, b text);")
cursor.execute("CREATE INDEX b_index ON ks.cf (b);")
num_rows = 100
for i in range(num_rows):
indexed_value = i % (num_rows / 3)
# use the same indexed value three times
cursor.execute("INSERT INTO ks.cf (a, b) VALUES ('%d', '%d');" % (i, indexed_value))
cluster.flush()
def check_trace_events(trace):
# we should see multiple requests get enqueued prior to index scan
# execution happening
# Look for messages like:
# Submitting range requests on 769 ranges with a concurrency of 769 (0.0070312 rows per range expected)
regex = r"Submitting range requests on [0-9]+ ranges with a concurrency of (\d+) \(([0-9.]+) rows per range expected\)"
for event in trace.events:
desc = event.description
match = re.match(regex, desc)
if match:
concurrency = int(match.group(1))
expected_per_range = float(match.group(2))
self.assertTrue(concurrency > 1, "Expected more than 1 concurrent range request, got %d" % concurrency)
self.assertTrue(expected_per_range > 0)
break
else:
self.fail("Didn't find matching trace event")
query = SimpleStatement("SELECT * FROM ks.cf WHERE b='1';")
result = cursor.execute(query, trace=True)
self.assertEqual(3, len(result))
check_trace_events(query.trace)
query = SimpleStatement("SELECT * FROM ks.cf WHERE b='1' LIMIT 100;")
result = cursor.execute(query, trace=True)
self.assertEqual(3, len(result))
check_trace_events(query.trace)
query = SimpleStatement("SELECT * FROM ks.cf WHERE b='1' LIMIT 3;")
result = cursor.execute(query, trace=True)
self.assertEqual(3, len(result))
check_trace_events(query.trace)
for limit in (1, 2):
result = cursor.execute("SELECT * FROM ks.cf WHERE b='1' LIMIT %d;" % (limit,))
self.assertEqual(limit, len(result))
@since('2.1')
def test_6924_dropping_ks(self):
"""Tests CASSANDRA-6924
Data inserted immediately after dropping and recreating a
        keyspace with an indexed column family is not included
in the index.
"""
# Reproducing requires at least 3 nodes:
cluster = self.cluster
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
conn = self.patient_cql_connection(node1)
cursor = conn
#This only occurs when dropping and recreating with
#the same name, so loop through this test a few times:
for i in range(10):
debug("round %s" % i)
try:
cursor.execute("DROP KEYSPACE ks")
except ConfigurationException:
pass
self.create_ks(cursor, 'ks', 1)
cursor.execute("CREATE TABLE ks.cf (key text PRIMARY KEY, col1 text);")
cursor.execute("CREATE INDEX on ks.cf (col1);")
for r in range(10):
stmt = "INSERT INTO ks.cf (key, col1) VALUES ('%s','asdf');" % r
cursor.execute(stmt)
self.wait_for_schema_agreement(cursor)
rows = cursor.execute("select count(*) from ks.cf WHERE col1='asdf'")
count = rows[0][0]
self.assertEqual(count, 10)
@since('2.1')
def test_6924_dropping_cf(self):
"""Tests CASSANDRA-6924
Data inserted immediately after dropping and recreating an
indexed column family is not included in the index.
"""
# Reproducing requires at least 3 nodes:
cluster = self.cluster
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
conn = self.patient_cql_connection(node1)
cursor = conn
self.create_ks(cursor, 'ks', 1)
#This only occurs when dropping and recreating with
#the same name, so loop through this test a few times:
for i in range(10):
debug("round %s" % i)
try:
cursor.execute("DROP COLUMNFAMILY ks.cf")
except InvalidRequest:
pass
cursor.execute("CREATE TABLE ks.cf (key text PRIMARY KEY, col1 text);")
cursor.execute("CREATE INDEX on ks.cf (col1);")
for r in range(10):
stmt = "INSERT INTO ks.cf (key, col1) VALUES ('%s','asdf');" % r
cursor.execute(stmt)
self.wait_for_schema_agreement(cursor)
rows = cursor.execute("select count(*) from ks.cf WHERE col1='asdf'")
count = rows[0][0]
self.assertEqual(count, 10)
@since('2.0')
def test_8280_validate_indexed_values(self):
"""Tests CASSANDRA-8280
Reject inserts & updates where values of any indexed
column is > 64k
"""
cluster = self.cluster
cluster.populate(1).start()
node1 = cluster.nodelist()[0]
conn = self.patient_cql_connection(node1)
cursor = conn
self.create_ks(cursor, 'ks', 1)
self.insert_row_with_oversize_value("CREATE TABLE %s(a int, b int, c text, PRIMARY KEY (a))",
"CREATE INDEX ON %s(c)",
"INSERT INTO %s (a, b, c) VALUES (0, 0, ?)",
cursor)
self.insert_row_with_oversize_value("CREATE TABLE %s(a int, b text, c int, PRIMARY KEY (a, b))",
"CREATE INDEX ON %s(b)",
"INSERT INTO %s (a, b, c) VALUES (0, ?, 0)",
cursor)
self.insert_row_with_oversize_value("CREATE TABLE %s(a text, b int, c int, PRIMARY KEY ((a, b)))",
"CREATE INDEX ON %s(a)",
"INSERT INTO %s (a, b, c) VALUES (?, 0, 0)",
cursor)
self.insert_row_with_oversize_value("CREATE TABLE %s(a int, b text, PRIMARY KEY (a)) WITH COMPACT STORAGE",
"CREATE INDEX ON %s(b)",
"INSERT INTO %s (a, b) VALUES (0, ?)",
cursor)
def insert_row_with_oversize_value(self, create_table_cql, create_index_cql, insert_cql, cursor):
""" Validate two variations of the supplied insert statement, first
as it is and then again transformed into a conditional statement
"""
table_name = "table_" + str(int(round(time.time() * 1000)))
cursor.execute(create_table_cql % table_name)
cursor.execute(create_index_cql % table_name)
value = "X" * 65536
self._assert_invalid_request(cursor, insert_cql % table_name, value)
self._assert_invalid_request(cursor, (insert_cql % table_name) + ' IF NOT EXISTS', value)
def _assert_invalid_request(self, cursor, insert_cql, value):
""" Perform two executions of the supplied statement, as a
single statement and again as part of a batch
"""
prepared = cursor.prepare(insert_cql)
self._execute_and_fail(lambda: cursor.execute(prepared, [value]), insert_cql)
batch = BatchStatement()
batch.add(prepared, [value])
self._execute_and_fail(lambda: cursor.execute(batch), insert_cql)
def _execute_and_fail(self, operation, cql_string):
try:
operation()
assert False, "Expecting query %s to be invalid" % cql_string
except AssertionError as e:
raise e
except InvalidRequest:
pass
def wait_for_schema_agreement(self, cursor):
rows = cursor.execute("SELECT schema_version FROM system.local")
local_version = rows[0]
all_match = True
rows = cursor.execute("SELECT schema_version FROM system.peers")
for peer_version in rows:
if peer_version != local_version:
all_match = False
break
if all_match:
return
else:
time.sleep(0.10)
self.wait_for_schema_agreement(cursor)
class TestSecondaryIndexesOnCollections(Tester):
def __init__(self, *args, **kwargs):
Tester.__init__(self, *args, **kwargs)
@since('2.1')
def test_list_indexes(self):
"""
Checks that secondary indexes on lists work for querying.
"""
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'list_index_search', 1)
stmt = ("CREATE TABLE list_index_search.users ("
"user_id uuid PRIMARY KEY,"
"email text,"
"uuids list<uuid>"
");")
cursor.execute(stmt)
# no index present yet, make sure there's an error trying to query column
stmt = ("SELECT * from list_index_search.users where uuids contains {some_uuid}"
).format(some_uuid=uuid.uuid4())
assert_invalid(cursor, stmt, 'No secondary indexes on the restricted columns support the provided operators')
# add index and query again (even though there are no rows in the table yet)
stmt = "CREATE INDEX user_uuids on list_index_search.users (uuids);"
cursor.execute(stmt)
stmt = ("SELECT * from list_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
row = cursor.execute(stmt)
self.assertEqual(0, len(row))
# add a row which doesn't specify data for the indexed column, and query again
user1_uuid = uuid.uuid4()
stmt = ("INSERT INTO list_index_search.users (user_id, email)"
"values ({user_id}, 'test@example.com')"
).format(user_id=user1_uuid)
cursor.execute(stmt)
stmt = ("SELECT * from list_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
row = cursor.execute(stmt)
self.assertEqual(0, len(row))
_id = uuid.uuid4()
# alter the row to add a single item to the indexed list
stmt = ("UPDATE list_index_search.users set uuids = [{id}] where user_id = {user_id}"
).format(id=_id, user_id=user1_uuid)
cursor.execute(stmt)
stmt = ("SELECT * from list_index_search.users where uuids contains {some_uuid}").format(some_uuid=_id)
row = cursor.execute(stmt)
self.assertEqual(1, len(row))
# add a bunch of user records and query them back
shared_uuid = uuid.uuid4() # this uuid will be on all records
log = []
for i in range(50000):
user_uuid = uuid.uuid4()
unshared_uuid = uuid.uuid4()
# give each record a unique email address using the int index
stmt = ("INSERT INTO list_index_search.users (user_id, email, uuids)"
"values ({user_uuid}, '{prefix}@example.com', [{s_uuid}, {u_uuid}])"
).format(user_uuid=user_uuid, prefix=i, s_uuid=shared_uuid, u_uuid=unshared_uuid)
cursor.execute(stmt)
log.append(
{'user_id': user_uuid,
'email':str(i)+'@example.com',
'unshared_uuid':unshared_uuid}
)
        # confirm there are now 50,000 rows with the 'shared' uuid above in the secondary index
stmt = ("SELECT * from list_index_search.users where uuids contains {shared_uuid}").format(shared_uuid=shared_uuid)
rows = cursor.execute(stmt)
result = [row for row in rows]
self.assertEqual(50000, len(result))
# shuffle the log in-place, and double-check a slice of records by querying the secondary index
random.shuffle(log)
for log_entry in log[:1000]:
stmt = ("SELECT user_id, email, uuids FROM list_index_search.users where uuids contains {unshared_uuid}"
).format(unshared_uuid=log_entry['unshared_uuid'])
rows = cursor.execute(stmt)
self.assertEqual(1, len(rows))
db_user_id, db_email, db_uuids = rows[0]
self.assertEqual(db_user_id, log_entry['user_id'])
self.assertEqual(db_email, log_entry['email'])
self.assertEqual(str(db_uuids[0]), str(shared_uuid))
self.assertEqual(str(db_uuids[1]), str(log_entry['unshared_uuid']))
@since('2.1')
def test_set_indexes(self):
"""
Checks that secondary indexes on sets work for querying.
"""
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'set_index_search', 1)
stmt = ("CREATE TABLE set_index_search.users ("
"user_id uuid PRIMARY KEY,"
"email text,"
"uuids set<uuid>);")
cursor.execute(stmt)
# no index present yet, make sure there's an error trying to query column
stmt = ("SELECT * from set_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
assert_invalid(cursor, stmt, 'No secondary indexes on the restricted columns support the provided operators')
# add index and query again (even though there are no rows in the table yet)
stmt = "CREATE INDEX user_uuids on set_index_search.users (uuids);"
cursor.execute(stmt)
stmt = ("SELECT * from set_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
row = cursor.execute(stmt)
self.assertEqual(0, len(row))
# add a row which doesn't specify data for the indexed column, and query again
user1_uuid = uuid.uuid4()
stmt = ("INSERT INTO set_index_search.users (user_id, email) values ({user_id}, 'test@example.com')"
).format(user_id=user1_uuid)
cursor.execute(stmt)
stmt = ("SELECT * from set_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
row = cursor.execute(stmt)
self.assertEqual(0, len(row))
_id = uuid.uuid4()
# alter the row to add a single item to the indexed set
stmt = ("UPDATE set_index_search.users set uuids = {{{id}}} where user_id = {user_id}").format(id=_id, user_id=user1_uuid)
cursor.execute(stmt)
stmt = ("SELECT * from set_index_search.users where uuids contains {some_uuid}").format(some_uuid=_id)
row = cursor.execute(stmt)
self.assertEqual(1, len(row))
# add a bunch of user records and query them back
shared_uuid = uuid.uuid4() # this uuid will be on all records
log = []
for i in range(50000):
user_uuid = uuid.uuid4()
unshared_uuid = uuid.uuid4()
# give each record a unique email address using the int index
stmt = ("INSERT INTO set_index_search.users (user_id, email, uuids)"
"values ({user_uuid}, '{prefix}@example.com', {{{s_uuid}, {u_uuid}}})"
).format(user_uuid=user_uuid, prefix=i, s_uuid=shared_uuid, u_uuid=unshared_uuid)
cursor.execute(stmt)
log.append(
{'user_id': user_uuid,
'email':str(i)+'@example.com',
'unshared_uuid':unshared_uuid}
)
        # confirm there are now 50,000 rows with the 'shared' uuid above in the secondary index
stmt = ("SELECT * from set_index_search.users where uuids contains {shared_uuid}").format(shared_uuid=shared_uuid)
rows = cursor.execute(stmt)
result = [row for row in rows]
self.assertEqual(50000, len(result))
# shuffle the log in-place, and double-check a slice of records by querying the secondary index
random.shuffle(log)
for log_entry in log[:1000]:
stmt = ("SELECT user_id, email, uuids FROM set_index_search.users where uuids contains {unshared_uuid}"
).format(unshared_uuid=log_entry['unshared_uuid'])
rows = cursor.execute(stmt)
self.assertEqual(1, len(rows))
db_user_id, db_email, db_uuids = rows[0]
self.assertEqual(db_user_id, log_entry['user_id'])
self.assertEqual(db_email, log_entry['email'])
self.assertTrue(shared_uuid in db_uuids)
self.assertTrue(log_entry['unshared_uuid'] in db_uuids)
@since('2.1')
def test_map_indexes(self):
"""
Checks that secondary indexes on maps work for querying on both keys and values
"""
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'map_index_search', 1)
stmt = ("CREATE TABLE map_index_search.users ("
"user_id uuid PRIMARY KEY,"
"email text,"
"uuids map<uuid, uuid>);")
cursor.execute(stmt)
# no index present yet, make sure there's an error trying to query column
stmt = ("SELECT * from map_index_search.users where uuids contains {some_uuid}").format(some_uuid=uuid.uuid4())
assert_invalid(cursor, stmt, 'No secondary indexes on the restricted columns support the provided operators')
stmt = ("SELECT * from map_index_search.users where uuids contains key {some_uuid}"
).format(some_uuid=uuid.uuid4())
assert_invalid(cursor, stmt, 'No secondary indexes on the restricted columns support the provided operators')
# add index on keys and query again (even though there are no rows in the table yet)
stmt = "CREATE INDEX user_uuids on map_index_search.users (KEYS(uuids));"
cursor.execute(stmt)
stmt = "SELECT * from map_index_search.users where uuids contains key {some_uuid}".format(some_uuid=uuid.uuid4())
rows = cursor.execute(stmt)
self.assertEqual(0, len(rows))
# add a row which doesn't specify data for the indexed column, and query again
user1_uuid = uuid.uuid4()
stmt = ("INSERT INTO map_index_search.users (user_id, email)"
"values ({user_id}, 'test@example.com')"
).format(user_id=user1_uuid)
cursor.execute(stmt)
stmt = ("SELECT * from map_index_search.users where uuids contains key {some_uuid}").format(some_uuid=uuid.uuid4())
rows = cursor.execute(stmt)
self.assertEqual(0, len(rows))
_id = uuid.uuid4()
# alter the row to add a single item to the indexed map
stmt = ("UPDATE map_index_search.users set uuids = {{{id}:{user_id}}} where user_id = {user_id}"
).format(id=_id, user_id=user1_uuid)
cursor.execute(stmt)
stmt = ("SELECT * from map_index_search.users where uuids contains key {some_uuid}").format(some_uuid=_id)
rows = cursor.execute(stmt)
self.assertEqual(1, len(rows))
# add a bunch of user records and query them back
shared_uuid = uuid.uuid4() # this uuid will be on all records
log = []
for i in range(50000):
user_uuid = uuid.uuid4()
unshared_uuid1 = uuid.uuid4()
unshared_uuid2 = uuid.uuid4()
# give each record a unique email address using the int index, add unique ids for keys and values
stmt = ("INSERT INTO map_index_search.users (user_id, email, uuids)"
"values ({user_uuid}, '{prefix}@example.com', {{{u_uuid1}:{u_uuid2}, {s_uuid}:{s_uuid}}})"
).format(user_uuid=user_uuid, prefix=i, s_uuid=shared_uuid, u_uuid1=unshared_uuid1, u_uuid2=unshared_uuid2)
cursor.execute(stmt)
log.append(
{'user_id': user_uuid,
'email':str(i)+'@example.com',
'unshared_uuid1':unshared_uuid1,
'unshared_uuid2':unshared_uuid2}
)
        # confirm there are now 50,000 rows with the 'shared' uuid above in the secondary index
stmt = ("SELECT * from map_index_search.users where uuids contains key {shared_uuid}"
).format(shared_uuid=shared_uuid)
rows = cursor.execute(stmt)
result = [row for row in rows]
self.assertEqual(50000, len(result))
# shuffle the log in-place, and double-check a slice of records by querying the secondary index on keys
random.shuffle(log)
for log_entry in log[:1000]:
stmt = ("SELECT user_id, email, uuids FROM map_index_search.users where uuids contains key {unshared_uuid1}"
).format(unshared_uuid1=log_entry['unshared_uuid1'])
row = cursor.execute(stmt)
            self.assertEqual(1, len(row))
db_user_id, db_email, db_uuids = row[0]
self.assertEqual(db_user_id, log_entry['user_id'])
self.assertEqual(db_email, log_entry['email'])
self.assertTrue(shared_uuid in db_uuids)
self.assertTrue(log_entry['unshared_uuid1'] in db_uuids)
# attempt to add an index on map values as well (should fail)
stmt = "CREATE INDEX user_uuids on map_index_search.users (uuids);"
matching = "Cannot create index on uuids values, an index on uuids keys already exists and indexing a map on both keys and values at the same time is not currently supported"
assert_invalid(cursor, stmt, matching)
        # since we cannot index both map keys and values at once, remove the current index on keys
stmt = "DROP INDEX user_uuids;"
cursor.execute(stmt)
# add index on values (will index rows added prior)
stmt = "CREATE INDEX user_uids on map_index_search.users (uuids);"
cursor.execute(stmt)
# shuffle the log in-place, and double-check a slice of records by querying the secondary index
random.shuffle(log)
time.sleep(10)
        # since we already inserted unique ids for values as well, check that the appropriate records are found
for log_entry in log[:1000]:
stmt = ("SELECT user_id, email, uuids FROM map_index_search.users where uuids contains {unshared_uuid2}"
).format(unshared_uuid2=log_entry['unshared_uuid2'])
rows = cursor.execute(stmt)
self.assertEqual(1, len(rows))
db_user_id, db_email, db_uuids = rows[0]
self.assertEqual(db_user_id, log_entry['user_id'])
self.assertEqual(db_email, log_entry['email'])
self.assertTrue(shared_uuid in db_uuids)
self.assertTrue(log_entry['unshared_uuid2'] in db_uuids.values())
|
''' Compute on ANVIL GTEX files'''
# IMPORTS
import sys
import json
from fasp.runner import FASPRunner
# The implementations we're using
from fasp.loc import Gen3DRSClient
from fasp.workflow import GCPLSsamtools
from fasp.loc import anvilDRSClient
class localSearchClient:
def __init__(self):
# edit the following for your local copy of the manifest file
with open('/mnt/shared/gcp-user/session_data/manifest.json') as f:
self.data = json.load(f)
def runQuery(self, query):
        # return only the first record for now
        # edit this once you're ready to run this on all the files
results = []
for f in self.data[:1]:
results.append([f['file_name'],f['object_id']])
return results
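        # A sketch for processing every record in the manifest once you're
        # ready (same fields, just without the [:1] slice):
        #     results = [[f['file_name'], f['object_id']] for f in self.data]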
def main(argv):
# edit the following line for where you put your credentials file from anvil
credentials_file = '/mnt/shared/gcp-user/session_data/credentials.json'
faspRunner = FASPRunner(pauseSecs=0)
settings = faspRunner.settings
# Step 1 - Discovery
# query for relevant DRS objects
searchClient = localSearchClient()
#drsClient = DRSMetaResolver()
drsClient = anvilDRSClient(credentials_file, settings['GCPProject'], 'gs')
location = 'projects/{}/locations/{}'.format(settings['GCPProject'], settings['GCPPipelineRegion'])
print(location)
print(settings['GCPOutputBucket'])
workflowClient = GCPLSsamtools(location, settings['GCPOutputBucket'], debug=True)
faspRunner.configure(searchClient, drsClient, workflowClient)
faspRunner.runQuery('', 'Anvil GTEX Test')
if __name__ == "__main__":
main(sys.argv[1:])
|
from apscheduler.schedulers.blocking import BlockingScheduler
from internals.sensors import get_sensors_data
from internals.constants import plants_csv, moisture_alarm, template_email, output_email, \
from_email, to_email, interval_minutes
from internals.utils import get_dry_plants, insert_text_into_mail_body, generate_random_values, \
get_plants_name_from_csv, get_values_percentage
from internals.mailing import send_email
from datetime import datetime
import os
my_plants = get_plants_name_from_csv(plants_csv)
def plants_report():
sensor_data = get_sensors_data()
values = get_values_percentage(sensor_data)
# values = generate_random_values(sensors_number=5)
    date_format = '%Y-%m-%d, %H:%M'  # %m is month; %M would be minutes
date = datetime.now().strftime(date_format)
print(f'{date}: Checking moisture values')
dry_plants = get_dry_plants(my_plants, values, moisture_alarm)
if len(dry_plants):
print(f'Found dry plants:')
for p in dry_plants:
print(p)
insert_text_into_mail_body(mail_template=template_email, output_file=output_email,
plants_complete_list=dry_plants)
response_code = send_email(email_file=output_email, from_email=from_email, to_email=to_email)
if response_code == 202:
os.remove(output_email)
else:
print('No dry plants, all is good!')
print('--------------------------')
# run for the first time
plants_report()
# set scheduler and repeat it
scheduler = BlockingScheduler()
scheduler.add_job(plants_report, 'interval', minutes=interval_minutes)
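# Note: with the 'interval' trigger the first scheduled run only fires after
# interval_minutes have elapsed, which is why plants_report() is called once
# manually above before the scheduler starts.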
scheduler.start()
|
# -*- coding: utf-8 -*-
"""
flask_login.utils
-----------------
General utilities.
"""
import hmac
from hashlib import sha512
from functools import wraps
from werkzeug.local import LocalProxy
from werkzeug.security import safe_str_cmp
from werkzeug.urls import url_decode, url_encode
from flask import (
_request_ctx_stack,
current_app,
request,
session,
url_for,
has_request_context,
)
from ._compat import text_type, urlparse, urlunparse
from .config import COOKIE_NAME, EXEMPT_METHODS
from .signals import user_logged_in, user_logged_out, user_login_confirmed
#: A proxy for the current user. If no user is logged in, this will be an
#: anonymous user
current_user = LocalProxy(lambda: _get_user())
def encode_cookie(payload):
"""
This will encode a ``unicode`` value into a cookie, and sign that cookie
with the app's secret key.
:param payload: The value to encode, as `unicode`.
:type payload: unicode
"""
return u"{0}|{1}".format(payload, _cookie_digest(payload))
def decode_cookie(cookie):
"""
This decodes a cookie given by `encode_cookie`. If verification of the
cookie fails, ``None`` will be implicitly returned.
:param cookie: An encoded cookie.
:type cookie: str
"""
try:
payload, digest = cookie.rsplit(u"|", 1)
if hasattr(digest, "decode"):
digest = digest.decode("ascii") # pragma: no cover
except ValueError:
return
if safe_str_cmp(_cookie_digest(payload), digest):
return payload
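# Round-trip sketch (requires an application context with SECRET_KEY set;
# illustrative only, not part of the public API):
#
#     with app.app_context():
#         assert decode_cookie(encode_cookie(u"42")) == u"42"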
def make_next_param(login_url, current_url):
"""
Reduces the scheme and host from a given URL so it can be passed to
the given `login` URL more efficiently.
:param login_url: The login URL being redirected to.
:type login_url: str
:param current_url: The URL to reduce.
:type current_url: str
"""
l = urlparse(login_url)
c = urlparse(current_url)
if (not l.scheme or l.scheme == c.scheme) and (
not l.netloc or l.netloc == c.netloc
):
return urlunparse(("", "", c.path, c.params, c.query, ""))
return current_url
def expand_login_view(login_view):
"""
Returns the url for the login view, expanding the view name to a url if
needed.
:param login_view: The name of the login view or a URL for the login view.
:type login_view: str
"""
if login_view.startswith(("https://", "http://", "/")):
return login_view
else:
return url_for(login_view)
def login_url(login_view, next_url=None, next_field="next"):
"""
Creates a URL for redirecting to a login page. If only `login_view` is
provided, this will just return the URL for it. If `next_url` is provided,
however, this will append a ``next=URL`` parameter to the query string
so that the login view can redirect back to that URL. Flask-Login's default
unauthorized handler uses this function when redirecting to your login url.
    To force the host name used, set `FORCE_HOST_FOR_REDIRECTS` to a host. This
    prevents redirecting to external sites if the Host or X-Forwarded-For
    request headers are present.
:param login_view: The name of the login view. (Alternately, the actual
URL to the login view.)
:type login_view: str
:param next_url: The URL to give the login view for redirection.
:type next_url: str
:param next_field: What field to store the next URL in. (It defaults to
``next``.)
:type next_field: str
"""
base = expand_login_view(login_view)
if next_url is None:
return base
parsed_result = urlparse(base)
md = url_decode(parsed_result.query)
md[next_field] = make_next_param(base, next_url)
netloc = current_app.config.get("FORCE_HOST_FOR_REDIRECTS") or parsed_result.netloc
parsed_result = parsed_result._replace(
netloc=netloc, query=url_encode(md, sort=True)
)
return urlunparse(parsed_result)
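# Example (illustrative only): with a login view that resolves to "/login" and
# a current URL of "http://example.com/post/1",
#
#     login_url("auth.login", next_url="http://example.com/post/1")
#
# returns "/login?next=%2Fpost%2F1", so the login view can redirect back after
# a successful login. The "auth.login" endpoint name is just an assumption.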
def login_fresh():
"""
This returns ``True`` if the current login is fresh.
"""
return session.get("_fresh", False)
def login_user(user, remember=False, duration=None, force=False, fresh=True):
"""
Logs a user in. You should pass the actual user object to this. If the
user's `is_active` property is ``False``, they will not be logged in
unless `force` is ``True``.
This will return ``True`` if the log in attempt succeeds, and ``False`` if
it fails (i.e. because the user is inactive).
:param user: The user object to log in.
:type user: object
:param remember: Whether to remember the user after their session expires.
Defaults to ``False``.
:type remember: bool
:param duration: The amount of time before the remember cookie expires. If
``None`` the value set in the settings is used. Defaults to ``None``.
:type duration: :class:`datetime.timedelta`
:param force: If the user is inactive, setting this to ``True`` will log
them in regardless. Defaults to ``False``.
:type force: bool
:param fresh: setting this to ``False`` will log in the user with a session
marked as not "fresh". Defaults to ``True``.
:type fresh: bool
"""
if not force and not user.is_active:
return False
user_id = getattr(user, current_app.login_manager.id_attribute)()
session["user_id"] = user_id
session["_fresh"] = fresh
session["_id"] = current_app.login_manager._session_identifier_generator()
if remember:
session["remember"] = "set"
if duration is not None:
try:
# equal to timedelta.total_seconds() but works with Python 2.6
session["remember_seconds"] = (
duration.microseconds
+ (duration.seconds + duration.days * 24 * 3600) * 10 ** 6
) / 10.0 ** 6
except AttributeError:
raise Exception(
"duration must be a datetime.timedelta, "
"instead got: {0}".format(duration)
)
_request_ctx_stack.top.user = user
user_logged_in.send(current_app._get_current_object(), user=_get_user())
return True
def logout_user():
"""
Logs a user out. (You do not need to pass the actual user.) This will
also clean up the remember me cookie if it exists.
"""
user = _get_user()
if "user_id" in session:
session.pop("user_id")
if "_fresh" in session:
session.pop("_fresh")
cookie_name = current_app.config.get("REMEMBER_COOKIE_NAME", COOKIE_NAME)
if cookie_name in request.cookies:
session["remember"] = "clear"
if "remember_seconds" in session:
session.pop("remember_seconds")
user_logged_out.send(current_app._get_current_object(), user=user)
current_app.login_manager.reload_user()
return True
def confirm_login():
"""
This sets the current session as fresh. Sessions become stale when they
are reloaded from a cookie.
"""
session["_fresh"] = True
session["_id"] = current_app.login_manager._session_identifier_generator()
user_login_confirmed.send(current_app._get_current_object())
def login_required(func):
"""
If you decorate a view with this, it will ensure that the current user is
logged in and authenticated before calling the actual view. (If they are
not, it calls the :attr:`LoginManager.unauthorized` callback.) For
example::
@app.route('/post')
@login_required
def post():
pass
If there are only certain times you need to require that your user is
logged in, you can do so with::
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
...which is essentially the code that this function adds to your views.
It can be convenient to globally turn off authentication when unit testing.
To enable this, if the application configuration variable `LOGIN_DISABLED`
is set to `True`, this decorator will be ignored.
.. Note ::
Per `W3 guidelines for CORS preflight requests
<http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0>`_,
HTTP ``OPTIONS`` requests are exempt from login checks.
:param func: The view function to decorate.
:type func: function
"""
@wraps(func)
def decorated_view(*args, **kwargs):
if request.method in EXEMPT_METHODS:
return func(*args, **kwargs)
elif current_app.login_manager._login_disabled:
return func(*args, **kwargs)
elif not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
return func(*args, **kwargs)
return decorated_view
def fresh_login_required(func):
"""
If you decorate a view with this, it will ensure that the current user's
login is fresh - i.e. their session was not restored from a 'remember me'
cookie. Sensitive operations, like changing a password or e-mail, should
be protected with this, to impede the efforts of cookie thieves.
If the user is not authenticated, :meth:`LoginManager.unauthorized` is
called as normal. If they are authenticated, but their session is not
fresh, it will call :meth:`LoginManager.needs_refresh` instead. (In that
case, you will need to provide a :attr:`LoginManager.refresh_view`.)
Behaves identically to the :func:`login_required` decorator with respect
    to configuration variables.
.. Note ::
Per `W3 guidelines for CORS preflight requests
<http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0>`_,
HTTP ``OPTIONS`` requests are exempt from login checks.
:param func: The view function to decorate.
:type func: function
"""
@wraps(func)
def decorated_view(*args, **kwargs):
if request.method in EXEMPT_METHODS:
return func(*args, **kwargs)
elif current_app.login_manager._login_disabled:
return func(*args, **kwargs)
elif not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
elif not login_fresh():
return current_app.login_manager.needs_refresh()
return func(*args, **kwargs)
return decorated_view
def set_login_view(login_view, blueprint=None):
"""
Sets the login view for the app or blueprint. If a blueprint is passed,
the login view is set for this blueprint on ``blueprint_login_views``.
    :param login_view: The name of the login view or a URL for the login view.
:type login_view: str
:param blueprint: The blueprint which this login view should be set on.
Defaults to ``None``.
:type blueprint: object
"""
num_login_views = len(current_app.login_manager.blueprint_login_views)
if blueprint is not None or num_login_views != 0:
(current_app.login_manager.blueprint_login_views[blueprint.name]) = login_view
if (
current_app.login_manager.login_view is not None
and None not in current_app.login_manager.blueprint_login_views
):
(
current_app.login_manager.blueprint_login_views[None]
) = current_app.login_manager.login_view
current_app.login_manager.login_view = None
else:
current_app.login_manager.login_view = login_view
def _get_user():
if has_request_context() and not hasattr(_request_ctx_stack.top, "user"):
current_app.login_manager._load_user()
return getattr(_request_ctx_stack.top, "user", None)
def _cookie_digest(payload, key=None):
key = _secret_key(key)
return hmac.new(key, payload.encode("utf-8"), sha512).hexdigest()
def _get_remote_addr():
address = request.headers.get("X-Forwarded-For", request.remote_addr)
if address is not None:
# An 'X-Forwarded-For' header includes a comma separated list of the
# addresses, the first address being the actual remote address.
address = address.encode("utf-8").split(b",")[0].strip()
return address
def _create_identifier():
user_agent = request.headers.get("User-Agent")
if user_agent is not None:
user_agent = user_agent.encode("utf-8")
base = "{0}|{1}".format(_get_remote_addr(), user_agent)
if str is bytes:
base = text_type(base, "utf-8", errors="replace") # pragma: no cover
h = sha512()
h.update(base.encode("utf8"))
return h.hexdigest()
def _user_context_processor():
return dict(current_user=_get_user())
def _secret_key(key=None):
if key is None:
key = current_app.config["SECRET_KEY"]
if isinstance(key, text_type): # pragma: no cover
key = key.encode("latin1") # ensure bytes
return key
|
import pybullet as p
import pybullet_data
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
cube = p.loadURDF("cube.urdf")
frequency = 240
timeStep = 1. / frequency
p.setGravity(0, 0, -9.8)
p.changeDynamics(cube, -1, linearDamping=0, angularDamping=0)
p.setPhysicsEngineParameter(fixedTimeStep=timeStep)
for i in range(frequency):
p.stepSimulation()
pos, orn = p.getBasePositionAndOrientation(cube)
print(pos)
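# Sanity check (assuming cube.urdf starts near the origin with nothing below
# it): after `frequency` steps of `timeStep` each (one simulated second), free
# fall predicts z ≈ z0 - 0.5 * 9.8 * 1**2 ≈ -4.9, so the printed z values
# should drift down toward roughly -4.9.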
|
#https://channels.readthedocs.io/en/latest/installation.html
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import website.routing
application = ProtocolTypeRouter({
# (http->django views is added by default)
'websocket': AuthMiddlewareStack(
URLRouter(
website.routing.websocket_urlpatterns
)
),
})
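# For reference, a minimal sketch of what website/routing.py could contain
# (the consumer name and URL pattern are assumptions, not taken from this
# project; Channels 3 style, older versions pass the consumer class itself):
#
#     from django.urls import re_path
#     from . import consumers
#
#     websocket_urlpatterns = [
#         re_path(r'ws/chat/$', consumers.ChatConsumer.as_asgi()),
#     ]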
|
# -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
# /////////////// Corruption Helpers ///////////////
import skimage as sk
from torchvision import transforms
import torchvision.transforms.functional as F
from skimage.filters import gaussian
from io import BytesIO
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
import wand.color as WandColor
import ctypes
from PIL import Image as PILImage
import cv2
from scipy.ndimage import zoom as scizoom
from scipy.ndimage import map_coordinates
import warnings
import os
from pkg_resources import resource_filename
warnings.simplefilter("ignore", UserWarning)
def disk(radius, alias_blur=0.1, dtype=np.float32):
# 17 x 17 kernel causes seg fault in opencv
# if radius <= 8:
# L = np.arange(-8, 8 + 1)
# ksize = (3, 3)
if radius <= 5:
L = np.arange(-5, 5 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(mapsize=512, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
    Return square 2d array, side length 'mapsize', of floats normalized to the range 0-1.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
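# For example, plasma_fractal(mapsize=256, wibbledecay=2.0) yields a (256, 256)
# float array in [0, 1]; fog() below scales a slice of it and adds it to the image.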
def clipped_zoom(img, zoom_factor):
h = img.shape[0]
# ceil crop height(= crop width)
ch = int(np.ceil(h / float(zoom_factor)))
top = (h - ch) // 2
img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
# trim off any extra pixels
trim_top = (img.shape[0] - h) // 2
return img[trim_top:trim_top + h, trim_top:trim_top + h]
# /////////////// End Corruption Helpers ///////////////
# /////////////// Corruptions ///////////////
def gaussian_noise(x, severity=1):
# c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
c = [0.04, 0.06, .08, .09, .10][severity - 1]
x = np.array(x) / 255.
return np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
def shot_noise(x, severity=1):
# c = [60, 25, 12, 5, 3][severity - 1]
c = [500, 250, 100, 75, 50][severity - 1]
x = np.array(x) / 255.
return np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
def impulse_noise(x, severity=1):
# c = [.03, .06, .09, 0.17, 0.27][severity - 1]
c = [.01, .02, .03, .05, .07][severity - 1]
x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c)
return np.clip(x, 0, 1) * 255
def speckle_noise(x, severity=1):
# c = [.15, .2, 0.35, 0.45, 0.6][severity - 1]
c = [.06, .1, .12, .16, .2][severity - 1]
x = np.array(x) / 255.
return np.clip(x + x * np.random.normal(size=x.shape, scale=c), 0, 1) * 255
def gaussian_blur(x, severity=1):
# c = [1, 2, 3, 4, 6][severity - 1]
c = [.4, .6, 0.7, .8, 1][severity - 1]
x = gaussian(np.array(x) / 255., sigma=c, multichannel=True)
return np.clip(x, 0, 1) * 255
def glass_blur(x, severity=1):
# sigma, max_delta, iterations
# c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1]
c = [(0.05,1,1), (0.25,1,1), (0.4,1,1), (0.25,1,2), (0.4,1,2)][severity - 1]
x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
size = x.shape[0]
# locally shuffle pixels
for i in range(c[2]):
for h in range(size - c[1], c[1], -1):
for w in range(size - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255
def defocus_blur(x, severity=1):
# c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
c = [(0.3, 0.4), (0.4, 0.5), (0.5, 0.6), (1, 0.2), (1.5, 0.1)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(cv2.filter2D(x[:, :, d].astype(np.float32), -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x32x32 -> 32x32x3
return np.clip(channels, 0, 1) * 255
def motion_blur(x, severity=1):
# c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]
c = [(6,1), (6,1.5), (6,2), (8,2), (9,2.5)][severity - 1]
output = BytesIO()
Image.fromarray(x).save(output, format='PNG')
x = MotionImage(blob=output.getvalue())
x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))
    x = cv2.imdecode(np.frombuffer(x.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED)
if x.shape != (512, 512):
return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB
else: # greyscale to RGB
return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)
def zoom_blur(x, severity=1):
# c = [np.arange(1, 1.11, 0.01),
# np.arange(1, 1.16, 0.01),
# np.arange(1, 1.21, 0.02),
# np.arange(1, 1.26, 0.02),
# np.arange(1, 1.31, 0.03)][severity - 1]
c = [np.arange(1, 1.06, 0.01), np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.01), np.arange(1, 1.26, 0.01)][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
return np.clip(x, 0, 1) * 255
def fog(x, severity=1):
# c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
c = [(.2,3), (.5,3), (0.75,2.5), (1,2), (1.5,1.75)][severity - 1]
size = x.shape[0]
x = np.array(x) / 255.
max_val = x.max()
x += c[0] * plasma_fractal(wibbledecay=c[1])[:size, :size][..., np.newaxis]
return np.clip(x * max_val / (max_val + c[0]), 0, 1) # *255
def frost(x, severity=1):
size = x.shape[0]
# c = [(1, 0.4),
# (0.8, 0.6),
# (0.7, 0.7),
# (0.65, 0.7),
# (0.6, 0.75)][severity - 1]
c = [(1, 0.2), (1, 0.3), (0.9, 0.4), (0.85, 0.4), (0.75, 0.45)][severity - 1]
idx = np.random.randint(5)
filename = [resource_filename(__name__, 'frost/frost1.png'),
resource_filename(__name__, 'frost/frost2.png'),
resource_filename(__name__, 'frost/frost3.png'),
resource_filename(__name__, 'frost/frost4.jpg'),
resource_filename(__name__, 'frost/frost5.jpg'),
resource_filename(__name__, 'frost/frost6.jpg')][idx]
frost = cv2.imread(filename)
# randomly crop and convert to rgb
x_start, y_start = np.random.randint(0, frost.shape[0] - size), np.random.randint(0, frost.shape[1] - size)
frost = frost[x_start:x_start + size, y_start:y_start + size][..., [2, 1, 0]]
return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255) / 255
def snow(x, severity=1):
# c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),
# (0.2, 0.3, 2, 0.5, 12, 4, 0.7),
# (0.55, 0.3, 4, 0.9, 12, 8, 0.7),
# (0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),
# (0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1]
c = [(0.1,0.2,1,0.6,8,3,0.95),
(0.1,0.2,1,0.5,10,4,0.9),
(0.15,0.3,1.75,0.55,10,4,0.9),
(0.25,0.3,2.25,0.6,12,6,0.85),
(0.3,0.3,1.25,0.65,14,12,0.8)][severity - 1]
size = x.shape[0]
x = np.array(x, dtype=np.float32) / 255.
snow_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome
snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
snow_layer[snow_layer < c[3]] = 0
snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
output = BytesIO()
snow_layer.save(output, format='PNG')
snow_layer = MotionImage(blob=output.getvalue())
snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))
    snow_layer = cv2.imdecode(np.frombuffer(snow_layer.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED) / 255.
snow_layer = snow_layer[..., np.newaxis]
x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, code = cv2.COLOR_RGB2GRAY).reshape(size, size, 1) * 1.5 + 0.5)
return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
def spatter(x, severity=1):
# c = [(0.65, 0.3, 4, 0.69, 0.6, 0),
# (0.65, 0.3, 3, 0.68, 0.6, 0),
# (0.65, 0.3, 2, 0.68, 0.5, 0),
# (0.65, 0.3, 1, 0.65, 1.5, 1),
# (0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]
c = [(0.62,0.1,0.7,0.7,0.5,0),
(0.65,0.1,0.8,0.7,0.5,0),
(0.65,0.3,1,0.69,0.5,0),
(0.65,0.1,0.7,0.69,0.6,1),
(0.65,0.1,0.5,0.68,0.6,1)][severity - 1]
x = np.array(x, dtype=np.float32) / 255.
liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])
liquid_layer = gaussian(liquid_layer, sigma=c[2])
liquid_layer[liquid_layer < c[3]] = 0
if c[5] == 0:
liquid_layer = (liquid_layer * 255).astype(np.uint8)
dist = 255 - cv2.Canny(liquid_layer, 50, 150)
dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
_, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
dist = cv2.equalizeHist(dist)
ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
dist = cv2.filter2D(dist, cv2.CV_8U, ker)
dist = cv2.blur(dist, (3, 3)).astype(np.float32)
m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
m /= np.max(m, axis=(0, 1))
m *= c[4]
        # water is pale turquoise
color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1])), axis=2)
color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)
x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)
return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255
else:
m = np.where(liquid_layer > c[3], 1, 0)
m = gaussian(m.astype(np.float32), sigma=c[4])
m[m < 0.8] = 0
# mud brown
color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),
42 / 255. * np.ones_like(x[..., :1]),
20 / 255. * np.ones_like(x[..., :1])), axis=2)
color *= m[..., np.newaxis]
x *= (1 - m[..., np.newaxis])
return np.clip(x + color, 0, 1) * 255
def contrast(x, severity=1):
# c = [0.4, .3, .2, .1, .05][severity - 1]
c = [.75, .5, .4, .3, 0.15][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
return np.clip((x - means) * c + means, 0, 1) * 255
def generate_random_lines(imshape,slant,drop_length,rain_type):
drops=[]
area=imshape[0]*imshape[1]
no_of_drops=area//600
if rain_type.lower()=='drizzle':
no_of_drops=area//770
drop_length=10
elif rain_type.lower()=='heavy':
drop_length=30
elif rain_type.lower()=='torrential':
no_of_drops=area//500
drop_length=60
for i in range(no_of_drops): ## If You want heavy rain, try increasing this
if slant<0:
x= np.random.randint(slant,imshape[1])
else:
x= np.random.randint(0,imshape[1]-slant)
y= np.random.randint(0,imshape[0]-drop_length)
drops.append((x,y))
return drops,drop_length
def rain_process(image,slant,drop_length,drop_color,drop_width,rain_drops):
imshape = image.shape
image_t = image.copy()
for rain_drop in rain_drops:
cv2.line(image_t,(rain_drop[0],rain_drop[1]),(rain_drop[0]+slant,rain_drop[1]+drop_length),drop_color,drop_width)
    image = cv2.blur(image_t,(7,7)) ## rainy views are blurry
brightness_coefficient = 0.7 ## rainy days are usually shady
image_HLS = hls(image) ## Conversion to HLS
image_HLS[:,:,1] = image_HLS[:,:,1]*brightness_coefficient ## scale pixel values down for channel 1(Lightness)
image_RGB= rgb(image_HLS,'hls') ## Conversion to RGB
return image_RGB
def hls(image, src='RGB'):
    # look up the conversion code by name instead of building a string for eval
    image_HLS = cv2.cvtColor(image, getattr(cv2, 'COLOR_' + src.upper() + '2HLS'))
    return image_HLS
def rgb(image, src='BGR'):
    image_RGB = cv2.cvtColor(image, getattr(cv2, 'COLOR_' + src.upper() + '2RGB'))
    return image_RGB
def rain(image, slant=-1,drop_length=20,drop_width=1,drop_color=(200,200,200),rain_type='torrential'): ## (200,200,200) a shade of gray
# verify_image(image)
slant_extreme=slant
# if not(is_numeric(slant_extreme) and (slant_extreme>=-20 and slant_extreme<=20)or slant_extreme==-1):
# raise Exception(err_rain_slant)
# if not(is_numeric(drop_width) and drop_width>=1 and drop_width<=5):
# raise Exception(err_rain_width)
# if not(is_numeric(drop_length) and drop_length>=0 and drop_length<=100):
# raise Exception(err_rain_length)
imshape = image.shape
if slant_extreme==-1:
slant= np.random.randint(-10,10) ##generate random slant if no slant value is given
rain_drops, drop_length= generate_random_lines(imshape,slant,drop_length,rain_type)
output = rain_process(image,slant_extreme,drop_length,drop_color,drop_width,rain_drops)
return output
def brightness(x, severity=1):
# c = [.1, .2, .3, .4, .5][severity - 1]
c = [.05, .1, .15, .2, .3][severity - 1]
x = np.array(x) / 255.
x = sk.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def saturate(x, severity=1):
# c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1]
c = [(0.3, 0), (0.1, 0), (1.5, 0), (2, 0.1), (2.5, 0.2)][severity - 1]
x = np.array(x) / 255.
x = sk.color.rgb2hsv(x)
x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def jpeg_compression(x, severity=1):
# c = [25, 18, 15, 10, 7][severity - 1]
c = [80, 65, 58, 50, 40][severity - 1]
output = BytesIO()
Image.fromarray(x).save(output, 'JPEG', quality=c)
x = np.array(PILImage.open(output))
return x
def pixelate(x, severity=1):
size = x.shape[0]
# c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
c = [0.95, 0.9, 0.85, 0.75, 0.65][severity - 1]
x = Image.fromarray(x)
x = x.resize((int(size * c), int(size * c)),resample=Image.BILINEAR)
x = x.resize((size, size),Image.NEAREST)
return np.array(x)
# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5
def elastic_transform(image, severity=1):
c = [(244 * 2, 244 * 0.7, 244 * 0.1), # 244 should have been 512, but ultimately nothing is incorrect
(244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02),
(244 * 0.07, 244 * 0.01, 244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]
image = np.array(image, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + np.random.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
dx = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
dy = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
return np.clip(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
def blackoutNoise(image, severity=1):
image = np.zeros(image.shape, dtype=np.uint8)
m = (severity, severity, severity)
s = (severity, severity, severity)
image = np.clip(cv2.randn(image, m, s), 0, 255)
return image
def additiveGaussianNoise(image, severity=1):
m = (severity, severity, severity)
s = (severity, severity, severity)
corr = cv2.randn(np.zeros(image.shape, dtype=np.uint8), m, s)
    # promote to a wider dtype so the addition cannot wrap around before clipping
    image = np.clip(image.astype(np.int16) + corr, 0, 255).astype(np.uint8)
return image
def occlusion(image, severity=1):
mask = np.ones(image.shape, dtype=np.uint8)
x = int(image.shape[0] * np.random.rand())
y = int(image.shape[1] * np.random.rand())
r = int((min(image.shape[:2]) / 4) * np.random.rand() + (min(image.shape[:2]) / 4))
cv2.circle(mask, (x, y), r, 0, -1)
image = np.clip(image.copy() * mask, 0, 255)
return image
|
#!/usr/bin/env python3
'''
Use the Neural Engineering framework to solve Pendulum via an elitist GA
Copyright (C) 2020 Simon D. Levy
MIT License
'''
from lib import NefGym
from sys import argv
import pickle
import numpy as np
from sueap.algorithms.elitist import Elitist
class NefPendulum(NefGym):
def __init__(self, neurons=20, seed=None):
NefGym.__init__(self, 'Pendulum-v0', neurons, seed)
def activate(self, x):
return np.clip(x, -2, +2)
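    # Pendulum-v0's action space is a single torque in [-2, 2], so clipping the
    # decoded network output keeps actions inside that range.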
if __name__ == '__main__':
if len(argv) < 2:
print('Usage: python3 %s FILE' % argv[0])
exit(0)
problem = NefPendulum()
net = pickle.load(open(argv[1], 'rb'))
print('Got reward %.3f in %d steps' % problem.test(net))
|
import networkx as nx
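# Each input line looks like "A)B", meaning B directly orbits A; splitting on
# ")" turns every line into an edge of an undirected orbit graph. The first
# print is the total number of direct and indirect orbits, the second the
# number of orbital transfers between YOU and SAN.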
g = nx.Graph([x.split(")") for x in open("input.txt").read().splitlines()])
print(sum([nx.shortest_path_length(g, "COM", x) for x in g.nodes]))
print(nx.shortest_path_length(g, "YOU", "SAN") - 2)
|
###############################################################################
# Author: Daniil Budanov
# Contact: danbudanov@gmail.com
# Summer Internship - 2016
###############################################################################
# Title: __init__.py
# Project: Security System
# Description:
# package dependency components
# Last Modified: 7.14.2016
###############################################################################
# from onlinevid import *
from trigger import *
from sys import argv
from buff import *
from dtinfo import currDate, currTime
from args import *
|
from .alphavantage import AlphaVantage as av
class TimeSeries(av):
"""This class implements all the api calls to times series
"""
@av._output_format
@av._call_api_on_func
def get_intraday(self, symbol, interval='15min', outputsize='compact'):
""" Return intraday time series in two json objects as data and
meta_data. It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
            interval: time interval between two consecutive values,
supported values are '1min', '5min', '15min', '30min', '60min'
(default '15min')
outputsize: The size of the call, supported values are
                'compact' and 'full'; 'compact' returns the last 100 points in the
                data series, and 'full' returns the full-length intraday time
series, commonly above 1MB (default 'compact')
"""
_FUNCTION_KEY = "TIME_SERIES_INTRADAY"
return _FUNCTION_KEY, "Time Series ({})".format(interval), 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_intraday_extended(self, symbol, interval='15min', slice='year1month1', adjusted=True):
""" Return extended intraday time series in one csv_reader object.
It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
            interval: time interval between two consecutive values,
supported values are '1min', '5min', '15min', '30min', '60min'
(default '15min')
slice: the trailing 2 years of intraday data is evenly divided into
24 "slices" - year1month1, year1month2, ..., year2month12
adjusted: By default, adjusted=true and the output time series is
adjusted by historical split and dividend events.
Set adjusted=false to query raw (as-traded) intraday values.
"""
_FUNCTION_KEY = "TIME_SERIES_INTRADAY_EXTENDED"
return _FUNCTION_KEY, "Time Series ({})".format(interval), 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_daily(self, symbol, outputsize='compact'):
""" Return daily time series in two json objects as data and
meta_data. It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
outputsize: The size of the call, supported values are
                'compact' and 'full'; 'compact' returns the last 100 points in the
                data series, and 'full' returns the full-length daily time
series, commonly above 1MB (default 'compact')
"""
_FUNCTION_KEY = "TIME_SERIES_DAILY"
return _FUNCTION_KEY, 'Time Series (Daily)', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_daily_adjusted(self, symbol, outputsize='compact'):
""" Return daily adjusted (date, daily open, daily high, daily low,
daily close, daily split/dividend-adjusted close, daily volume)
time series in two json objects as data and
meta_data. It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
outputsize: The size of the call, supported values are
                'compact' and 'full'; 'compact' returns the last 100 points in the
                data series, and 'full' returns the full-length daily time
series, commonly above 1MB (default 'compact')
"""
_FUNCTION_KEY = "TIME_SERIES_DAILY_ADJUSTED"
return _FUNCTION_KEY, 'Time Series (Daily)', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_weekly(self, symbol):
""" Return weekly time series in two json objects as data and
meta_data. It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
"""
_FUNCTION_KEY = "TIME_SERIES_WEEKLY"
return _FUNCTION_KEY, 'Weekly Time Series', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_weekly_adjusted(self, symbol):
""" weekly adjusted time series (last trading day of each week,
weekly open, weekly high, weekly low, weekly close, weekly adjusted
close, weekly volume, weekly dividend) of the equity specified,
covering up to 20 years of historical data.
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
"""
_FUNCTION_KEY = "TIME_SERIES_WEEKLY_ADJUSTED"
return _FUNCTION_KEY, 'Weekly Adjusted Time Series', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_monthly(self, symbol):
""" Return monthly time series in two json objects as data and
meta_data. It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
"""
_FUNCTION_KEY = "TIME_SERIES_MONTHLY"
return _FUNCTION_KEY, 'Monthly Time Series', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_monthly_adjusted(self, symbol):
""" Return monthly time series in two json objects as data and
meta_data. It raises ValueError when problems arise
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
"""
_FUNCTION_KEY = "TIME_SERIES_MONTHLY_ADJUSTED"
return _FUNCTION_KEY, 'Monthly Adjusted Time Series', 'Meta Data'
@av._output_format
@av._call_api_on_func
def get_quote_endpoint(self, symbol):
""" Return the latest price and volume information for a
security of your choice
Keyword Arguments:
symbol: the symbol for the equity we want to get its data
"""
_FUNCTION_KEY = "GLOBAL_QUOTE"
return _FUNCTION_KEY, 'Global Quote', None
@av._output_format
@av._call_api_on_func
def get_symbol_search(self, keywords):
""" Return best matching symbols and market information
based on keywords. It raises ValueError when problems arise
Keyword Arguments:
keywords: the keywords to query on
"""
_FUNCTION_KEY = "SYMBOL_SEARCH"
return _FUNCTION_KEY, 'bestMatches', None
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import astropy
from scipy.spatial import cKDTree
import numpy as np
import matplotlib.pyplot as plt
data=np.genfromtxt('ybs.degbv',names=True)
messier=np.genfromtxt('Messierdec.txt',names=True)
vlim=4.5
magscale=10
starsize=magscale*(vlim-data['v'])
#norm = ((-data['v'])-( (-data['v'])).min())/(data['v'].max()-data['v'].min())
#starsize=vlim+norm*starsize
import astropy
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
starcoords=SkyCoord(ra=data['ra']*u.degree,dec=data['dec']*u.degree)
mcoords=SkyCoord(ra=messier['Mra']*15.*u.degree,dec=messier['Mdec']*u.degree)
CT=EarthLocation(lat=-30.159*u.deg,lon=-70.809*u.deg,height=2207.*u.m)
KP=EarthLocation(lat=31.98*u.deg,lon=-111.60*u.deg,height=2097.*u.m)
RM=EarthLocation(lat=28.7569*u.deg,lon=-17.8925*u.deg,height=2267.*u.m)
sitecodes=['CT','KP','RM']
sitenames=['Cerro Tololo','Kitt Peak', 'La Palma']
for site in range(0,2):
if site==0:
obsloc=CT
if site==1:
obsloc=KP
utcoffset=-5.0*u.hour
showtime = Time('2015-7-21 22:00:00') - utcoffset
showtime=Time.now()
print(showtime.iso)
staraltaz=starcoords.transform_to(AltAz(obstime=showtime,location=obsloc))
az2plot=np.pi/2.+np.array((3.1415926/180.)*u.degree*staraltaz.az)
zd2plot=np.array(90.*u.degree-staraltaz.alt)
#pos4kd=np.array([[az2plot],[zd2plot]])
upind=(zd2plot < 90.).nonzero()
plt.clf()
plt.figure(site+1)
ax=plt.subplot(111,polar=True)
ax.grid(False)
ax.set_xticklabels(['W', '', 'N', '', 'E', '', 'S', ''])
#plt.fill_between([0,90],[0,0],[360,360],facecolor='0')
plt.scatter(az2plot[upind],zd2plot[upind],s=starsize[upind],c=data['bv'][upind],cmap='rainbow',linewidth=0,vmax=1.2,vmin=-0.5)
plt.ylim([0.,90.])
cb=plt.colorbar(pad=0.10)
cb.set_label('Star color, B-V')
#plt.tick_params(axis='x',labelbottom='off')
plt.tick_params(axis='y',labelleft='off')
ax.set_xticklabels(['W', '', 'N', '', 'E', '', 'S', ''])
# add parallels of declination every 30 degrees
for jdec in range(5):
pardeg=60.-30.*jdec
parra=np.array(range(361))
skpar=SkyCoord(ra=parra*u.degree,dec=pardeg*u.degree)
paraltaz=skpar.transform_to(AltAz(obstime=showtime,location=obsloc))
paraz2plot=np.pi/2.+np.array((3.14159265/180.)*u.degree*paraltaz.az)
parzd2plot=np.array(90.*u.degree-paraltaz.alt)
plt.plot(paraz2plot,parzd2plot,linewidth=1,color='gray',linestyle=':')
# plot Messier objects
maltaz=mcoords.transform_to(AltAz(obstime=showtime,location=obsloc))
maz2plot=np.pi/2.+np.array((3.1415926/180.)*u.degree*maltaz.az)
mzd2plot=np.array(90.*u.degree-maltaz.alt)
upm=(mzd2plot < 90.).nonzero()
#plt.scatter(maz2plot[upm],mzd2plot[upm],s=100,c=messier['Mclass'][upm],cmap='rainbow',alpha=0.4,linewidth=0)
plt.title(str(sitenames[site])+' '+showtime.iso+' UT\n')
labelcolors=np.array(['blue','blue','green','orange','red'])
mlabels=np.array(['{0}'.format(i+1) for i in range(110)])
for j in range(110):
plt.annotate(mlabels[j],xy=(maz2plot[j],mzd2plot[j]),xytext=(0,0),textcoords='offset points',color=labelcolors[messier['Mclass'][j]],size='small')
#add Magellanic clouds
sklmc=SkyCoord(ra=15.0*5.25*u.degree,dec=-68.7*u.degree)
sksmc=SkyCoord(ra=0.77*15.0*u.degree,dec=-73.0*u.degree)
lmcaltaz=sklmc.transform_to(AltAz(obstime=showtime,location=obsloc))
smcaltaz=sksmc.transform_to(AltAz(obstime=showtime,location=obsloc))
plt.scatter(np.pi/2.+np.array((3.1415926/180.)*u.degree*lmcaltaz.az),90.*u.degree-lmcaltaz.alt,s=250,c='green',alpha=0.3)
plt.scatter(np.pi/2.+np.array((3.1415926/180.)*u.degree*smcaltaz.az),90.*u.degree-smcaltaz.alt,s=120,c='green',alpha=0.3)
#add constellation lines
conlines=np.genfromtxt('constellations.txt',names="star1, star2")
nstar1=np.array(conlines['star1'])
nstar2=np.array(conlines['star2'])
nstars=nstar1.size
starnumbers=np.array(data['starnum'])
for jstar in range(nstars):
indexstar1=np.where(starnumbers==nstar1[jstar])[0]
indexstar2=np.where(data['starnum']==nstar2[jstar])[0]
plotx=np.array((az2plot[indexstar1],az2plot[indexstar2]))
ploty=np.array((zd2plot[indexstar1],zd2plot[indexstar2]))
plt.plot(plotx,ploty,linewidth=1,color='black',zorder=0)
plt.annotate('Messier Objects:',xy=(0.04,0.18),xycoords='figure fraction')
plt.annotate('Nebula',xy=(0.05,0.145),xycoords='figure fraction',color='blue')
plt.annotate('Galaxy',xy=(0.05,0.11),xycoords='figure fraction',color='green')
plt.annotate('Open cluster',xy=(0.05,0.075),xycoords='figure fraction',color='orange')
plt.annotate('Globular cluster',xy=(0.05,0.04),xycoords='figure fraction',color='red')
plt.show()
if site==0:
plt.savefig('SkyplotCTIO.png')
if site==1:
plt.savefig('SkyplotKPNO.png')
|
import time
from collections import deque
import gym
import numpy as np
import tensorflow as tf
from stable_baselines import logger
from stable_baselines.common import explained_variance, tf_util, ActorCriticRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.a2c.utils import discount_with_dones, Scheduler, find_trainable_variables, mse, \
total_episode_reward_logger
from stable_baselines.ppo2.ppo2 import safe_mean
class A2C(ActorCriticRLModel):
"""
The A2C (Advantage Actor Critic) model class, https://arxiv.org/abs/1602.01783
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) Discount factor
:param n_steps: (int) The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param vf_coef: (float) Value function coefficient for the loss calculation
:param ent_coef: (float) Entropy coefficient for the loss calculation
:param max_grad_norm: (float) The maximum value for the gradient clipping
:param learning_rate: (float) The learning rate
:param alpha: (float) RMSProp decay parameter (default: 0.99)
:param epsilon: (float) RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update)
(default: 1e-5)
:param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',
'double_linear_con', 'middle_drop' or 'double_middle_drop')
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
(used only for loading)
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
"""
def __init__(self, policy, env, gamma=0.99, n_steps=5, vf_coef=0.25, ent_coef=0.01, max_grad_norm=0.5,
learning_rate=7e-4, alpha=0.99, epsilon=1e-5, lr_schedule='constant', verbose=0, tensorboard_log=None,
_init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False):
super(A2C, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs)
self.n_steps = n_steps
self.gamma = gamma
self.vf_coef = vf_coef
self.ent_coef = ent_coef
self.max_grad_norm = max_grad_norm
self.alpha = alpha
self.epsilon = epsilon
self.lr_schedule = lr_schedule
self.learning_rate = learning_rate
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
self.graph = None
self.sess = None
self.learning_rate_ph = None
self.n_batch = None
self.actions_ph = None
self.advs_ph = None
self.rewards_ph = None
self.pg_loss = None
self.vf_loss = None
self.entropy = None
self.params = None
self.apply_backprop = None
self.train_model = None
self.step_model = None
self.step = None
self.proba_step = None
self.value = None
self.initial_state = None
self.learning_rate_schedule = None
self.summary = None
self.episode_reward = None
# if we are loading, it is possible the environment is not known, however the obs and action space are known
if _init_setup_model:
self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.train_model
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, self.actions_ph, policy.policy
return policy.obs_ph, self.actions_ph, policy.deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the A2C model must be an " \
"instance of common.policies.ActorCriticPolicy."
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf_util.make_session(graph=self.graph)
self.n_batch = self.n_envs * self.n_steps
n_batch_step = None
n_batch_train = None
if issubclass(self.policy, RecurrentActorCriticPolicy):
n_batch_step = self.n_envs
n_batch_train = self.n_envs * self.n_steps
step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
n_batch_step, reuse=False, **self.policy_kwargs)
with tf.variable_scope("train_model", reuse=True,
custom_getter=tf_util.outer_scope_getter("train_model")):
train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs,
self.n_steps, n_batch_train, reuse=True, **self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
self.actions_ph = train_model.pdtype.sample_placeholder([None], name="action_ph")
self.advs_ph = tf.placeholder(tf.float32, [None], name="advs_ph")
self.rewards_ph = tf.placeholder(tf.float32, [None], name="rewards_ph")
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
neglogpac = train_model.proba_distribution.neglogp(self.actions_ph)
self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())
self.pg_loss = tf.reduce_mean(self.advs_ph * neglogpac)
self.vf_loss = mse(tf.squeeze(train_model.value_flat), self.rewards_ph)
# https://arxiv.org/pdf/1708.04782.pdf#page=9, https://arxiv.org/pdf/1602.01783.pdf#page=4
# and https://github.com/dennybritz/reinforcement-learning/issues/34
# suggest to add an entropy component in order to improve exploration.
loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef
tf.summary.scalar('entropy_loss', self.entropy)
tf.summary.scalar('policy_gradient_loss', self.pg_loss)
tf.summary.scalar('value_function_loss', self.vf_loss)
tf.summary.scalar('loss', loss)
self.params = find_trainable_variables("model")
grads = tf.gradients(loss, self.params)
if self.max_grad_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, self.max_grad_norm)
grads = list(zip(grads, self.params))
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))
tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))
if self.full_tensorboard_log:
tf.summary.histogram('discounted_rewards', self.rewards_ph)
tf.summary.histogram('learning_rate', self.learning_rate)
tf.summary.histogram('advantage', self.advs_ph)
if tf_util.is_image(self.observation_space):
tf.summary.image('observation', train_model.obs_ph)
else:
tf.summary.histogram('observation', train_model.obs_ph)
trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.alpha,
epsilon=self.epsilon)
self.apply_backprop = trainer.apply_gradients(grads)
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.proba_step = step_model.proba_step
self.value = step_model.value
self.initial_state = step_model.initial_state
tf.global_variables_initializer().run(session=self.sess)
self.summary = tf.summary.merge_all()
def _train_step(self, obs, states, rewards, masks, actions, values, update, writer=None):
"""
applies a training step to the model
:param obs: ([float]) The input observations
:param states: ([float]) The states (used for recurrent policies)
:param rewards: ([float]) The rewards from the environment
:param masks: ([bool]) Whether or not the episode is over (used for recurrent policies)
:param actions: ([float]) The actions taken
:param values: ([float]) The value function estimates
:param update: (int) the current step iteration
:param writer: (TensorFlow Summary.writer) the writer for tensorboard
:return: (float, float, float) policy loss, value loss, policy entropy
"""
advs = rewards - values
cur_lr = None
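# Advance the learning-rate schedule once per collected sample so the annealed rate tracks the number of processed timesteps.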
for _ in range(len(obs)):
cur_lr = self.learning_rate_schedule.value()
assert cur_lr is not None, "Error: the observation input array cannot be empty"
td_map = {self.train_model.obs_ph: obs, self.actions_ph: actions, self.advs_ph: advs,
self.rewards_ph: rewards, self.learning_rate_ph: cur_lr}
if states is not None:
td_map[self.train_model.states_ph] = states
td_map[self.train_model.dones_ph] = masks
if writer is not None:
# run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
if self.full_tensorboard_log and (1 + update) % 10 == 0:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop],
td_map, options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % (update * (self.n_batch + 1)))
else:
summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)
writer.add_summary(summary, update * (self.n_batch + 1))
else:
policy_loss, value_loss, policy_entropy, _ = self.sess.run(
[self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)
return policy_loss, value_loss, policy_entropy
def learn(self, total_timesteps, callback=None, seed=None, log_interval=100, tb_log_name="A2C",
reset_num_timesteps=True):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn(seed)
self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps,
schedule=self.lr_schedule)
runner = A2CRunner(self.env, self, n_steps=self.n_steps, gamma=self.gamma)
self.episode_reward = np.zeros((self.n_envs,))
# Training stats (when using Monitor wrapper)
ep_info_buf = deque(maxlen=100)
t_start = time.time()
for update in range(1, total_timesteps // self.n_batch + 1):
# true_reward is the reward without discount
obs, states, rewards, masks, actions, values, ep_infos, true_reward = runner.run()
ep_info_buf.extend(ep_infos)
_, value_loss, policy_entropy = self._train_step(obs, states, rewards, masks, actions, values,
self.num_timesteps // (self.n_batch + 1), writer)
n_seconds = time.time() - t_start
fps = int((update * self.n_batch) / n_seconds)
if writer is not None:
self.episode_reward = total_episode_reward_logger(self.episode_reward,
true_reward.reshape((self.n_envs, self.n_steps)),
masks.reshape((self.n_envs, self.n_steps)),
writer, self.num_timesteps)
self.num_timesteps += self.n_batch + 1
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
if self.verbose >= 1 and (update % log_interval == 0 or update == 1):
explained_var = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", self.num_timesteps)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(explained_var))
if len(ep_info_buf) > 0 and len(ep_info_buf[0]) > 0:
logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
logger.dump_tabular()
return self
def save(self, save_path):
data = {
"gamma": self.gamma,
"n_steps": self.n_steps,
"vf_coef": self.vf_coef,
"ent_coef": self.ent_coef,
"max_grad_norm": self.max_grad_norm,
"learning_rate": self.learning_rate,
"alpha": self.alpha,
"epsilon": self.epsilon,
"lr_schedule": self.lr_schedule,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params = self.sess.run(self.params)
self._save_to_file(save_path, data=data, params=params)
class A2CRunner(AbstractEnvRunner):
def __init__(self, env, model, n_steps=5, gamma=0.99):
"""
A runner to learn the policy of an environment for an a2c model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) Discount factor
"""
super(A2CRunner, self).__init__(env=env, model=model, n_steps=n_steps)
self.gamma = gamma
def run(self):
"""
Run a learning step of the model
:return: ([float], [float], [float], [bool], [float], [float], [dict], [float])
observations, states, rewards, masks, actions, values, episode infos, true (undiscounted) rewards
"""
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], []
mb_states = self.states
ep_infos = []
for _ in range(self.n_steps):
actions, values, states, _ = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
obs, rewards, dones, infos = self.env.step(clipped_actions)
for info in infos:
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
ep_infos.append(maybe_ep_info)
self.states = states
self.dones = dones
self.obs = obs
mb_rewards.append(rewards)
mb_dones.append(self.dones)
# batch of steps to batch of rollouts
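# Collected lists are shaped (n_steps, n_envs, ...); swap axes to (n_envs, n_steps, ...) so each row holds one environment's rollout.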
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)
mb_actions = np.asarray(mb_actions, dtype=self.env.action_space.dtype).swapaxes(0, 1)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(0, 1)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
true_rewards = np.copy(mb_rewards)
last_values = self.model.value(self.obs, self.states, self.dones).tolist()
# discount/bootstrap off value fn
for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
dones = dones.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
mb_rewards[n] = rewards
# convert from [n_env, n_steps, ...] to [n_steps * n_env, ...]
mb_rewards = mb_rewards.reshape(-1, *mb_rewards.shape[2:])
mb_actions = mb_actions.reshape(-1, *mb_actions.shape[2:])
mb_values = mb_values.reshape(-1, *mb_values.shape[2:])
mb_masks = mb_masks.reshape(-1, *mb_masks.shape[2:])
true_rewards = true_rewards.reshape(-1, *true_rewards.shape[2:])
return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, ep_infos, true_rewards
|
import ast
import os
import pandas as pd
import requests
from datetime import datetime
from furl import furl
SQUASH_API_URL = os.environ.get('SQUASH_API_URL',
'http://localhost:8000/dashboard/api/')
def get_endpoint_urls():
"""
Lookup API endpoint URLs
"""
r = requests.get(SQUASH_API_URL)
r.raise_for_status()
return r.json()
def get_data(endpoint, params=None):
"""Return data as a dict from
an API endpoint """
api = get_endpoint_urls()
# e.g. http://localhost:8000/AMx?ci_id=1&ci_dataset=cfht&metric=AM1
r = requests.get(api[endpoint],
params=params)
r.raise_for_status()
return r.json()
def get_data_as_pandas_df(endpoint, params=None):
"""
Return data as a pandas dataframe from
an API endpoint
"""
result = get_data(endpoint, params)
data = pd.DataFrame.from_dict(result, orient='index').transpose()
return data
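# Illustrative sketch (endpoint name and params mirror the examples above; not verified):
#     df = get_data_as_pandas_df('measurements',
#                                params={'job__ci_dataset': 'cfht', 'metric': 'AM1'})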
def get_datasets(default=None):
"""Get a list of datasets from the API
and a default value
Returns
-------
dict
dictionary with keys 'datasets' (the list of dataset names) and
'default' (the default dataset; a valid `default` argument overrides
the value obtained from the API)
"""
datasets = get_data('datasets')
default_dataset = get_data('defaults')['ci_dataset']
if default:
if default in datasets:
default_dataset = default
return {'datasets': datasets, 'default': default_dataset}
def get_metrics(default=None):
"""Get the list of metrics from the API
and a default value
Returns
-------
dict
dictionary with keys 'metrics' (the list of metric names) and
'default' (the default metric; a valid `default` argument overrides
the value returned from the API)
"""
r = get_data('metrics')
metrics = [m['metric'] for m in r['results']]
default_metric = get_data('defaults')['metric']
if default:
if default in metrics:
default_metric = default
return {'metrics': metrics, 'default': default_metric}
def get_value(specs, name):
""" Helper function to unpack metric specification
values
Parameters
----------
specs: list
list of dicts, each with 'name' and 'value' keys
name: str
the spec name
Returns
-------
value: float or None
value of the spec if it exists, None otherwise
"""
value = None
for s in specs:
if s['name'] == name:
value = s['value']
break
return value
def get_specs(name):
"""Get metric specifications thresholds
from its name
Parameters
----------
name: str
a valid metric name
Returns
-------
unit: str
metric unit
description: str
metric description
minimum: float
metric minimum specification
design: float
metric design specification
stretch: float
metric stretch goal
"""
r = get_data('metrics')
unit = str()
description = str()
specs = []
minimum = None
design = None
stretch = None
for m in r['results']:
if m['metric'] == name:
unit = m['unit']
description = m['description']
# literal_eval safely parses the stringified spec list (avoids eval on API data)
specs = ast.literal_eval(str(m['specs']))
break
if specs:
minimum = get_value(specs, 'minimum')
design = get_value(specs, 'design')
stretch = get_value(specs, 'stretch')
return {'unit': unit, 'description': description,
'minimum': minimum, 'design': design, 'stretch': stretch}
def get_url_args(doc, defaults=None):
"""Return url args recovered from django_full_path cookie in
the bokeh request header.
If default values are provided, overwrite the default values
obtained from the API
"""
args = get_data('defaults')
# overwrite api default values
if defaults:
for key in defaults:
args[key] = defaults[key]
r = doc().session_context.request
if r:
if 'django_full_path' in r.cookies:
django_full_path = r.cookies['django_full_path'].value
tmp = furl(django_full_path).args
for key in tmp:
# overwrite default values with those passed
# as url args, make sure the url arg (key) is valid
if key in args:
args[key] = tmp[key]
# the bokeh app name is the second segment of the url path
args['bokeh_app'] = furl(django_full_path).path.segments[1]
return args
# TODO: these functions are used by the monitor app and need refactoring
def get_initial_page(page_size, num_pages, window):
# Page size in hours assuming CI_TIME_INTERVAL
CI_TIME_INTERVAL = 8
page_window = page_size * CI_TIME_INTERVAL
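# e.g. with page_size=10 jobs per page, each page spans 10 * 8 = 80 hours, so a 'weeks' window keeps the last int(168/80) = 2 pages.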
if window == 'weeks':
initial_page = num_pages - int((24*7)/page_window)
elif window == 'months':
# maximum window of 3 months
initial_page = num_pages - int((24*30*3)/page_window)
elif window == 'years':
# maximum window of 1 year
initial_page = num_pages - int((24*365)/page_window)
else:
# everything
initial_page = 1
# Make sure we have enough pages for the input time window
if initial_page < 1:
initial_page = 1
return initial_page
def get_meas_by_dataset_and_metric(selected_dataset, selected_metric, window):
""" Get measurements for a given dataset and metric from the measurements
api endpoint
Parameters
----------
selected_dataset : str
the current selected dataset
selected_metric : str
the current selected metric
window : str
time window to load ('weeks', 'months', 'years'; anything else loads everything)
Returns
-------
dict
dictionary with keys 'ci_ids' (job ids from the CI system), 'dates'
(datetimes for each measurement), 'values' (the metric measurements),
'ci_urls' (URLs for the jobs in the CI system), 'names' (changed package
names per job) and 'git_urls' (links to the corresponding package commits)
"""
api = get_endpoint_urls()
# http://localhost:8000/dashboard/api/measurements/?job__ci_dataset=cfht&metric=AM1
r = requests.get(api['measurements'],
params={'job__ci_dataset': selected_dataset,
'metric': selected_metric})
r.raise_for_status()
results = r.json()
# results are paginated, walk through each page
# TODO: figure out how to retrieve the number of pages in DRF
count = results['count']
page_size = len(results['results'])
measurements = []
if page_size > 0:
# ceiling integer
num_pages = int(count/page_size) + (count % page_size > 0)
initial_page = get_initial_page(page_size, num_pages, window)
for page in range(initial_page, num_pages + 1):
r = requests.get(
api['measurements'],
params={'job__ci_dataset': selected_dataset,
'metric': selected_metric,
'page': page})
r.raise_for_status()
measurements.extend(r.json()['results'])
ci_ids = [int(m['ci_id']) for m in measurements]
# 2016-08-10T05:22:37.700146Z
# after DM-7517 jobs return is sorted by date and the same is done for
# the measurements
dates = [datetime.strptime(m['date'], '%Y-%m-%dT%H:%M:%S.%fZ')
for m in measurements]
values = [m['value'] for m in measurements]
ci_urls = [m['ci_url'] for m in measurements]
packages = [m['changed_packages'] for m in measurements]
# list of package names, name is the first element in the tuple
names = []
for i, sublist in enumerate(packages):
names.append([])
for package in sublist:
names[i].append(package[0])
# list of git urls, git package commit sha and base url are the second and
# third elements in the tuple
git_urls = []
for i, sublist in enumerate(packages):
git_urls.append([])
for package in sublist:
# str.strip('.git') strips characters, not the suffix; remove a trailing '.git' explicitly
base_url = package[2][:-4] if package[2].endswith('.git') else package[2]
git_urls[i].append("{}/commit/{}".format(base_url, package[1]))
return {'ci_ids': ci_ids, 'dates': dates, 'values': values,
'ci_urls': ci_urls, 'names': names, 'git_urls': git_urls}
|
import subprocess
import time
jobNumber=10
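# Submit the same job script jobNumber times, pausing between submissions to avoid flooding the scheduler.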
for i in range(jobNumber):
qsub_command = "qsub job.sh"
print(qsub_command)
exit_status = subprocess.call(qsub_command, shell=True)
time.sleep(6)
|
from tkinter import *
import tkinter as tk
import studenttracking_main
import studenttracking_fnct
def load_gui(self):
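"""Build the student-tracking submission form: labels and entry widgets for the
student's details, a scrollable list of saved records, and Submit/Delete buttons
wired to studenttracking_fnct handlers."""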
self.lbl_subform = tk.Label(self.master,text='Submission Form')
self.lbl_subform.grid(row=0,column=1,padx=(27,0),pady=(10,0),sticky=N+W)
self.lbl_fname = tk.Label(self.master,text='First Name:')
self.lbl_fname.grid(row=2,column=0,padx=(27,0),pady=(10,0),sticky=N+W)
self.lbl_lname = tk.Label(self.master,text='Last Name:')
self.lbl_lname.grid(row=3,column=0,padx=(27,0),pady=(10,0),sticky=N+W)
self.lbl_phone = tk.Label(self.master,text='Phone:')
self.lbl_phone.grid(row=4,column=0,padx=(27,0),pady=(10,0),sticky=N+W)
self.lbl_email = tk.Label(self.master,text='Email:')
self.lbl_email.grid(row=5,column=0,padx=(27,0),pady=(10,0),sticky=N+W)
self.lbl_course = tk.Label(self.master,text='Course:')
self.lbl_course.grid(row=7,column=0,padx=(27,0),pady=(10,0),sticky=N+W)
self.lbl_info = tk.Label(self.master,text='Information')
self.lbl_info.grid(row=0,column=4,padx=(27,0),pady=(10,0),sticky=N+W)
self.txt_fname = tk.Entry(self.master,text='')
self.txt_fname.grid(row=2,column=1,rowspan=1,columnspan=2,padx=(30,40),pady=(10,0),sticky=N+E+W)
self.txt_lname = tk.Entry(self.master,text='')
self.txt_lname.grid(row=3,column=1,rowspan=1,columnspan=2,padx=(30,40),pady=(10,0),sticky=N+E+W)
self.txt_phone = tk.Entry(self.master,text='')
self.txt_phone.grid(row=4,column=1,rowspan=1,columnspan=2,padx=(30,40),pady=(10,0),sticky=N+E+W)
self.txt_email = tk.Entry(self.master,text='')
self.txt_email.grid(row=5,column=1,rowspan=1,columnspan=2,padx=(30,40),pady=(10,0),sticky=N+E+W)
self.txt_course = tk.Entry(self.master,text='')
self.txt_course.grid(row=7,column=1,rowspan=1,columnspan=2,padx=(30,40),pady=(10,0),sticky=N+E+W)
self.scrollbar1 = Scrollbar(self.master,orient=VERTICAL)
self.lstList1 = Listbox(self.master,exportselection=0,yscrollcommand=self.scrollbar1.set)
self.lstList1.bind('<<ListboxSelect>>',lambda event: studenttracking_fnct.onSelect(self,event))
self.scrollbar1.config(command=self.lstList1.yview)
self.scrollbar1.grid(row=1,column=7,rowspan=8,columnspan=1,padx=(0,0),pady=(0,0),sticky=N+E+S)
self.lstList1.grid(row=2,column=3,rowspan=7,columnspan=4,padx=(0,0),pady=(0,0),sticky=N+E+S+W)
self.btn_submit = tk.Button(self.master,width=12,height=2,text='Submit',command=lambda: studenttracking_fnct.submit(self))
self.btn_submit.grid(row=7,column=0,padx=(25,0),pady=(45,10),sticky=W)
self.btn_delete = tk.Button(self.master,width=12,height=2,text='Delete',command=lambda: studenttracking_fnct.onDelete(self))
self.btn_delete.grid(row=7,column=1,padx=(25,0),pady=(45,10),sticky=W)
studenttracking_fnct.create_db(self)
studenttracking_fnct.onRefresh(self)
if __name__ == "__main__":
pass
|
from datetime import datetime, timedelta
from djmail.template_mail import MagicMailBuilder, InlineCSSTemplateMail
from unittest.mock import patch
from django_comments import get_form_target
from django_comments.models import Comment
from django_comments.signals import comment_was_posted
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import mail
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from TWLight.applications.factories import ApplicationFactory
from TWLight.applications.models import Application
from TWLight.resources.factories import PartnerFactory
from TWLight.resources.models import Partner
from TWLight.resources.tests import EditorCraftRoom
from TWLight.users.factories import EditorFactory, UserFactory
from TWLight.users.groups import get_coordinators
from TWLight.users.models import Authorization
# We need to import these in order to register the signal handlers; if we don't,
# when we test that those handler functions have been called, we will get
# False even when they work in real life.
from .tasks import (
send_comment_notification_emails,
send_approval_notification_email,
send_rejection_notification_email,
send_user_renewal_notice_emails,
send_proxy_bundle_launch_notice,
contact_us_emails,
)
class ApplicationCommentTest(TestCase):
def setUp(self):
super(ApplicationCommentTest, self).setUp()
self.editor = EditorFactory(user__email="editor@example.com").user
coordinators = get_coordinators()
self.coordinator1 = EditorFactory(
user__email="c1@example.com", user__username="c1"
).user
self.coordinator2 = EditorFactory(
user__email="c2@example.com", user__username="c2"
).user
coordinators.user_set.add(self.coordinator1)
coordinators.user_set.add(self.coordinator2)
self.partner = PartnerFactory()
def _create_comment(self, app, user):
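"""Attach a django_comments Comment by `user` to the given application."""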
CT = ContentType.objects.get_for_model
comm = Comment.objects.create(
content_type=CT(Application),
object_pk=app.pk,
user=user,
user_name=user.username,
comment="Content!",
site=Site.objects.get_current(),
)
comm.save()
return comm
def _set_up_email_test_objects(self):
app = ApplicationFactory(editor=self.editor.editor, partner=self.partner)
factory = RequestFactory()
request = factory.post(get_form_target())
return app, request
def test_comment_email_sending_1(self):
"""
A coordinator posts a comment to an Editor's application and an email
is sent to that Editor. An email is not sent to the coordinator.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
comment1 = self._create_comment(app, self.coordinator1)
comment_was_posted.send(sender=Comment, comment=comment1, request=request)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.editor.email])
def test_comment_email_sending_2(self):
"""
After a coordinator posts a comment, the Editor posts an additional
comment. An email is sent to the coordinator who posted the earlier
comment. An email is not sent to the editor.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
_ = self._create_comment(app, self.coordinator1)
comment2 = self._create_comment(app, self.editor)
comment_was_posted.send(sender=Comment, comment=comment2, request=request)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.coordinator1.email])
def test_comment_email_sending_3(self):
"""
After the editor and coordinator post a comment, an additional
coordinator posts a comment. One email is sent to the first coordinator,
and a distinct email is sent to the editor.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
_ = self._create_comment(app, self.coordinator1)
_ = self._create_comment(app, self.editor)
comment3 = self._create_comment(app, self.coordinator2)
comment_was_posted.send(sender=Comment, comment=comment3, request=request)
self.assertEqual(len(mail.outbox), 2)
# Either order of email sending is fine.
try:
self.assertEqual(mail.outbox[0].to, [self.coordinator1.email])
self.assertEqual(mail.outbox[1].to, [self.editor.email])
except AssertionError:
self.assertEqual(mail.outbox[1].to, [self.coordinator1.email])
self.assertEqual(mail.outbox[0].to, [self.editor.email])
def test_comment_email_sending_4(self):
"""
A comment made on an application that's any further along the process
than PENDING (i.e. a coordinator has taken some action on it) should
fire an email to the coordinator who took the last action on it.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
# Create a coordinator with a test client session
coordinator = EditorCraftRoom(self, Terms=True, Coordinator=True)
self.partner.coordinator = coordinator.user
self.partner.save()
# Set the application to QUESTION status (a coordinator action)
url = reverse("applications:evaluate", kwargs={"pk": app.pk})
response = self.client.post(
url, data={"status": Application.QUESTION}, follow=True
)
comment4 = self._create_comment(app, self.editor)
comment_was_posted.send(sender=Comment, comment=comment4, request=request)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [coordinator.user.email])
def test_comment_email_sending_5(self):
"""
A comment from the applying editor made on an application that
has had no actions taken on it and no existing comments should
not fire an email to anyone.
"""
app, request = self._set_up_email_test_objects()
request.user = UserFactory()
self.assertEqual(len(mail.outbox), 0)
comment5 = self._create_comment(app, self.editor)
comment_was_posted.send(sender=Comment, comment=comment5, request=request)
self.assertEqual(len(mail.outbox), 0)
# We'd like to mock out send_comment_notification_emails and test that
# it is called when comment_was_posted is fired, but we can't; the signal
# handler is attached to the real send_comment_notification_emails, not
# the mocked one.
class ApplicationStatusTest(TestCase):
@patch("TWLight.emails.tasks.send_approval_notification_email")
def test_approval_calls_email_function(self, mock_email):
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.APPROVED
app.save()
self.assertTrue(mock_email.called)
@patch("TWLight.emails.tasks.send_approval_notification_email")
def test_reapproval_does_not_call_email_function(self, mock_email):
"""
Saving an Application with APPROVED status, when it already had an
APPROVED status, should not re-send the email.
"""
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.APPROVED
app.save()
app.save()
self.assertEqual(mock_email.call_count, 1)
@patch("TWLight.emails.tasks.send_rejection_notification_email")
def test_rejection_calls_email_function(self, mock_email):
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.NOT_APPROVED
app.save()
self.assertTrue(mock_email.called)
@patch("TWLight.emails.tasks.send_rejection_notification_email")
def test_rerejection_does_not_call_email_function(self, mock_email):
app = ApplicationFactory(status=Application.PENDING)
app.status = Application.NOT_APPROVED
app.save()
app.save()
self.assertEqual(mock_email.call_count, 1)
def test_pending_does_not_call_email_function(self):
"""
Applications saved with a PENDING status should not generate email.
"""
orig_outbox = len(mail.outbox)
_ = ApplicationFactory(status=Application.PENDING)
self.assertEqual(len(mail.outbox), orig_outbox)
def test_question_does_not_call_email_function(self):
"""
Applications saved with a QUESTION status should not generate email.
"""
orig_outbox = len(mail.outbox)
_ = ApplicationFactory(status=Application.QUESTION)
self.assertEqual(len(mail.outbox), orig_outbox)
def test_sent_does_not_call_email_function(self):
"""
Applications saved with a SENT status should not generate email.
"""
orig_outbox = len(mail.outbox)
_ = ApplicationFactory(status=Application.SENT)
self.assertEqual(len(mail.outbox), orig_outbox)
@patch("TWLight.emails.tasks.send_waitlist_notification_email")
def test_waitlist_calls_email_function(self, mock_email):
partner = PartnerFactory(status=Partner.WAITLIST)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertTrue(mock_email.called)
partner.delete()
app.delete()
@patch("TWLight.emails.tasks.send_waitlist_notification_email")
def test_nonwaitlist_does_not_call_email_function(self, mock_email):
partner = PartnerFactory(status=Partner.AVAILABLE)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertFalse(mock_email.called)
partner.delete()
app.delete()
partner = PartnerFactory(status=Partner.NOT_AVAILABLE)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertFalse(mock_email.called)
partner.delete()
app.delete()
@patch("TWLight.emails.tasks.send_waitlist_notification_email")
def test_waitlisting_partner_calls_email_function(self, mock_email):
"""
Switching a Partner to WAITLIST status should call the email function
for apps to that partner with open statuses.
"""
partner = PartnerFactory(status=Partner.AVAILABLE)
app = ApplicationFactory(status=Application.PENDING, partner=partner)
self.assertFalse(mock_email.called)
partner.status = Partner.WAITLIST
partner.save()
self.assertTrue(mock_email.called)
mock_email.assert_called_with(app)
@patch("TWLight.emails.tasks.send_waitlist_notification_email")
def test_waitlisting_partner_does_not_call_email_function(self, mock_email):
"""
Switching a Partner to WAITLIST status should NOT call the email
function for apps to that partner with closed statuses.
"""
partner = PartnerFactory(status=Partner.AVAILABLE)
app = ApplicationFactory(status=Application.APPROVED, partner=partner)
app = ApplicationFactory(status=Application.NOT_APPROVED, partner=partner)
app = ApplicationFactory(status=Application.SENT, partner=partner)
self.assertFalse(mock_email.called)
partner.status = Partner.WAITLIST
partner.save()
self.assertFalse(mock_email.called)
class ContactUsTest(TestCase):
def setUp(self):
super(ContactUsTest, self).setUp()
self.editor = EditorFactory(user__email="editor@example.com").user
@patch("TWLight.emails.tasks.contact_us_emails")
def test_contact_us_emails(self, mock_email):
factory = RequestFactory()
request = factory.post(get_form_target())
request.user = UserFactory()
editor = EditorFactory()
reply_to = ["editor@example.com"]
cc = ["editor@example.com"]
self.assertEqual(len(mail.outbox), 0)
mail_instance = MagicMailBuilder(template_mail_cls=InlineCSSTemplateMail)
email = mail_instance.contact_us_email(
"wikipedialibrary@wikimedia.org",
{"editor_wp_username": editor.wp_username, "body": "This is a test email"},
)
email.extra_headers["Reply-To"] = ", ".join(reply_to)
email.extra_headers["Cc"] = ", ".join(cc)
email.send()
self.assertEqual(len(mail.outbox), 1)
def test_user_submit_contact_us_emails(self):
EditorCraftRoom(self, Terms=True, Coordinator=False)
self.assertEqual(len(mail.outbox), 0)
contact_us_url = reverse("contact")
contact_us = self.client.get(contact_us_url, follow=True)
contact_us_form = contact_us.context["form"]
data = contact_us_form.initial
data["email"] = "editor@example.com"
data["message"] = "This is a test"
data["cc"] = True
data["submit"] = True
self.client.post(contact_us_url, data)
self.assertEqual(len(mail.outbox), 1)
def test_not_logged_in_user_submit_contact_us_emails(self):
self.assertEqual(len(mail.outbox), 0)
contact_us_url = reverse("contact")
contact_us = self.client.get(contact_us_url, follow=True)
contact_us_form = contact_us.context["form"]
data = contact_us_form.initial
data["email"] = "editor@example.com"
data["message"] = "This is a test"
data["submit"] = True
data["cc"] = True
self.client.post(contact_us_url, data)
self.assertEqual(len(mail.outbox), 0)
class UserRenewalNoticeTest(TestCase):
def setUp(self):
super(UserRenewalNoticeTest, self).setUp()
editor = EditorFactory(user__email="editor@example.com")
self.user = editor.user
self.coordinator = EditorFactory().user
coordinators = get_coordinators()
coordinators.user_set.add(self.coordinator)
self.partner = PartnerFactory()
self.authorization = Authorization()
self.authorization.user = self.user
self.authorization.authorizer = self.coordinator
self.authorization.partner = self.partner
self.authorization.date_expires = datetime.today() + timedelta(weeks=2)
self.authorization.save()
def test_single_user_renewal_notice(self):
"""
Given one authorization that expires in two weeks, ensure
that our email task sends an email to that user.
"""
call_command("user_renewal_notice")
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.user.email])
def test_user_renewal_notice_disabled(self):
"""
Users have the option to disable renewal notices. If users have
disabled emails, we shouldn't send them one.
"""
self.user.userprofile.send_renewal_notices = False
self.user.userprofile.save()
call_command("user_renewal_notice")
self.assertEqual(len(mail.outbox), 0)
def test_user_renewal_notice_doesnt_duplicate(self):
"""
If we run the command a second time, the same user shouldn't receive
a second email.
"""
call_command("user_renewal_notice")
self.assertEqual(len(mail.outbox), 1)
call_command("user_renewal_notice")
self.assertEqual(len(mail.outbox), 1)
def test_user_renewal_notice_past_date(self):
"""
If the authorization expired before today, the user shouldn't
receive a notice.
"""
self.authorization.date_expires = datetime.today() - timedelta(weeks=1)
self.authorization.save()
call_command("user_renewal_notice")
self.assertEqual(len(mail.outbox), 0)
def test_user_renewal_notice_future_date(self):
"""
If the authorization doesn't expire for months, the user
shouldn't receive a notice.
"""
self.authorization.date_expires = datetime.today() + timedelta(weeks=8)
self.authorization.save()
call_command("user_renewal_notice")
self.assertEqual(len(mail.outbox), 0)
def test_user_renewal_notice_future_date_1(self):
"""
If we have multiple authorizations to send emails for, let's make
sure we send distinct emails to the right places.
"""
editor2 = EditorFactory(user__email="editor2@example.com")
authorization2 = Authorization()
authorization2.user = editor2.user
authorization2.authorizer = self.coordinator
authorization2.partner = self.partner
authorization2.date_expires = datetime.today() + timedelta(weeks=1)
authorization2.save()
call_command("user_renewal_notice")
self.assertEqual(len(mail.outbox), 2)
# Make sure that the two emails went to the two expected
# email addresses.
# This looks a little complicated because mail.outbox[0].to is a
# (one element) list, and we need to compare sets to ensure we've
# got 1 of each email.
self.assertEqual(
{mail.outbox[0].to[0], mail.outbox[1].to[0]},
{"editor@example.com", "editor2@example.com"},
)
class CoordinatorReminderEmailTest(TestCase):
def setUp(self):
super(CoordinatorReminderEmailTest, self).setUp()
editor = EditorFactory()
self.user = editor.user
editor2 = EditorFactory()
self.user2 = editor2.user
self.coordinator = EditorFactory(user__email="editor@example.com").user
coordinators = get_coordinators()
coordinators.user_set.add(self.coordinator)
self.partner = PartnerFactory(coordinator=self.coordinator)
self.partner2 = PartnerFactory(coordinator=self.coordinator)
def test_send_coordinator_reminder_email(self):
ApplicationFactory(
partner=self.partner, status=Application.PENDING, editor=self.user.editor
)
# Coordinator only wants reminders for apps under discussion
self.coordinator.userprofile.pending_app_reminders = False
self.coordinator.userprofile.approved_app_reminders = False
self.coordinator.userprofile.save()
call_command("send_coordinator_reminders")
self.assertEqual(len(mail.outbox), 0)
ApplicationFactory(
partner=self.partner2, status=Application.QUESTION, editor=self.user2.editor
)
call_command("send_coordinator_reminders")
self.assertEqual(len(mail.outbox), 1)
# We include the count for all waiting (PENDING, QUESTION,
# APPROVED) apps whenever we send an email, but trigger
# emails only based on preferences i.e. if a coordinator
# has enabled reminders only for QUESTION, we send a
# reminder only when we have an app of status: QUESTION,
# but include info on all apps in the email.
self.assertNotIn("One pending application", mail.outbox[0].body)
self.assertIn("One under discussion application", mail.outbox[0].body)
self.assertNotIn("One approved application", mail.outbox[0].body)
ApplicationFactory(
partner=self.partner, status=Application.APPROVED, editor=self.user2.editor
)
ApplicationFactory(
partner=self.partner2, status=Application.SENT, editor=self.user.editor
)
# Clear mail outbox since approvals send emails
mail.outbox = []
# Coordinator now wants reminders for pending and approved apps as well
self.coordinator.userprofile.pending_app_reminders = True
self.coordinator.userprofile.approved_app_reminders = True
self.coordinator.userprofile.save()
call_command("send_coordinator_reminders")
self.assertEqual(len(mail.outbox), 1)
self.assertIn("One pending application", mail.outbox[0].body)
self.assertIn("One under discussion application", mail.outbox[0].body)
self.assertIn("One approved application", mail.outbox[0].body)
class ProxyBundleLaunchTest(TestCase):
def setUp(self):
super(ProxyBundleLaunchTest, self).setUp()
editor = EditorFactory(user__email="editor@example.com")
self.user = editor.user
def test_proxy_bundle_launch_email_1(self):
"""
With one user, calling the proxy/bundle launch command
should send a single email, to that user.
"""
call_command("proxy_bundle_launch")
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.user.email])
def test_proxy_bundle_launch_email_2(self):
"""
Adding another user should result in two sent emails.
"""
_ = EditorFactory(user__email="editor@example.com")
call_command("proxy_bundle_launch")
self.assertEqual(len(mail.outbox), 2)
def test_proxy_bundle_launch_email_3(self):
"""
The proxy/bundle launch command should record that the email was sent.
"""
self.assertFalse(self.user.userprofile.proxy_notification_sent)
call_command("proxy_bundle_launch")
self.user.userprofile.refresh_from_db()
self.assertTrue(self.user.userprofile.proxy_notification_sent)
def test_proxy_bundle_launch_email_4(self):
"""
The proxy/bundle launch command should not send
to a user we recorded as having received the email
already.
"""
self.user.userprofile.proxy_notification_sent = True
self.user.userprofile.save()
call_command("proxy_bundle_launch")
self.assertEqual(len(mail.outbox), 0)
|
import json
import uuid
import os
from datetime import datetime
from flask import current_app, request, Response, abort, send_from_directory
from webargs import fields
from webargs.flaskparser import use_args, FlaskParser
from enum import Enum
from random import seed, randint
from .. import socketio
from . import main
from .config import MachineType, brew_active_sessions_path, zseries_firmware_path
from .firmware import firmware_filename, firmware_upgrade_required, minimum_firmware
from .model import PicoBrewSession
from .routes_frontend import get_zseries_recipes, parse_brew_session, list_brew_session_files, load_brew_sessions
from .session_parser import active_brew_sessions
from .units import convert_temp
arg_parser = FlaskParser()
seed(1)
events = {}
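# Per-session record of step names reported by the machine (used to track step transitions).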
class SessionType(int, Enum):
RINSE = 0
CLEAN = 1
DRAIN = 2
RACK_BEER = 3
CIRCULATE = 4
SOUS_VIDE = 5
BEER = 6
STILL = 11
COFFEE = 12
CHILL = 13
MANUAL = 14
class ZProgramId(int, Enum):
RINSE = 1
DRAIN = 2
RACK_BEER = 3
CIRCULATE = 4
SOUS_VIDE = 6
CLEAN = 12
BEER_OR_COFFEE = 24
STILL = 26
CHILL = 27
# Get Firmware: /firmware/zseries/<version>
# Response: RAW Bin File
@main.route('/firmware/zseries/<file>', methods=['GET'])
def process_zseries_firmware(file):
current_app.logger.debug('DEBUG: ZSeries fetch firmware file={}'.format(file))
return send_from_directory(zseries_firmware_path(), file)
# ZState: PUT /Vendors/input.cshtml?type=ZState&token={}
# Response: Machine State Response (firmware, boilertype, session stats, reg token)
zseries_query_args = {
'type': fields.Str(required=False), # API request type identifier
'token': fields.Str(required=True), # alpha-numeric unique identifier for Z
'id': fields.Str(required=False), # alpha-numeric unique identifier for Z session/recipe
'ctl': fields.Str(required=False) # recipe list request doesn't use `type` param
}
# ZFetchRecipeSummary: POST /Vendors/input.cshtml?ctl=RecipeRefListController&token={}
# Response: Recipes Response
# ZSessionUpdate: POST /Vendors/input.cshtml?type=ZSessionLog&token={}
# Response: Echo Session Log Request with ID and Date
# ZSession: POST /Vendors/input.cshtml?type=ZSession&token={}&id={} // id == session_id is only present on "complete session" request
# Response: Machine State Response (firmware, boilertype, session stats, reg token)
# StillRequest: POST /Vendors/input.cshtml?type=StillRequest&token={}
# Response: Still Machine State Response (clean acknowledgement, current firmware, update firmware, user registration)
@main.route('/Vendors/input.cshtml', methods=['POST'])
@use_args(zseries_query_args, location='querystring')
def process_zseries_post_request(args):
type = request.args.get('type')
controller = request.args.get('ctl')
# current_app.logger.debug('DEBUG: ZSeries POST request args = {}; request = {}'.format(args, request.json))
if controller == 'RecipeRefListController':
body = request.json
ret = {
"Kind": body['Kind'],
"MaxCount": body['MaxCount'],
"Offset": body['Offset'],
"Recipes": get_zseries_recipe_metadata_list()
}
return Response(json.dumps(ret), mimetype='application/json')
elif type == 'ZSessionLog':
return update_session_log(request.args.get('token'), request.json)
elif type == 'ZSession':
return create_or_close_session(request)
elif type == 'StillRequest':
return register_picostill(request.json)
else:
abort(404)
# ZState: PUT /Vendors/input.cshtml?type=ZState&token={}
# Response: Machine State Response (firmware, boilertype, session stats, reg token)
# ZSession: PUT /Vendors/input.cshtml?type=ZSession&token={}&id={} // id == session_id is only present on "complete session" request
# Response: Machine State Response (firmware, boilertype, session stats, reg token)
@main.route('/Vendors/input.cshtml', methods=['PUT'])
@use_args(zseries_query_args, location='querystring')
def process_zseries_put_request(args):
type = request.args.get('type')
if type == 'ZState' and request.json['CurrentFirmware']:
return process_zstate(request)
elif type == 'ZSession':
return create_or_close_session(request)
else:
abort(404)
# ZRecipeDetails: GET /Vendors/input.cshtml?type=Recipe&token={}&id={} // id == recipe_id
# Response: Remaining Recipe Steps (based on last session update from machine)
# ZResumeSession: GET /Vendors/input.cshtml?type=ResumableSession&token={}&id={} // id == session_id
# Response: Remaining Recipe Steps (based on last session update from machine)
@main.route('/Vendors/input.cshtml', methods=['GET'])
@use_args(zseries_query_args, location='querystring')
def process_zseries_get_request(args):
type = request.args.get('type')
identifier = request.args.get('id')
# current_app.logger.debug('DEBUG: ZSeries GET request args = {};'.format(args))
if type == 'Recipe' and identifier is not None:
return process_recipe_request(identifier)
elif type == 'ResumableSession' and identifier is not None:
return process_recover_session(request.args.get('token'), identifier)
else:
abort(404)
# GET /Vendors/input.cshtml?type=Recipe&token={}&id={} // id == recipe_id
def process_recipe_request(recipe_id):
recipe = get_recipe_by_id(recipe_id)
return recipe.serialize()
# Request: /Vendors/input.cshtml?type=ZState&token=<token>
# { "BoilerType": 1|2, "CurrentFirmware": "1.2.3" }
# Response (example):
# {
# "Alias": "ZSeries",
# "BoilerType": 1,
# "CurrentFirmware": "0.0.116",
# "IsRegistered": true,
# "IsUpdated": true,
# "ProgramUri": null,
# "RegistrationToken": "-1",
# "SessionStats": {
# "DirtySessionsSinceClean": 1,
# "LastSessionType": 5,
# "ResumableSessionID": -1
# },
# "TokenExpired": false,
# "UpdateAddress": "-1",
# "UpdateToFirmware": null,
# "ZBackendError": 0
# }
def process_zstate(args):
uid = request.args['token']
if uid not in active_brew_sessions:
active_brew_sessions[uid] = PicoBrewSession(MachineType.ZSERIES)
json = request.json
update_required = firmware_upgrade_required(MachineType.ZSERIES, json['CurrentFirmware'])
firmware_source = "https://picobrew.com/firmware/zseries/{}".format(firmware_filename(MachineType.ZSERIES, minimum_firmware(MachineType.ZSERIES)))
returnVal = {
"Alias": zseries_alias(uid),
"BoilerType": json.get('BoilerType', None), # TODO sometimes machine loses boilertype, need to resync with known state
"IsRegistered": True, # likely we don't care about registration with BYOS
"IsUpdated": False if update_required else True,
"ProgramUri": None, # what is this?
"RegistrationToken": -1,
"SessionStats": {
"DirtySessionsSinceClean": dirty_sessions_since_clean(uid),
"LastSessionType": last_session_type(uid),
"ResumableSessionID": resumable_session_id(uid)
},
"UpdateAddress": firmware_source if update_required else "-1",
"UpdateToFirmware": None,
"ZBackendError": 0
}
return returnVal
def dirty_sessions_since_clean(uid):
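"""Count beer, coffee and sous vide sessions recorded since the most recent clean
(assumes list_brew_session_files returns the newest session files first)."""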
brew_session_files = list_brew_session_files(uid)
post_clean_sessions = []
clean_found = False
for s in brew_session_files:
session_type = SessionType(session_type_from_filename(s))
if (session_type == SessionType.CLEAN):
clean_found = True
if (not clean_found and session_type in [SessionType.BEER.value, SessionType.COFFEE.value, SessionType.SOUS_VIDE.value]):
post_clean_sessions.append(s)
return len(post_clean_sessions)
def last_session_type(uid):
brew_sessions = list_brew_session_files(uid)
if len(brew_sessions) == 0:
return SessionType.CLEAN
else:
return SessionType(session_type_from_filename(brew_sessions[0])) or SessionType.RINSE
def resumable_session_id(uid):
if uid not in active_brew_sessions:
return -1
return active_brew_sessions[uid].id
def zseries_alias(uid):
if uid not in active_brew_sessions:
return "ZSeries"
return active_brew_sessions[uid].alias or "ZSeries"
def create_or_close_session(args):
session_id = request.args.get('id')
if session_id:
return close_session(request.args.get('token'), session_id, request.json)
else:
return create_session(request.args.get('token'), request.json)
# Request: /Vendor/input.cshtml?type=StillRequest&token=<>
# { "HasCleanedAck": false, "MachineType": 2, "MachineUID": "240ac41d9ae4", "PicoStillUID": "30aea46e6a40" }
# Response:
# {
# "HasCleanedAck": false,
# "MachineType": 2,
# "MachineUID": "240ac41d9ae4",
# "PicoStill": {
# "CleanedAckDate": null,
# "CreationDate": "2018-07-06T15:05:56.57",
# "CurrentFirmware": "0.0.30",
# "FactoryFlashVersion": null,
# "ID": 638,
# "LastCommunication": "2020-07-11T00:09:51.17",
# "Notes": null,
# "ProfileID": 28341,
# "SerialNumber": "ST180706080552",
# "UID": "30aea46e6a40",
# "UpdateToFirmware": null
# },
# "PicoStillUID": "30aea46e6a40"
# }
def register_picostill(args):
return {
"HasCleanedAck": args.get('HasCleanedAck'),
"MachineType": args.get('MachineType'),
"MachineUID": args.get('MachineUID'),
"PicoStill": {
"CleanedAckDate": datetime.utcnow().isoformat() if args.get('HasCleanedAck') else None, # date of last cleaning
"CreationDate": "2018-07-06T15:05:56.57", # date of manufacturing? (never sent to server)
"CurrentFirmware": "0.0.30",
"FactoryFlashVersion": None,
"ID": 1234, # auto incremented picostill number? (never sent to server)
"LastCommunication": datetime.utcnow().isoformat(),
"Notes": None,
"ProfileID": 28341, # how to get the userId?
"SerialNumber": "ST123456780123", # device serial number (never sent to server)
"UID": args.get('PicoStillUID'),
"UpdateToFirmware": None
},
"PicoStillUID": args.get('PicoStillUID'),
}
# Request: /Vendors/input.cshtml?type=ZSession&token=<token>&id=<session_id>
# (example - beer session):
# {
# "DurationSec": 11251,
# "FirmwareVersion": "0.0.119",
# "GroupSession": true,
# "MaxTemp": 98.22592515,
# "MaxTempAddedSec": 0,
# "Name": "All Good Things",
# "PressurePa": 101975.6172,
# "ProgramParams": {
# "Abv": -1, # not a customization feature on the Z
# "Duration": 0,
# "Ibu": -1,
# "Intensity": 0,
# "Temperature": 0,
# "Water": 13.1
# },
# "RecipeID": "150xxx",
# "SessionType": 6, # see options in SessionType
# "ZProgramId": 24 # see options in ZProgram
# }
# Response (example - begin session):
# {
# "Active": false,
# "CityLat": xx.xxxxxx,
# "CityLng": -yyy.yyyyyy,
# "ClosingDate": "2020-05-04T19:54:58.74",
# "CreationDate": "2020-05-04T19:46:04.153",
# "Deleted": false,
# "DurationSec": 578,
# "FirmwareVersion": "0.0.119",
# "GUID": "<all upper case machine guid>",
# "GroupSession": false,
# "ID": <session-id>,
# "LastLogID": 11407561,
# "Lat": xx.xxxxxx,
# "Lng": -yyy.yyyyyy,
# "MaxTemp": 98.24455443,
# "MaxTempAddedSec": 0,
# "Name": "RINSE",
# "Notes": null,
# "Pressure": 0,
# "ProfileID": zzzz,
# "ProgramParams": {
# "Abv": null, # not a customization feature on the Z
# "Duration": 0.0,
# "Ibu": null,
# "Intensity": 0.0,
# "Temperature": 0.0,
# "Water": 0.0
# },
# "RecipeGuid": null,
# "RecipeID": null,
# "SecondsRemaining": 0,
# "SessionLogs": [],
# "SessionType": 0,
# "StillUID": null,
# "StillVer": null,
# "ZProgramId": 1,
# "ZSeriesID": www
# }
# Request (start still session)
# {
# "DurationSec": 14,
# "FirmwareVersion": "0.0.119",
# "GroupSession": true,
# "MaxTemp": 97.71027899,
# "MaxTempAddedSec": 0,
# "Name": "PICOSTILL",
# "PressurePa": 100490.6641,
# "ProgramParams": {
# "Abv": -1,
# "Duration": 0,
# "Ibu": -1,
# "Intensity": 0,
# "Temperature": 0,
# "Water": 0
# },
# "RecipeID": -1,
# "SessionType": 11,
# "StillUID": "30aea46e6a40",
# "StillVer": "0.0.30",
# "ZProgramId": 26
# }
#
def create_session(token, body):
uid = token # token uniquely identifies the machine
still_uid = body.get('StillUID') # UID uniquely identifying the attached still (required for still sessions)
recipe = get_recipe_by_name(body['Name'])
# error out if the recipe isn't known and this is a beer session (type 6), since
# rinse, rack beer, clean, coffee, sous vide, etc. have no server-known "recipes"
if recipe is None and body['SessionType'] == SessionType.BEER:
error = {
'error': 'recipe \'{}\' not found - unable to start session'.format(body['Name'])
}
return Response(json.dumps(error), status=404, mimetype='application/json')
elif recipe:
current_app.logger.debug('recipe for session: {}'.format(recipe.serialize()))
if uid not in active_brew_sessions:
active_brew_sessions[uid] = PicoBrewSession(MachineType.ZSERIES)
session_guid = uuid.uuid4().hex[:32]
session_id = increment_session_id(uid)
active_session = active_brew_sessions[uid]
active_session.session = session_guid
active_session.id = session_id
active_session.created_at = datetime.utcnow()
active_session.name = recipe.name if recipe else body['Name']
active_session.type = body['SessionType']
# replace spaces and '#' with other character sequences
encoded_recipe = active_brew_sessions[uid].name.replace(' ', '_').replace("#", "%23")
filename = '{0}#{1}#{2}#{3}#{4}.json'.format(
datetime.now().strftime('%Y%m%d_%H%M%S'),
uid,
active_session.session,
encoded_recipe,
active_session.type)
active_session.filepath = brew_active_sessions_path().joinpath(filename)
current_app.logger.debug('ZSeries - session file created {}'.format(active_session.filepath))
if session_id not in events:
events[session_id] = []
active_session.file = open(active_session.filepath, 'w')
active_session.file.write('[')
active_session.file.flush()
ret = {
"Active": False,
"ClosingDate": None,
"CreationDate": active_session.created_at.isoformat(),
"Deleted": False,
"DurationSec": body['DurationSec'],
"FirmwareVersion": body['FirmwareVersion'],
"GroupSession": body['GroupSession'] or False, # Z2 or Z+PicoStill Session
"GUID": active_session.session,
"ID": active_session.id,
"LastLogID": active_session.id,
"MaxTemp": body['MaxTemp'],
"MaxTempAddedSec": body['MaxTempAddedSec'],
"Name": active_session.name,
"Notes": None,
"Pressure": body['PressurePa'], # related to an attached picostill
"ProfileID": 28341, # how to get the userId
"SecondsRemaining": 0,
"SessionLogs": [],
"SessionType": active_session.type,
"StillUID": still_uid,
"StillVer": body.get('StillVer'),
"ZProgramId": body['ZProgramId'],
"ZSeriesID": uid
}
if 'ProgramParams' in body:
ret.update({
"ProgramParams": body['ProgramParams']
})
if 'RecipeID' in body:
ret.update({
"RecipeGuid": None,
"RecipeID": body['RecipeID']
})
return ret
def update_session_log(token, body):
session_id = body['ZSessionID']
active_session = active_brew_sessions[token]
if active_session.id == -1:
# the active session was restored from file with a placeholder id of -1;
# adopt the reported session_id (assume this is the right session to log against)
active_session.id = session_id
elif active_session.id != session_id: # session_id is hex string; session.id is number
current_app.logger.warn('WARN: ZSeries reported a session_id that does not match the active session')
error = {
'error': 'matching server log identifier {} does not match requested session_id {}'.format(active_session.id, session_id)
}
return Response(json.dumps(error), status=400, mimetype='application/json')
if session_id not in events:
    events[session_id] = []
if active_session.recovery != body['StepName']:
    events[session_id].append(body['StepName'])
active_session.step = body['StepName']
log_time = datetime.utcnow()
session_data = {
'time': ((log_time - datetime(1970, 1, 1)).total_seconds() * 1000),
'timeStr': log_time.isoformat(),
'timeLeft': body['SecondsRemaining'],
'step': body['StepName'],
# temperatures from Z are in celsius vs prior device series
'target': convert_temp(body['TargetTemp'], 'F'),
'ambient': convert_temp(body['AmbientTemp'], 'F'),
'drain': convert_temp(body['DrainTemp'], 'F'),
'wort': convert_temp(body['WortTemp'], 'F'),
'therm': convert_temp(body['ThermoBlockTemp'], 'F'),
'recovery': body['StepName'],
'position': body['ValvePosition']
}
event = None
if session_id in events and len(events[session_id]) > 0:
    if len(events[session_id]) > 1:
        current_app.logger.debug('DEBUG: ZSeries events > 1 - size = {}'.format(len(events[session_id])))
    event = events[session_id].pop(0)
session_data.update({'event': event})
active_session.data.append(session_data)
active_session.recovery = body['StepName']
active_session.remaining_time = body['SecondsRemaining']
# for Z graphs we have more data available: wort, hex/therm, target, drain, ambient
graph_update = json.dumps({'time': session_data['time'],
'data': [session_data['target'], session_data['wort'], session_data['therm'], session_data['drain'], session_data['ambient']],
'session': active_session.name,
'step': active_session.step,
'event': event,
})
socketio.emit('brew_session_update|{}'.format(token), graph_update)
active_session.file.write('\n\t{},'.format(json.dumps(session_data)))
active_session.file.flush()
ret = {
"ID": randint(0, 10000),
"LogDate": session_data['timeStr'],
}
ret.update(body)
return ret
def close_session(uid, session_id, body):
active_session = active_brew_sessions[uid]
ret = {
"Active": False,
"ClosingDate": datetime.utcnow().isoformat(),
"CreationDate": active_session.created_at.isoformat(),
"Deleted": False,
"DurationSec": body['DurationSec'],
"FirmwareVersion": body['FirmwareVersion'],
"GUID": active_session.session,
"ID": active_session.id,
"LastLogID": active_session.id,
"MaxTemp": body['MaxTemp'],
"MaxTempAddedSec": body['MaxTempAddedSec'],
"Name": active_session.name,
"Notes": None,
"Pressure": 0, # is this related to an attached picostill?
"ProfileID": 28341, # how to get the userId
"SecondsRemaining": 0,
"SessionLogs": [],
"SessionType": body['SessionType'],
"StillUID": body.get('StillUID'),
"StillVer": body.get('StillVer'),
"ZProgramId": body['ZProgramId'],
"ZSeriesID": uid
}
if 'ProgramParams' in body:
ret.update({
"ProgramParams": body['ProgramParams']
})
if 'RecipeID' in body:
ret.update({
"RecipeGuid": None,
"RecipeID": body['RecipeID']
})
active_session.file.seek(0, os.SEEK_END)
active_session.file.seek(active_session.file.tell() - 1, os.SEEK_SET) # Remove trailing , from last data set
active_session.file.write('\n]\n')
active_session.cleanup()
return ret
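# Hedged illustration (hypothetical log entries) of the session file produced by
# create_session / update_session_log / close_session: update_session_log appends
# "\n\t{...}," per sample, and close_session overwrites the final trailing comma
# before writing the closing bracket, yielding valid JSON such as:
#   [
#       {"time": 1594425600000.0, "step": "Heating", ...},
#       {"time": 1594425660000.0, "step": "Mashing", ...}
#   ]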
# GET /Vendors/input.cshtml?type=ResumableSession&token=<token>&id=<session_id> HTTP/1.1
def process_recover_session(token, session_id):
# TODO can one recover a RINSE / CLEAN or otherwise non-BEER or COFFEE session?
uid = get_machine_by_session(session_id)
if uid is None:
error = {
'error': 'session_id {} not found to be active - unable to resume session'.format(session_id)
}
return Response(json.dumps(error), status=400, mimetype='application/json')
active_session = active_brew_sessions[uid]
if active_session.id == session_id: # session_id is hex string; session.id is number
recipe = get_recipe_by_name(active_session.name)
current_step = active_session.recovery
remaining_time = active_session.remaining_time
steps = []
step_found = False
for s in recipe.steps:
if s.name == current_step:
step_found = True
if step_found:
steps.append(s)
if not step_found or len(steps) == 0:
current_app.logger.warn("most recently logged step not found in linked recipe steps")
error = {
'error': 'active brew session\'s most recently logged step not found in linked recipe'
}
return Response(json.dumps(error), status=400, mimetype='application/json')
if len(steps) >= 1:
current_app.logger.debug("ZSeries step_count={}, active_step={}, time_remaining={}".format(len(steps), current_step, remaining_time))
# modify runtime of the first step (most recently active)
steps[0].step_time = remaining_time
recipe.steps = steps
ret = {
"Recipe": json.loads(recipe.serialize()),
"SessionID": active_session.id,
"SessionType": active_session.type,
"ZPicoRecipe": None # does this identity the Z pak recipe?
}
return ret
else:
error = {
'error': 'matching server log identifier {} does not match requested session_id {}'.format(active_session.id, session_id)
}
return Response(json.dumps(error), status=400, mimetype='application/json')
# -------- Utility --------
def get_zseries_recipe_list():
    return list(get_zseries_recipes())
def get_zseries_recipe_metadata_list():
recipe_metadata = []
for r in get_zseries_recipes():
meta = {
"ID": r.id,
"Name": r.name,
"Kind": r.kind_code,
"Uri": None,
"Abv": -1,
"Ibu": -1
}
recipe_metadata.append(meta)
return recipe_metadata
def get_recipe_by_id(recipe_id):
recipe = next((r for r in get_zseries_recipes() if str(r.id) == str(recipe_id)), None)
return recipe
def get_recipe_by_name(recipe_name):
recipe = next((r for r in get_zseries_recipes() if r.name == recipe_name), None)
return recipe
def increment_session_id(uid):
    # one higher than the number of archived session files for this machine
    return len(list_brew_session_files(uid)) + 1
def get_machine_by_session(session_id):
return next((uid for uid in active_brew_sessions if active_brew_sessions[uid].session == session_id or active_brew_sessions[uid].id == int(session_id) or active_brew_sessions[uid].id == -1), None)
def get_archived_sessions_by_machine(uid):
brew_sessions = load_brew_sessions(uid=uid)
return brew_sessions
def session_type_from_filename(filename):
info = filename.stem.split('#')
session_type = SessionType.BEER
try:
if len(info) > 4:
session_type = int(info[4])
except Exception as error:
current_app.logger.warn("error occurred reading {}".format(filename),)
return session_type
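# Hedged example (hypothetical filename): create_session names session files
# <date>#<uid>#<guid>#<recipe>#<type>.json, and session_type_from_filename
# recovers the numeric type from the fifth '#'-separated field.
def _example_session_type_round_trip():
    from pathlib import PurePath
    name = PurePath('20200704_120000#240ac41d9ae4#abcd1234#My_Recipe#6.json')
    return session_type_from_filename(name)  # -> 6 (beer, per the SessionType checks above)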
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(hinoka): Use logging.
import cStringIO
import codecs
import copy
import ctypes
import json
import optparse
import os
import pprint
import random
import re
import subprocess
import sys
import tempfile
import threading
import time
import urllib2
import urlparse
import uuid
import os.path as path
# How many bytes at a time to read from pipes.
BUF_SIZE = 256
# Define a bunch of directory paths.
# Relative to this script's filesystem path.
THIS_DIR = path.dirname(path.abspath(__file__))
DEPOT_TOOLS_DIR = path.abspath(path.join(THIS_DIR, '..', '..', '..', '..'))
CHROMIUM_GIT_HOST = 'https://chromium.googlesource.com'
CHROMIUM_SRC_URL = CHROMIUM_GIT_HOST + '/chromium/src.git'
BRANCH_HEADS_REFSPEC = '+refs/branch-heads/*'
TAGS_REFSPEC = '+refs/tags/*'
# Regular expression that matches a single commit footer line.
COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s*(.*)')
# Footer metadata keys for regular and gsubtreed mirrored commit positions.
COMMIT_POSITION_FOOTER_KEY = 'Cr-Commit-Position'
COMMIT_ORIGINAL_POSITION_FOOTER_KEY = 'Cr-Original-Commit-Position'
# Regular expression to parse gclient's revinfo entries.
REVINFO_RE = re.compile(r'^([^:]+):\s+([^@]+)@(.+)$')
# Copied from scripts/recipes/chromium.py.
GOT_REVISION_MAPPINGS = {
CHROMIUM_SRC_URL: {
'got_revision': 'src/',
'got_nacl_revision': 'src/native_client/',
'got_swarm_client_revision': 'src/tools/swarm_client/',
'got_swarming_client_revision': 'src/tools/swarming_client/',
'got_v8_revision': 'src/v8/',
'got_webkit_revision': 'src/third_party/WebKit/',
'got_webrtc_revision': 'src/third_party/webrtc/',
}
}
GCLIENT_TEMPLATE = """solutions = %(solutions)s
cache_dir = r%(cache_dir)s
%(target_os)s
%(target_os_only)s
"""
# How many times to try before giving up.
ATTEMPTS = 5
GIT_CACHE_PATH = path.join(DEPOT_TOOLS_DIR, 'git_cache.py')
GCLIENT_PATH = path.join(DEPOT_TOOLS_DIR, 'gclient.py')
# If there is less than 100GB of disk space on the system, then we do
# a shallow checkout.
SHALLOW_CLONE_THRESHOLD = 100 * 1024 * 1024 * 1024
class SubprocessFailed(Exception):
def __init__(self, message, code, output):
Exception.__init__(self, message)
self.code = code
self.output = output
class PatchFailed(SubprocessFailed):
pass
class GclientSyncFailed(SubprocessFailed):
pass
class InvalidDiff(Exception):
pass
RETRY = object()
OK = object()
FAIL = object()
class PsPrinter(object):
def __init__(self, interval=300):
self.interval = interval
self.active = sys.platform.startswith('linux2')
self.thread = None
@staticmethod
def print_pstree():
"""Debugging function used to print "ps auxwwf" for stuck processes."""
subprocess.call(['ps', 'auxwwf'])
def poke(self):
if self.active:
self.cancel()
self.thread = threading.Timer(self.interval, self.print_pstree)
self.thread.start()
def cancel(self):
if self.active and self.thread is not None:
self.thread.cancel()
self.thread = None
def call(*args, **kwargs): # pragma: no cover
"""Interactive subprocess call."""
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
kwargs.setdefault('bufsize', BUF_SIZE)
cwd = kwargs.get('cwd', os.getcwd())
stdin_data = kwargs.pop('stdin_data', None)
if stdin_data:
kwargs['stdin'] = subprocess.PIPE
out = cStringIO.StringIO()
new_env = kwargs.get('env', {})
env = copy.copy(os.environ)
env.update(new_env)
kwargs['env'] = env
if new_env:
print '===Injecting Environment Variables==='
for k, v in sorted(new_env.items()):
print '%s: %s' % (k, v)
print '===Running %s ===' % (' '.join(args),)
print 'In directory: %s' % cwd
start_time = time.time()
proc = subprocess.Popen(args, **kwargs)
if stdin_data:
proc.stdin.write(stdin_data)
proc.stdin.close()
psprinter = PsPrinter()
# This is here because passing 'sys.stdout' into stdout for proc will
# produce out of order output.
hanging_cr = False
while True:
psprinter.poke()
buf = proc.stdout.read(BUF_SIZE)
if not buf:
break
if hanging_cr:
buf = '\r' + buf
hanging_cr = buf.endswith('\r')
if hanging_cr:
buf = buf[:-1]
buf = buf.replace('\r\n', '\n').replace('\r', '\n')
sys.stdout.write(buf)
out.write(buf)
if hanging_cr:
sys.stdout.write('\n')
out.write('\n')
psprinter.cancel()
code = proc.wait()
elapsed_time = ((time.time() - start_time) / 60.0)
outval = out.getvalue()
if code:
print '===Failed in %.1f mins of %s ===' % (elapsed_time, ' '.join(args))
print
raise SubprocessFailed('%s failed with code %d in %s.' %
(' '.join(args), code, cwd),
code, outval)
print '===Succeeded in %.1f mins of %s ===' % (elapsed_time, ' '.join(args))
print
return outval
def git(*args, **kwargs): # pragma: no cover
"""Wrapper around call specifically for Git commands."""
if args and args[0] == 'cache':
# Rewrite "git cache" calls into "python git_cache.py".
cmd = (sys.executable, '-u', GIT_CACHE_PATH) + args[1:]
else:
git_executable = 'git'
# On windows, subprocess doesn't fuzzy-match 'git' to 'git.bat', so we
# have to do it explicitly. This is better than passing shell=True.
if sys.platform.startswith('win'):
git_executable += '.bat'
cmd = (git_executable,) + args
return call(*cmd, **kwargs)
def get_gclient_spec(solutions, target_os, target_os_only, git_cache_dir):
return GCLIENT_TEMPLATE % {
'solutions': pprint.pformat(solutions, indent=4),
'cache_dir': '"%s"' % git_cache_dir,
'target_os': ('\ntarget_os=%s' % target_os) if target_os else '',
'target_os_only': '\ntarget_os_only=%s' % target_os_only
}
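# Hedged example (hypothetical cache dir): for a single Chromium solution the
# rendered .gclient body looks roughly like
#   solutions = [{'name': 'src', 'url': 'https://chromium.googlesource.com/chromium/src.git'}]
#   cache_dir = r"/b/git_cache"
#   target_os=['android']
#   target_os_only=False
# e.g. get_gclient_spec([{'name': 'src', 'url': CHROMIUM_SRC_URL}],
#                       ['android'], False, '/b/git_cache')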
def solutions_printer(solutions):
"""Prints gclient solution to stdout."""
print 'Gclient Solutions'
print '================='
for solution in solutions:
name = solution.get('name')
url = solution.get('url')
print '%s (%s)' % (name, url)
if solution.get('deps_file'):
print ' Dependencies file is %s' % solution['deps_file']
if 'managed' in solution:
print ' Managed mode is %s' % ('ON' if solution['managed'] else 'OFF')
custom_vars = solution.get('custom_vars')
if custom_vars:
print ' Custom Variables:'
for var_name, var_value in sorted(custom_vars.iteritems()):
print ' %s = %s' % (var_name, var_value)
custom_deps = solution.get('custom_deps')
if 'custom_deps' in solution:
print ' Custom Dependencies:'
for deps_name, deps_value in sorted(custom_deps.iteritems()):
if deps_value:
print ' %s -> %s' % (deps_name, deps_value)
else:
print ' %s: Ignore' % deps_name
for k, v in solution.iteritems():
# Print out all the keys we don't know about.
if k in ['name', 'url', 'deps_file', 'custom_vars', 'custom_deps',
'managed']:
continue
print ' %s is %s' % (k, v)
print
def modify_solutions(input_solutions):
"""Modifies urls in solutions to point at Git repos.
returns: a new list of solution dicts
"""
assert input_solutions
solutions = copy.deepcopy(input_solutions)
for solution in solutions:
original_url = solution['url']
parsed_url = urlparse.urlparse(original_url)
parsed_path = parsed_url.path
solution['managed'] = False
# We don't want gclient to be using a safesync URL. Instead it should be
# using the lkgr/lkcr branch/tags.
if 'safesync_url' in solution:
print 'Removing safesync url %s from %s' % (solution['safesync_url'],
parsed_path)
del solution['safesync_url']
return solutions
def remove(target, cleanup_dir):
"""Remove a target by moving it into cleanup_dir."""
if not path.exists(cleanup_dir):
os.makedirs(cleanup_dir)
dest = path.join(cleanup_dir, '%s_%s' % (
path.basename(target), uuid.uuid4().hex))
print 'Marking for removal %s => %s' % (target, dest)
try:
os.rename(target, dest)
except Exception as e:
print 'Error renaming %s to %s: %s' % (target, dest, str(e))
raise
def ensure_no_checkout(dir_names, cleanup_dir):
"""Ensure that there is no undesired checkout under build/."""
build_dir = os.getcwd()
has_checkout = any(path.exists(path.join(build_dir, dir_name, '.git'))
for dir_name in dir_names)
if has_checkout:
for filename in os.listdir(build_dir):
deletion_target = path.join(build_dir, filename)
print '.git detected in checkout, deleting %s...' % deletion_target,
remove(deletion_target, cleanup_dir)
print 'done'
def call_gclient(*args, **kwargs):
"""Run the "gclient.py" tool with the supplied arguments.
Args:
args: command-line arguments to pass to gclient.
kwargs: keyword arguments to pass to call.
"""
cmd = [sys.executable, '-u', GCLIENT_PATH]
cmd.extend(args)
return call(*cmd, **kwargs)
def gclient_configure(solutions, target_os, target_os_only, git_cache_dir):
"""Should do the same thing as gclient --spec='...'."""
with codecs.open('.gclient', mode='w', encoding='utf-8') as f:
f.write(get_gclient_spec(
solutions, target_os, target_os_only, git_cache_dir))
def gclient_sync(
with_branch_heads, with_tags, shallow, revisions, break_repo_locks,
disable_syntax_validation):
# We just need to allocate a filename.
fd, gclient_output_file = tempfile.mkstemp(suffix='.json')
os.close(fd)
args = ['sync', '--verbose', '--reset', '--force',
'--ignore_locks', '--output-json', gclient_output_file,
'--nohooks', '--noprehooks', '--delete_unversioned_trees']
if with_branch_heads:
args += ['--with_branch_heads']
if with_tags:
args += ['--with_tags']
if shallow:
args += ['--shallow']
if break_repo_locks:
args += ['--break_repo_locks']
if disable_syntax_validation:
args += ['--disable-syntax-validation']
for name, revision in sorted(revisions.iteritems()):
if revision.upper() == 'HEAD':
revision = 'origin/master'
args.extend(['--revision', '%s@%s' % (name, revision)])
try:
call_gclient(*args)
except SubprocessFailed as e:
# Throw a GclientSyncFailed exception so we can catch this independently.
raise GclientSyncFailed(e.message, e.code, e.output)
else:
with open(gclient_output_file) as f:
return json.load(f)
finally:
os.remove(gclient_output_file)
def gclient_revinfo():
return call_gclient('revinfo', '-a') or ''
def normalize_git_url(url):
"""Normalize a git url to be consistent.
This recognizes urls on the googlesource.com domain. It ensures that
the url:
* Does not end in .git
* Does not contain /a/ in its path.
"""
try:
p = urlparse.urlparse(url)
except Exception:
# Not a url, just return it back.
return url
if not p.netloc.endswith('.googlesource.com'):
# Not a googlesource.com URL, can't normalize this, just return as is.
return url
upath = p.path
if upath.startswith('/a'):
upath = upath[len('/a'):]
if upath.endswith('.git'):
upath = upath[:-len('.git')]
return 'https://%s%s' % (p.netloc, upath)
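# Hedged examples of the normalization above (illustrative URLs only):
#   normalize_git_url('https://chromium.googlesource.com/a/chromium/src.git')
#     -> 'https://chromium.googlesource.com/chromium/src'
#   normalize_git_url('https://example.com/repo.git')
#     -> 'https://example.com/repo.git'   (non-googlesource URL, returned as-is)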
# TODO(hinoka): Remove this once all downstream recipes stop using this format.
def create_manifest_old():
manifest = {}
output = gclient_revinfo()
for line in output.strip().splitlines():
match = REVINFO_RE.match(line.strip())
if match:
manifest[match.group(1)] = {
'repository': match.group(2),
'revision': match.group(3),
}
else:
print "WARNING: Couldn't match revinfo line:\n%s" % line
return manifest
# TODO(hinoka): Include patch revision.
def create_manifest(gclient_output, patch_root, gerrit_ref):
"""Return the JSONPB equivilent of the source manifest proto.
The source manifest proto is defined here:
https://chromium.googlesource.com/infra/luci/recipes-py/+/master/recipe_engine/source_manifest.proto
This is based off of:
* The gclient_output (from calling gclient.py --output-json) which contains
the directory -> repo:revision mapping.
* Gerrit Patch info which contains info about patched revisions.
We normalize the URLs such that if they are googlesource.com urls, they:
* Do not end in .git
* Do not contain /a/ in their path.
"""
manifest = {
'version': 0, # Currently the only valid version is 0.
}
dirs = {}
if patch_root:
patch_root = patch_root.strip('/') # Normalize directory names.
for directory, info in gclient_output.get('solutions', {}).iteritems():
directory = directory.strip('/') # Normalize the directory name.
# There are two places to get the revision from; we check them in this order:
# 1. In the "revision" field
# 2. At the end of the URL, after @
repo = ''
revision = info.get('revision', '')
# The format of the url is "https://repo.url/blah.git@abcdefabcdef" or
# just "https://repo.url/blah.git"
url_split = info.get('url', '').split('@')
if not revision and len(url_split) == 2:
revision = url_split[1]
if url_split:
repo = normalize_git_url(url_split[0])
if repo:
dirs[directory] = {
'git_checkout': {
'repo_url': repo,
'revision': revision,
}
}
if patch_root == directory:
dirs[directory]['git_checkout']['patch_fetch_ref'] = gerrit_ref
manifest['directories'] = dirs
return manifest
def get_commit_message_footer_map(message):
"""Returns: (dict) A dictionary of commit message footer entries.
"""
footers = {}
# Extract the lines in the footer block.
lines = []
for line in message.strip().splitlines():
line = line.strip()
if len(line) == 0:
del lines[:]
continue
lines.append(line)
# Parse the footer
for line in lines:
m = COMMIT_FOOTER_ENTRY_RE.match(line)
if not m:
# If any single line isn't valid, continue anyway for compatibility with
# Gerrit (which itself uses JGit for this).
continue
footers[m.group(1)] = m.group(2).strip()
return footers
def get_commit_message_footer(message, key):
"""Returns: (str/None) The footer value for 'key', or None if none was found.
"""
return get_commit_message_footer_map(message).get(key)
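# Hedged example (made-up commit message): only the final paragraph is treated as
# the footer block, and each "Key: value" line becomes one dictionary entry.
#   get_commit_message_footer_map(
#       'Fix the thing\n\nCr-Commit-Position: refs/heads/master@{#12345}\n')
#     -> {'Cr-Commit-Position': 'refs/heads/master@{#12345}'}
#   get_commit_message_footer(message, COMMIT_POSITION_FOOTER_KEY)
#     -> 'refs/heads/master@{#12345}'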
# Derived from:
# http://code.activestate.com/recipes/577972-disk-usage/?in=user-4178764
def get_total_disk_space():
cwd = os.getcwd()
# Windows is the only platform that doesn't support os.statvfs, so
# we need to special case this.
if sys.platform.startswith('win'):
_, total, free = (ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong())
if sys.version_info >= (3,) or isinstance(cwd, unicode):
fn = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fn = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fn(cwd, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
# WinError() will fetch the last error code.
raise ctypes.WinError()
return (total.value, free.value)
else:
st = os.statvfs(cwd)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
return (total, free)
def _get_target_branch_and_revision(solution_name, git_url, revisions):
normalized_name = solution_name.strip('/')
if normalized_name in revisions:
configured = revisions[normalized_name]
elif git_url in revisions:
configured = revisions[git_url]
else:
return 'master', 'HEAD'
parts = configured.split(':', 1)
if len(parts) == 2:
# Support for "branch:revision" syntax.
return parts
return 'master', configured
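# Hedged examples (hypothetical revisions dicts) for the lookup above:
#   _get_target_branch_and_revision('src', CHROMIUM_SRC_URL, {'src': 'deadbeef'})
#     -> ('master', 'deadbeef')
#   _get_target_branch_and_revision('src', CHROMIUM_SRC_URL,
#                                   {'src': 'refs/branch-heads/4044:c0ffee'})
#     -> ['refs/branch-heads/4044', 'c0ffee']   (branch:revision syntax)
#   _get_target_branch_and_revision('src', CHROMIUM_SRC_URL, {})
#     -> ('master', 'HEAD')                     (nothing configured)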
def force_solution_revision(solution_name, git_url, revisions, cwd):
branch, revision = _get_target_branch_and_revision(
solution_name, git_url, revisions)
if revision and revision.upper() != 'HEAD':
treeish = revision
else:
# TODO(machenbach): This won't work with branch-heads, as Gerrit's
# destination branch would be e.g. refs/branch-heads/123. But here
# we need to pass refs/remotes/branch-heads/123 to check out.
# This will also not work if somebody passes a local refspec like
# refs/heads/master. It needs to translate to refs/remotes/origin/master
# first. See also https://crbug.com/740456 .
treeish = branch if branch.startswith('refs/') else 'origin/%s' % branch
# Note that -- argument is necessary to ensure that git treats `treeish`
# argument as revision or ref, and not as a file/directory which happens to
# have the exact same name.
git('checkout', '--force', treeish, '--', cwd=cwd)
def is_broken_repo_dir(repo_dir):
# Treat absence of 'config' as a signal of a partially deleted repo.
return not path.exists(os.path.join(repo_dir, '.git', 'config'))
def _maybe_break_locks(checkout_path, tries=3):
"""This removes all .lock files from this repo's .git directory.
In particular, this will cleanup index.lock files, as well as ref lock
files.
"""
def attempt():
git_dir = os.path.join(checkout_path, '.git')
for dirpath, _, filenames in os.walk(git_dir):
for filename in filenames:
if filename.endswith('.lock'):
to_break = os.path.join(dirpath, filename)
print 'breaking lock: %s' % to_break
try:
os.remove(to_break)
except OSError as ex:
print 'FAILED to break lock: %s: %s' % (to_break, ex)
raise
for _ in xrange(tries):
try:
attempt()
return
except Exception:
pass
def git_checkouts(solutions, revisions, shallow, refs, git_cache_dir,
cleanup_dir):
build_dir = os.getcwd()
first_solution = True
for sln in solutions:
sln_dir = path.join(build_dir, sln['name'])
_git_checkout(sln, sln_dir, revisions, shallow, refs, git_cache_dir,
cleanup_dir)
if first_solution:
git_ref = git('log', '--format=%H', '--max-count=1',
cwd=path.join(build_dir, sln['name'])
).strip()
first_solution = False
return git_ref
def _git_checkout(sln, sln_dir, revisions, shallow, refs, git_cache_dir,
cleanup_dir):
name = sln['name']
url = sln['url']
if url == CHROMIUM_SRC_URL or url + '.git' == CHROMIUM_SRC_URL:
# Experiments show there's little to be gained from
# a shallow clone of src.
shallow = False
s = ['--shallow'] if shallow else []
populate_cmd = (['cache', 'populate', '--ignore_locks', '-v',
'--cache-dir', git_cache_dir] + s + [url])
for ref in refs:
populate_cmd.extend(['--ref', ref])
# Just in case we're hitting a different git server than the one from
# which the target revision was polled, we retry a few times.
# One minute (5 tries with exp. backoff). We retry at least once regardless
# of deadline in case initial fetch takes longer than the deadline but does
# not contain the required revision.
deadline = time.time() + 60
tries = 0
while True:
git(*populate_cmd)
mirror_dir = git(
'cache', 'exists', '--quiet',
'--cache-dir', git_cache_dir, url).strip()
try:
# If repo deletion was aborted midway, it may have left .git in broken
# state.
if path.exists(sln_dir) and is_broken_repo_dir(sln_dir):
print 'Git repo %s appears to be broken, removing it' % sln_dir
remove(sln_dir, cleanup_dir)
# Use "tries=1", since we retry manually in this loop.
if not path.isdir(sln_dir):
git('clone', '--no-checkout', '--local', '--shared', mirror_dir,
sln_dir)
else:
git('remote', 'set-url', 'origin', mirror_dir, cwd=sln_dir)
git('fetch', 'origin', cwd=sln_dir)
for ref in refs:
refspec = '%s:%s' % (ref, ref.lstrip('+'))
git('fetch', 'origin', refspec, cwd=sln_dir)
# Windows sometimes has trouble deleting files.
# This can make git commands that rely on locks fail.
# Try a few times in case Windows has trouble again (and again).
if sys.platform.startswith('win'):
_maybe_break_locks(sln_dir, tries=3)
force_solution_revision(name, url, revisions, sln_dir)
git('clean', '-dff', cwd=sln_dir)
return
except SubprocessFailed as e:
# Exited abnormally, there's probably something wrong.
print 'Something failed: %s.' % str(e)
# Only kick in deadline after trying once, in case the revision hasn't
# yet propagated.
if tries >= 1 and time.time() > deadline:
overrun = time.time() - deadline
print 'Ran %s seconds past deadline. Aborting.' % overrun
raise
# Let's wipe the checkout and try again.
tries += 1
sleep_secs = 2**tries
print 'waiting %s seconds and trying again...' % sleep_secs
time.sleep(sleep_secs)
remove(sln_dir, cleanup_dir)
def _download(url):
"""Fetch url and return content, with retries for flake."""
for attempt in xrange(ATTEMPTS):
try:
return urllib2.urlopen(url).read()
except Exception:
if attempt == ATTEMPTS - 1:
raise
def apply_rietveld_issue(issue, patchset, root, server, _rev_map, _revision,
email_file, key_file, oauth2_file,
whitelist=None, blacklist=None):
apply_issue_bin = ('apply_issue.bat' if sys.platform.startswith('win')
else 'apply_issue')
cmd = [apply_issue_bin,
# The patch will be applied on top of this directory.
'--root_dir', root,
# Tell apply_issue how to fetch the patch.
'--issue', issue,
'--server', server,
# Always run apply_issue.py, otherwise it would see update.flag
# and then bail out.
'--force',
# Don't run gclient sync when it sees a DEPS change.
'--ignore_deps',
]
# Use an oauth key or json file if specified.
if oauth2_file:
cmd.extend(['--auth-refresh-token-json', oauth2_file])
elif email_file and key_file:
cmd.extend(['--email-file', email_file, '--private-key-file', key_file])
else:
cmd.append('--no-auth')
if patchset:
cmd.extend(['--patchset', patchset])
if whitelist:
for item in whitelist:
cmd.extend(['--whitelist', item])
elif blacklist:
for item in blacklist:
cmd.extend(['--blacklist', item])
# Only try once, since subsequent failures hide the real failure.
try:
call(*cmd)
except SubprocessFailed as e:
raise PatchFailed(e.message, e.code, e.output)
def apply_gerrit_ref(gerrit_repo, gerrit_ref, root, gerrit_reset,
gerrit_rebase_patch_ref):
gerrit_repo = gerrit_repo or 'origin'
assert gerrit_ref
base_rev = git('rev-parse', 'HEAD', cwd=root).strip()
print '===Applying gerrit ref==='
print 'Repo is %r @ %r, ref is %r, root is %r' % (
gerrit_repo, base_rev, gerrit_ref, root)
# TODO(tandrii): move the fix below to common rietveld/gerrit codepath.
# Speculative fix: prior bot_update run with Rietveld patch may leave git
# index with unmerged paths. bot_update calls 'checkout --force xyz' thus
# ignoring such paths, but potentially never cleaning them up. The following
# command will do so. See http://crbug.com/692067.
git('reset', '--hard', cwd=root)
try:
git('fetch', gerrit_repo, gerrit_ref, cwd=root)
git('checkout', 'FETCH_HEAD', cwd=root)
if gerrit_rebase_patch_ref:
print '===Rebasing==='
# git rebase requires a branch to operate on.
temp_branch_name = 'tmp/' + uuid.uuid4().hex
try:
ok = False
git('checkout', '-b', temp_branch_name, cwd=root)
try:
git('-c', 'user.name=chrome-bot',
'-c', 'user.email=chrome-bot@chromium.org',
'rebase', base_rev, cwd=root)
except SubprocessFailed:
# Abort the rebase since there were failures.
git('rebase', '--abort', cwd=root)
raise
# Get off of the temporary branch since it can't be deleted otherwise.
cur_rev = git('rev-parse', 'HEAD', cwd=root).strip()
git('checkout', cur_rev, cwd=root)
git('branch', '-D', temp_branch_name, cwd=root)
ok = True
finally:
if not ok:
# Get off of the temporary branch since it can't be deleted otherwise.
git('checkout', base_rev, cwd=root)
git('branch', '-D', temp_branch_name, cwd=root)
if gerrit_reset:
git('reset', '--soft', base_rev, cwd=root)
except SubprocessFailed as e:
raise PatchFailed(e.message, e.code, e.output)
def get_commit_position(git_path, revision='HEAD'):
"""Dumps the 'git' log for a specific revision and parses out the commit
position.
If a commit position metadata key is found, its value will be returned.
"""
# TODO(iannucci): Use git-footers for this.
git_log = git('log', '--format=%B', '-n1', revision, cwd=git_path)
footer_map = get_commit_message_footer_map(git_log)
# Search for commit position metadata
value = (footer_map.get(COMMIT_POSITION_FOOTER_KEY) or
footer_map.get(COMMIT_ORIGINAL_POSITION_FOOTER_KEY))
if value:
return value
return None
def parse_got_revision(gclient_output, got_revision_mapping):
"""Translate git gclient revision mapping to build properties."""
properties = {}
solutions_output = {
# Make sure path always ends with a single slash.
'%s/' % path.rstrip('/') : solution_output for path, solution_output
in gclient_output['solutions'].iteritems()
}
for property_name, dir_name in got_revision_mapping.iteritems():
# Make sure dir_name always ends with a single slash.
dir_name = '%s/' % dir_name.rstrip('/')
if dir_name not in solutions_output:
continue
solution_output = solutions_output[dir_name]
if solution_output.get('scm') is None:
# This is an ignored DEPS, so the output got_revision should be 'None'.
revision = commit_position = None
else:
# Since we are using .DEPS.git, everything had better be git.
assert solution_output.get('scm') == 'git'
revision = git('rev-parse', 'HEAD', cwd=dir_name).strip()
commit_position = get_commit_position(dir_name)
properties[property_name] = revision
if commit_position:
properties['%s_cp' % property_name] = commit_position
return properties
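# Hedged example (hypothetical gclient output): given revision_mapping
# {'got_revision': 'src/'} and gclient output containing
# {'solutions': {'src/': {'scm': 'git', ...}}}, the function resolves
# 'got_revision' via `git rev-parse HEAD` inside src/ and, when a
# Cr-Commit-Position footer is present, also emits 'got_revision_cp'.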
def emit_json(out_file, did_run, gclient_output=None, **kwargs):
"""Write run information into a JSON file."""
output = {}
output.update(gclient_output if gclient_output else {})
output.update({'did_run': did_run})
output.update(kwargs)
with open(out_file, 'wb') as f:
f.write(json.dumps(output, sort_keys=True))
def ensure_checkout(solutions, revisions, first_sln, target_os, target_os_only,
patch_root, issue, patchset, rietveld_server, gerrit_repo,
gerrit_ref, gerrit_rebase_patch_ref, revision_mapping,
apply_issue_email_file, apply_issue_key_file,
apply_issue_oauth2_file, shallow, refs, git_cache_dir,
cleanup_dir, gerrit_reset, disable_syntax_validation):
# Get a checkout of each solution, without DEPS or hooks.
# Calling git directly because there is no way to run Gclient without
# invoking DEPS.
print 'Fetching Git checkout'
git_ref = git_checkouts(solutions, revisions, shallow, refs, git_cache_dir,
cleanup_dir)
print '===Processing patch solutions==='
already_patched = []
patch_root = patch_root or ''
applied_gerrit_patch = False
print 'Patch root is %r' % patch_root
for solution in solutions:
print 'Processing solution %r' % solution['name']
if (patch_root == solution['name'] or
solution['name'].startswith(patch_root + '/')):
relative_root = solution['name'][len(patch_root) + 1:]
target = '/'.join([relative_root, 'DEPS']).lstrip('/')
print ' relative root is %r, target is %r' % (relative_root, target)
if issue:
apply_rietveld_issue(issue, patchset, patch_root, rietveld_server,
revision_mapping, git_ref, apply_issue_email_file,
apply_issue_key_file, apply_issue_oauth2_file,
whitelist=[target])
already_patched.append(target)
elif gerrit_ref:
apply_gerrit_ref(gerrit_repo, gerrit_ref, patch_root, gerrit_reset,
gerrit_rebase_patch_ref)
applied_gerrit_patch = True
# Ensure our build/ directory is set up with the correct .gclient file.
gclient_configure(solutions, target_os, target_os_only, git_cache_dir)
# Windows sometimes has trouble deleting files. This can make git commands
# that rely on locks fail.
break_repo_locks = True if sys.platform.startswith('win') else False
# We want to pass all non-solution revisions into the gclient sync call.
solution_dirs = {sln['name'] for sln in solutions}
gc_revisions = {
dirname: rev for dirname, rev in revisions.iteritems()
if dirname not in solution_dirs}
# Gclient sometimes ignores "unmanaged": "False" in the gclient solution
# if --revision <anything> is passed (for example, for subrepos).
# This forces gclient to always treat solutions deps as unmanaged.
for solution_name in list(solution_dirs):
gc_revisions[solution_name] = 'unmanaged'
# Let gclient do the DEPS syncing.
# The branch-head refspec is a special case because it's possible Chrome
# src, which contains the branch-head refspecs, is DEPSed in.
gclient_output = gclient_sync(
BRANCH_HEADS_REFSPEC in refs,
TAGS_REFSPEC in refs,
shallow,
gc_revisions,
break_repo_locks,
disable_syntax_validation)
# Now that gclient_sync has finished, we should revert any .DEPS.git so that
# presubmit doesn't complain about it being modified.
if git('ls-files', '.DEPS.git', cwd=first_sln).strip():
git('checkout', 'HEAD', '--', '.DEPS.git', cwd=first_sln)
# Apply the rest of the patch here (sans DEPS)
if issue:
apply_rietveld_issue(issue, patchset, patch_root, rietveld_server,
revision_mapping, git_ref, apply_issue_email_file,
apply_issue_key_file, apply_issue_oauth2_file,
blacklist=already_patched)
elif gerrit_ref and not applied_gerrit_patch:
# If gerrit_ref was for solution's main repository, it has already been
# applied above. This chunk is executed only for patches to DEPS-ed in
# git repositories.
apply_gerrit_ref(gerrit_repo, gerrit_ref, patch_root, gerrit_reset,
gerrit_rebase_patch_ref)
# Reset the deps_file setting in the solutions so that hooks get run properly.
for sln in solutions:
sln['deps_file'] = sln.get('deps_file', 'DEPS').replace('.DEPS.git', 'DEPS')
gclient_configure(solutions, target_os, target_os_only, git_cache_dir)
return gclient_output
def parse_revisions(revisions, root):
"""Turn a list of revision specs into a nice dictionary.
We will always return a dict with {root: something}. By default if root
is unspecified, or if revisions is [], then revision will be assigned 'HEAD'
"""
results = {root.strip('/'): 'HEAD'}
expanded_revisions = []
for revision in revisions:
# Allow rev1,rev2,rev3 format.
# TODO(hinoka): Delete this when webkit switches to recipes.
expanded_revisions.extend(revision.split(','))
for revision in expanded_revisions:
split_revision = revision.split('@')
if len(split_revision) == 1:
# This is just a plain revision, set it as the revision for root.
results[root] = split_revision[0]
elif len(split_revision) == 2:
# This is an alt_root@revision argument.
current_root, current_rev = split_revision
parsed_root = urlparse.urlparse(current_root)
if parsed_root.scheme in ['http', 'https']:
# We want to normalize git urls into .git urls.
normalized_root = 'https://' + parsed_root.netloc + parsed_root.path
if not normalized_root.endswith('.git'):
normalized_root += '.git'
elif parsed_root.scheme:
print 'WARNING: Unrecognized scheme %s, ignoring' % parsed_root.scheme
continue
else:
# This is probably a local path.
normalized_root = current_root.strip('/')
results[normalized_root] = current_rev
else:
print ('WARNING: %r is not recognized as a valid revision specification, '
'skipping' % revision)
return results
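# Hedged examples (hypothetical revision specs) for the parsing above:
#   parse_revisions([], 'src')                    -> {'src': 'HEAD'}
#   parse_revisions(['deadbeef', 'src/v8@1234'], 'src')
#     -> {'src': 'deadbeef', 'src/v8': '1234'}
#   parse_revisions(['https://example.org/repo@abc'], 'src')
#     -> {'src': 'HEAD', 'https://example.org/repo.git': 'abc'}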
def parse_args():
parse = optparse.OptionParser()
parse.add_option('--issue', help='Issue number to patch from.')
parse.add_option('--patchset',
help='Patchset from issue to patch from, if applicable.')
parse.add_option('--apply_issue_email_file',
help='--email-file option passthrough for apply_patch.py.')
parse.add_option('--apply_issue_key_file',
help='--private-key-file option passthrough for '
'apply_patch.py.')
parse.add_option('--apply_issue_oauth2_file',
help='--auth-refresh-token-json option passthrough for '
'apply_patch.py.')
parse.add_option('--root', dest='patch_root',
help='DEPRECATED: Use --patch_root.')
parse.add_option('--patch_root', help='Directory to patch on top of.')
parse.add_option('--rietveld_server',
default='codereview.chromium.org',
help='Rietveld server.')
parse.add_option('--gerrit_repo',
help='Gerrit repository to pull the ref from.')
parse.add_option('--gerrit_ref', help='Gerrit ref to apply.')
parse.add_option('--gerrit_no_rebase_patch_ref', action='store_true',
help='Bypass rebase of Gerrit patch ref after checkout.')
parse.add_option('--gerrit_no_reset', action='store_true',
help='Bypass calling reset after applying a gerrit ref.')
parse.add_option('--specs', help='Gclient spec.')
parse.add_option('--spec-path', help='Path to a Gclient spec file.')
parse.add_option('--revision_mapping_file',
help=('Path to a json file of the form '
'{"property_name": "path/to/repo/"}'))
parse.add_option('--revision', action='append', default=[],
help='Revision to check out. Can be any form of git ref. '
'Can prepend root@<rev> to specify which repository, '
'where root is either a filesystem path or git https '
'url. To specify Tip of Tree, set rev to HEAD. ')
# TODO(machenbach): Remove the flag when all uses have been removed.
parse.add_option('--output_manifest', action='store_true',
help=('Deprecated.'))
parse.add_option('--clobber', action='store_true',
help='Delete checkout first, always')
parse.add_option('--output_json',
help='Output JSON information into a specified file')
parse.add_option('--no_shallow', action='store_true',
help='Bypass disk detection and never shallow clone. '
'Does not override the --shallow flag')
parse.add_option('--refs', action='append',
help='Also fetch this refspec for the main solution(s). '
'Eg. +refs/branch-heads/*')
parse.add_option('--with_branch_heads', action='store_true',
help='Always pass --with_branch_heads to gclient. This '
'does the same thing as --refs +refs/branch-heads/*')
parse.add_option('--with_tags', action='store_true',
help='Always pass --with_tags to gclient. This '
'does the same thing as --refs +refs/tags/*')
parse.add_option('--git-cache-dir', help='Path to git cache directory.')
parse.add_option('--cleanup-dir',
help='Path to a cleanup directory that can be used for '
'deferred file cleanup.')
parse.add_option(
'--disable-syntax-validation', action='store_true',
help='Disable validation of .gclient and DEPS syntax.')
options, args = parse.parse_args()
if options.spec_path:
if options.specs:
parse.error('At most one of --spec-path and --specs may be specified.')
with open(options.spec_path, 'r') as fd:
options.specs = fd.read()
if not options.output_json:
parse.error('--output_json is required')
if not options.git_cache_dir:
parse.error('--git-cache-dir is required')
if not options.refs:
options.refs = []
if options.with_branch_heads:
options.refs.append(BRANCH_HEADS_REFSPEC)
del options.with_branch_heads
if options.with_tags:
options.refs.append(TAGS_REFSPEC)
del options.with_tags
try:
if not options.revision_mapping_file:
parse.error('--revision_mapping_file is required')
with open(options.revision_mapping_file, 'r') as f:
options.revision_mapping = json.load(f)
except Exception as e:
print (
'WARNING: Caught exception while parsing revision_mapping*: %s'
% (str(e),)
)
# Because we print CACHE_DIR out into a .gclient file, and then later run
# eval() on it, backslashes need to be escaped, otherwise "E:\b\build" gets
# parsed as "E:[\x08][\x08]uild".
if sys.platform.startswith('win'):
options.git_cache_dir = options.git_cache_dir.replace('\\', '\\\\')
return options, args
def prepare(options, git_slns, active):
"""Prepares the target folder before we checkout."""
dir_names = [sln.get('name') for sln in git_slns if 'name' in sln]
if options.clobber:
ensure_no_checkout(dir_names, options.cleanup_dir)
# Make sure we tell recipes that we didn't run if the script exits here.
emit_json(options.output_json, did_run=active)
# Do a shallow checkout if the disk is less than 100GB.
total_disk_space, free_disk_space = get_total_disk_space()
total_disk_space_gb = int(total_disk_space / (1024 * 1024 * 1024))
used_disk_space_gb = int((total_disk_space - free_disk_space)
/ (1024 * 1024 * 1024))
percent_used = int(used_disk_space_gb * 100 / total_disk_space_gb)
step_text = '[%dGB/%dGB used (%d%%)]' % (used_disk_space_gb,
total_disk_space_gb,
percent_used)
shallow = (total_disk_space < SHALLOW_CLONE_THRESHOLD
and not options.no_shallow)
# The first solution is where the primary DEPS file resides.
first_sln = dir_names[0]
# Split all the revision specifications into a nice dict.
print 'Revisions: %s' % options.revision
revisions = parse_revisions(options.revision, first_sln)
print 'Fetching Git checkout at %s@%s' % (first_sln, revisions[first_sln])
return revisions, step_text, shallow
def checkout(options, git_slns, specs, revisions, step_text, shallow):
print 'Using Python version: %s' % (sys.version,)
print 'Checking git version...'
ver = git('version').strip()
print 'Using %s' % ver
first_sln = git_slns[0]['name']
dir_names = [sln.get('name') for sln in git_slns if 'name' in sln]
try:
# Outer try is for catching patch failures and exiting gracefully.
# Inner try is for catching gclient failures and retrying gracefully.
try:
checkout_parameters = dict(
# First, pass in the base of what we want to check out.
solutions=git_slns,
revisions=revisions,
first_sln=first_sln,
# Also, target os variables for gclient.
target_os=specs.get('target_os', []),
target_os_only=specs.get('target_os_only', False),
# Then, pass in information about how to patch.
patch_root=options.patch_root,
issue=options.issue,
patchset=options.patchset,
rietveld_server=options.rietveld_server,
gerrit_repo=options.gerrit_repo,
gerrit_ref=options.gerrit_ref,
gerrit_rebase_patch_ref=not options.gerrit_no_rebase_patch_ref,
revision_mapping=options.revision_mapping,
apply_issue_email_file=options.apply_issue_email_file,
apply_issue_key_file=options.apply_issue_key_file,
apply_issue_oauth2_file=options.apply_issue_oauth2_file,
# Finally, extra configurations such as shallowness of the clone.
shallow=shallow,
refs=options.refs,
git_cache_dir=options.git_cache_dir,
cleanup_dir=options.cleanup_dir,
gerrit_reset=not options.gerrit_no_reset,
disable_syntax_validation=options.disable_syntax_validation)
gclient_output = ensure_checkout(**checkout_parameters)
except GclientSyncFailed:
print "We failed gclient sync, let's delete the checkout and retry."
ensure_no_checkout(dir_names, options.cleanup_dir)
gclient_output = ensure_checkout(**checkout_parameters)
except PatchFailed as e:
# Tell recipes information such as root, got_revision, etc.
emit_json(options.output_json,
did_run=True,
root=first_sln,
patch_apply_return_code=e.code,
patch_root=options.patch_root,
patch_failure=True,
failed_patch_body=e.output,
step_text='%s PATCH FAILED' % step_text,
fixed_revisions=revisions)
raise
# Take care of got_revisions outputs.
revision_mapping = GOT_REVISION_MAPPINGS.get(git_slns[0]['url'], {})
if options.revision_mapping:
revision_mapping.update(options.revision_mapping)
# If the repo is not in the default GOT_REVISION_MAPPINGS and no
# revision_mapping were specified on the command line then
# default to setting 'got_revision' based on the first solution.
if not revision_mapping:
revision_mapping['got_revision'] = first_sln
got_revisions = parse_got_revision(gclient_output, revision_mapping)
if not got_revisions:
# TODO(hinoka): We should probably bail out here, but in the interest
# of giving mis-configured bots some time to get fixed use a dummy
# revision here.
got_revisions = { 'got_revision': 'BOT_UPDATE_NO_REV_FOUND' }
#raise Exception('No got_revision(s) found in gclient output')
# Tell recipes information such as root, got_revision, etc.
emit_json(options.output_json,
did_run=True,
root=first_sln,
patch_root=options.patch_root,
step_text=step_text,
fixed_revisions=revisions,
properties=got_revisions,
manifest=create_manifest_old(),
source_manifest=create_manifest(
gclient_output, options.patch_root, options.gerrit_ref))
def print_debug_info():
print "Debugging info:"
debug_params = {
'CURRENT_DIR': path.abspath(os.getcwd()),
'THIS_DIR': THIS_DIR,
'DEPOT_TOOLS_DIR': DEPOT_TOOLS_DIR,
}
for k, v in sorted(debug_params.iteritems()):
print "%s: %r" % (k, v)
def main():
# Get inputs.
options, _ = parse_args()
# Check if this script should activate or not.
active = True
# Print a helpful message to tell developers what's going on with this step.
print_debug_info()
# Parse, manipulate, and print the gclient solutions.
specs = {}
exec(options.specs, specs)
orig_solutions = specs.get('solutions', [])
git_slns = modify_solutions(orig_solutions)
solutions_printer(git_slns)
# Creating hardlinks during a build can interact with git reset in
# unfortunate ways if git's index isn't refreshed beforehand. (See
# crbug.com/330461#c13 for an explanation.)
try:
call_gclient('recurse', '-v', 'git', 'update-index', '--refresh')
except SubprocessFailed:
# Failure here (and nowhere else) may have adverse effects on the
# compile time of the build but shouldn't affect its ability to
# successfully complete.
print 'WARNING: Failed to update git indices.'
try:
# Dun dun dun, the main part of bot_update.
revisions, step_text, shallow = prepare(options, git_slns, active)
checkout(options, git_slns, specs, revisions, step_text, shallow)
except PatchFailed as e:
# Return a specific non-zero exit code for patch failure (because it is
# a failure), but make it different than other failures to distinguish
# between infra failures (independent from patch author), and patch
# failures (that patch author can fix). However, PatchFailure due to
# download patch failure is still an infra problem.
if e.code == 3:
# Patch download problem.
return 87
# Genuine patch problem.
return 88
if __name__ == '__main__':
sys.exit(main())
|
from django.core.management.base import BaseCommand, CommandError
from results.models import ResultStage, ResultCheck
def count_result(model_arg):
if model_arg == "resultcheck":
result_count = ResultCheck.objects.all().count()
else:
result_count = ResultStage.objects.all().count()
return result_count
def delete_result(model_arg):
if model_arg == "resultcheck":
ResultCheck.objects.all().delete()
else:
ResultStage.objects.all().delete()
def count_delete_result(model_arg):
count = str(count_result(model_arg))
delete_result(model_arg)
message = '%s objects deleted:\t%s' % (model_arg, count)
print "\n" + message + "\n"
class Command(BaseCommand):
help = 'Delete all items in ResultStage or ResultCheck model.'
def add_arguments(self, parser):
## positional required arguments
parser.add_argument('model',
# action='store_true',
# dest='model',
# default='result',
help='Specify the model (resultstage or resultcheck) you want to delete'
)
def handle(self, *args, **options):
model_arg = options['model']
count_delete_result(model_arg)
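# Hedged usage example (assuming this module is installed as a management command
# named delete_results under results/management/commands/):
#   python manage.py delete_results resultstage
#   python manage.py delete_results resultcheck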
|
from io import BytesIO
from pyecore.resources import URI
class BytesURI(URI):
def __init__(self, uri, text=None):
super(BytesURI, self).__init__(uri)
if text is not None:
self.__stream = BytesIO(text)
def getvalue(self):
return self.__stream.getvalue()
def create_instream(self):
return self.__stream
def create_outstream(self):
self.__stream = BytesIO()
return self.__stream
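# Hedged usage sketch (assumes a pyecore ResourceSet and an existing model root;
# the resource/ResourceSet calls follow pyecore's public API):
#   from pyecore.resources import ResourceSet
#   rset = ResourceSet()
#   uri = BytesURI('in-memory')
#   resource = rset.create_resource(uri)
#   resource.append(model_root)        # model_root is a hypothetical EObject
#   resource.save()                    # serializes into the BytesIO out-stream
#   raw_bytes = uri.getvalue()         # the serialized document as bytes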
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
print 'please provide "--count" or "--topcount"'
sys.exit(1)
def prepare_dictionary(filename):
    f = open(filename, 'rU')
    word_counts = {}
    for line in f:
        # split() with no arguments splits on any whitespace, per the exercise notes
        for raw_word in line.split():
            word = raw_word.lower().strip()
            if not word.isalpha():
                continue
            if word in word_counts:
                word_counts[word] += 1
            else:
                word_counts[word] = 1
    f.close()
    return word_counts
def print_words(filename):
    word_counts = prepare_dictionary(filename)
    for word in sorted(word_counts):
        print word + ':' + str(word_counts[word])
def print_top(filename):
    word_counts = prepare_dictionary(filename)
    def MyFn(s):
        return -word_counts[s]
    count = 0
    for word in sorted(word_counts, key=MyFn):
        print word + ':' + str(word_counts[word])
        count += 1
        if count >= 20:
            break
if __name__ == '__main__':
main()
|
import util
import json
import numpy as np
import random
import tensorflow as tf
class DeepDog:
"""
The DeepDog class loads the training and test set images from
disk into RAM, and provides functions to get the test set
and mini batches of the training set.
"""
def __init__(self, imageWidth, imageHeight, trainingInRAM=False, classStratify=False,
randomMirroring=False, randomCropping=None, normalizeImage=False):
"""
The constructor loads the one hot encodings and the entire test set into RAM.
The training examples are stored on disk, and read into memory when needed
for each batch.
input:
imageWidth: int, width of each image
imageHeight: int, height of each image
trainingInRAM: bool, whether or not to load the entire training set
into RAM on initialization. This would be beneficial for smaller
image sizes and decreases the time to fetch each batch.
classStratify: bool, whether or not each breed class should be equally
represented in each batch, i.e. with a batch size of 120,
each breed would show up once in the batch
(not implemented yet)
randomMirroring: bool, whether or not to randomly mirror individual
training images returned by getNextMiniBatch()
randomCropping: tuple, (cropWidth, cropHeight), cropWidth and cropHeight
are the dimensions of the cropped image returned by
getNextMiniBatch()
normalizeImage: bool, whether or not to scale the images returned
by getNextMiniBatch() and getTestImagesAndLabels() to
have 0 mean and unit standard deviation
"""
self.MIRROR_PROBABILITY = 0.5
self.randomMirroring = randomMirroring
self.randomCropping = randomCropping
if self.randomCropping is not None:
self.cropWidth = self.randomCropping[0]
self.cropHeight = self.randomCropping[1]
self.normalizeImage = normalizeImage
self.image_width = imageWidth
self.image_height = imageHeight
self.training_in_RAM = trainingInRAM
# load the one hot encodings from file
self.one_hot_encodings = {}
self.loadOneHotEncodings()
self.numberBreeds = float(len(self.one_hot_encodings.keys()))
# load the test set from file
self.test_set_images, self.test_set_labels = [], []
self.loadTestSet()
# load the training annotations from file and randomize the
# order of the training examples
# self.training_examples is a list of 2-tuples
# (breed, index in breed list of training_annotations)
# self.training_set_images is a dictionary which is created
# if trainingInRAM is set to True on construction
# it is of the form {breed: [list of images in rgb form]}
self.training_annotations = {}
self.training_set_images = {}
self.training_examples = []
self.training_set_size = 0
self.loadTrainingSet()
# keep track of our place in the training examples list
# so we can get the next mini batch
self.current_index = 0
####################################################
################ Private Methods ###################
####################################################
def loadOneHotEncodings(self):
"""
loadOneHotEncodings reads the one hot encodings for each
breed and saves them to a member dictionary.
input: none
output: (doesn't return, saves to member variable)
self.one_hot_encodings: dictionary, {'breed': [1, 0, 0]}
"""
with open('one_hot_encodings.json', 'r') as data_file:
self.one_hot_encodings = json.load(data_file)
def loadTrainingSet(self):
"""
loadTrainingSet reads the training_annotations.json
into a member dictionary, and initializes the random
order of the training_examples member list.
input: none
output: (doesn't return, saves to member variables)
self.training_annotations: dictionary, {'breed': [list of annotations]}
self.training_examples: list of 2-tuples
[(breed, index into list of self.training_annotations), ...]
"""
print("Initializing training set order...\n")
# load the training_annotations
with open('training_annotations.json', 'r') as data_file:
self.training_annotations = json.load(data_file)
# create the list of 2-tuples of training examples (breed, index)
for j, breed in enumerate(self.training_annotations.keys()):
if self.training_in_RAM:
print(str(round(j / self.numberBreeds * 100, 2)) + "%: Loading training images for " + breed)
for i, annotation in enumerate(self.training_annotations[breed]):
self.training_examples.append((breed, i))
# if training_in_RAM is True, load the image from disk
if self.training_in_RAM:
currentImage = util.getResizedImageData(annotation, self.image_width, self.image_height)
if breed not in self.training_set_images:
self.training_set_images[breed] = [currentImage]
else:
self.training_set_images[breed].append(currentImage)
self.training_set_size = len(self.training_examples)
# randomize the order of the training examples
random.shuffle(self.training_examples)
print("Finished initializing training set order...\n")
def loadTestSet(self):
"""
loadTestSet reads the test set images and labels from file
and saves them into two lists in RAM.
input: none
output: (saves to member lists, doesn't return)
testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]
testLabels: numpy array [testSetSize x [numImageClasses]]
"""
print("Loading test set...\n")
testing_breeds = {}
with open('testing_annotations.json', 'r') as data_file:
testing_breeds = json.load(data_file)
for i, breed in enumerate(testing_breeds.keys()):
print(str(round(i / self.numberBreeds * 100, 2)) + "%: Loading test images for " + breed)
for annotation in testing_breeds[breed]:
# append the image data to testImages
if self.randomCropping is None:
self.test_set_images.append(util.getResizedImageData(annotation,
self.image_width, self.image_height))
else:
self.test_set_images.append(util.getResizedImageData(annotation,
self.cropWidth, self.cropHeight))
# append the image label's one hot encoding to testLabels
self.test_set_labels.append(self.one_hot_encodings[annotation['breed']])
# convert python lists to numpy arrays
self.test_set_images = np.array(self.test_set_images)
if self.normalizeImage:
print("Normalizing test images...")
self.test_set_images = tf.map_fn(tf.image.per_image_standardization, self.test_set_images)
self.test_set_labels = np.array(self.test_set_labels)
print("Finished loading test set.....\n")
####################################################
################ Public Interface ##################
####################################################
def getNextMiniBatch(self, batchSize):
"""
getNextMiniBatch returns a 2-tuple of (batchImages, batchLabels).
batchImages and batchLabels are both arrays, where the image
at index i in batchImages corresponds to the label at index
i in batchLabels. The batch images and labels are from
the training set.
input:
batchSize: int, number of images and labels to include
in the mini batch returned by getNextMiniBatch
output:
batchImages: numpy array [batchSize x [imageWidth x imageHeight x 3]]
batchLabels: numpy array [batchSize x [numImageClasses]]
"""
batchImages = []
batchLabels = []
# if we have reached the end of the training examples,
# reshuffle the training examples and start from the
# beginning of the list
# in the event that the number of training examples
# is not evenly divisible by the batchSize,
# some training examples will be skipped during this reshuffling
# I trade this off for decreased code complexity
if self.current_index + batchSize > self.training_set_size:
self.current_index = 0
random.shuffle(self.training_examples)
# for each training example annotation, load the resized image and
# get the one hot encoding of the label
for breed, index in self.training_examples[self.current_index:self.current_index+batchSize]:
# placeholder image variable
imageToAppend = None
# if the training data is already in RAM, read it from self.training_set_images
# otherwise, fetch the image from disk
if self.training_in_RAM:
imageToAppend = self.training_set_images[breed][index]
else:
annotation = self.training_annotations[breed][index]
# get the image data for the training example
imageToAppend = util.getResizedImageData(annotation,
self.image_width, self.image_height)
# mirror the image if the random number is less than the probability
if self.randomMirroring and random.random() < self.MIRROR_PROBABILITY:
imageToAppend = np.fliplr(imageToAppend)
# randomly crop the image
if self.randomCropping is not None:
widthDiff = self.image_width - self.cropWidth
heightDiff = self.image_height - self.cropHeight
widthOffset = int(random.random() * widthDiff)
heightOffset = int(random.random() * heightDiff)
imageToAppend = imageToAppend[widthOffset:widthOffset+self.cropWidth,
heightOffset:heightOffset+self.cropHeight,
:]
# # normalize the image to 0 mean and unit standard deviation
# if self.normalizeImage:
# imageToAppend = tf.image.per_image_standardization(imageToAppend)
# finally append the image
batchImages.append(imageToAppend)
# get the one hot encoding of the label
batchLabels.append(self.one_hot_encodings[breed])
self.current_index += batchSize
if self.normalizeImage:
batchImages = tf.map_fn(tf.image.per_image_standardization, batchImages)
return batchImages, np.array(batchLabels)
return np.array(batchImages), np.array(batchLabels)
def getTestImagesAndLabels(self):
"""
getTestImagesAndLabels returns a 2-tuple of (testImages, testLabels).
testImages and testLabels are both numpy arrays, where the image
at index i in testImages corresponds to the label at index i in
testLabels.
input: None
output:
testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]
testLabels: numpy array [testSetSize x [numImageClasses]]
"""
return self.test_set_images, self.test_set_labels
def getTrainingSetSize(self):
"""
getTrainingSetSize returns the size of the training set. This
function is useful when computing the progress inside an epoch.
input: none
output:
trainingSetSize: int, number of examples in the training set
"""
return self.training_set_size
def main():
dd = DeepDog(64, 64)
im, la = dd.getNextMiniBatch(100)
print(im.shape, la.shape)
print(im)
print(la)
if __name__ == "__main__":
main()
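# --- Usage sketch (illustrative only) -----------------------------------------
# A minimal sketch of how DeepDog batches could drive a training loop. The
# `train_step` callable below is a hypothetical stand-in for a real model
# update (e.g. a TensorFlow session run); it is not part of this module.
def example_training_loop(train_step, epochs=1, batch_size=100):
    dd = DeepDog(64, 64)
    steps_per_epoch = dd.getTrainingSetSize() // batch_size
    for epoch in range(epochs):
        for step in range(steps_per_epoch):
            # fetch the next mini batch and hand it to the hypothetical update
            batch_images, batch_labels = dd.getNextMiniBatch(batch_size)
            train_step(batch_images, batch_labels)
        # evaluate on the held-out test set at the end of each epoch
        test_images, test_labels = dd.getTestImagesAndLabels()
        print("epoch", epoch, "test set size:", len(test_labels))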
|
from rest_framework import serializers
class HelloSerializer(serializers.Serializer):
"""Serializes a name field for testing our APIView"""
name = serializers.CharField(max_length=10)
|
import os
from functools import partial
import PIL
import lmdb
import numpy as np
from ding.envs import SyncSubprocessEnvManager
from ding.utils.default_helper import deep_merge_dicts
from easydict import EasyDict
from tqdm import tqdm
from haco.DIDrive_core.data import CarlaBenchmarkCollector, BenchmarkDatasetSaver
from haco.DIDrive_core.envs import SimpleCarlaEnv, CarlaEnvWrapper
from haco.DIDrive_core.policy import AutoPIDPolicy
from haco.DIDrive_core.utils.others.tcp_helper import parse_carla_tcp
config = dict(
env=dict(
env_num=5,
simulator=dict(
disable_two_wheels=True,
planner=dict(
type='behavior',
resolution=1,
),
obs=(
dict(
name='rgb',
type='rgb',
size=[400, 300],
position=[1.3, 0.0, 2.3],
fov=100,
),
),
verbose=True,
),
col_is_failure=True,
stuck_is_failure=True,
ran_light_is_failure=True,
manager=dict(
auto_reset=False,
shared_memory=False,
context='spawn',
max_retry=1,
),
wrapper=dict(
speed_factor=25.,
scale=1,
crop=256,
),
),
server=[
dict(carla_host='localhost', carla_ports=[9000, 9010, 2]),
],
policy=dict(
target_speed=25,
tl_threshold=13,
noise=True,
noise_kwargs=dict(),
collect=dict(
n_episode=100,
dir_path='./datasets_train/cilrs_datasets_train',
preloads_name='cilrs_datasets_train.npy',
collector=dict(
suite='FullTown01-v1',
nocrash=True,
),
)
),
)
main_config = EasyDict(config)
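# cilrs_postprocess (below) shrinks the 'rgb' camera frame by `scale` and takes a
# centered `crop` x `crop` patch; the (sensor_data, others) pair it returns is the
# post-processing hook handed to BenchmarkDatasetSaver in main().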
def cilrs_postprocess(observation, scale=1, crop=256):
rgb = observation['rgb'].copy()
im = PIL.Image.fromarray(rgb)
(width, height) = (int(im.width // scale), int(im.height // scale))
rgb = im.resize((width, height))
rgb = np.asarray(rgb)
start_x = height // 2 - crop // 2
start_y = width // 2 - crop // 2
rgb = rgb[start_x:start_x + crop, start_y:start_y + crop]
sensor_data = {'rgb': rgb}
others = {}
return sensor_data, others
def wrapped_env(env_cfg, wrapper_cfg, host, port, tm_port=None):
return CarlaEnvWrapper(SimpleCarlaEnv(env_cfg, host, port, tm_port), wrapper_cfg)
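# post_process (below) walks every 'epi*' episode folder in dir_path, reads the
# per-frame measurements from measurements.lmdb, pairs them with the matching
# rgb_*.png frames, and saves the resulting index to _preloads/<preloads_name>.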
def post_process(config):
epi_folder = [x for x in os.listdir(config.policy.collect.dir_path) if x.startswith('epi')]
all_img_list = []
all_mea_list = []
for item in tqdm(epi_folder):
lmdb_file = lmdb.open(os.path.join(config.policy.collect.dir_path, item, 'measurements.lmdb')).begin(write=False)
png_files = [
x for x in os.listdir(os.path.join(config.policy.collect.dir_path, item)) if (x.endswith('png') and x.startswith('rgb'))
]
png_files.sort()
for png_file in png_files:
index = png_file.split('_')[1].split('.')[0]
measurements = np.frombuffer(lmdb_file.get(('measurements_%05d' % int(index)).encode()), np.float32)
data = {}
data['control'] = np.array([measurements[15], measurements[16], measurements[17]]).astype(np.float32)
data['speed'] = measurements[10] / config.env.wrapper.speed_factor
data['command'] = float(measurements[11])
new_dict = {}
new_dict['brake'] = data['control'][2]
new_dict['steer'] = (data['control'][0] + 1) / 2
new_dict['throttle'] = data['control'][1]
new_dict['speed'] = data['speed']
new_dict['command'] = data['command']
all_img_list.append(os.path.join(item, png_file))
all_mea_list.append(new_dict)
if not os.path.exists('_preloads'):
os.mkdir('_preloads')
np.save('_preloads/{}'.format(config.policy.collect.preloads_name), [all_img_list, all_mea_list])
def main(cfg, seed=0):
cfg.env.manager = deep_merge_dicts(SyncSubprocessEnvManager.default_config(), cfg.env.manager)
tcp_list = parse_carla_tcp(cfg.server)
env_num = cfg.env.env_num
assert len(tcp_list) >= env_num, \
"Carla server not enough! Need {} servers but only found {}.".format(env_num, len(tcp_list))
collector_env = SyncSubprocessEnvManager(
env_fn=[partial(wrapped_env, cfg.env, cfg.env.wrapper, *tcp_list[i]) for i in range(env_num)],
cfg=cfg.env.manager,
)
policy = AutoPIDPolicy(cfg.policy)
collector = CarlaBenchmarkCollector(cfg.policy.collect.collector, collector_env, policy.collect_mode)
if not os.path.exists(cfg.policy.collect.dir_path):
os.makedirs(cfg.policy.collect.dir_path)
collected_episodes = 0
data_postprocess = lambda x: cilrs_postprocess(x, scale=cfg.env.wrapper.scale, crop=cfg.env.wrapper.crop)
saver = BenchmarkDatasetSaver(cfg.policy.collect.dir_path, cfg.env.simulator.obs, data_postprocess)
print('[MAIN] Start collecting data')
saver.make_dataset_path(cfg.policy.collect)
while collected_episodes < cfg.policy.collect.n_episode:
# Sampling data from environments
n_episode = min(cfg.policy.collect.n_episode - collected_episodes, env_num * 2)
new_data = collector.collect(n_episode=n_episode)
saver.save_episodes_data(new_data, start_episode=collected_episodes)
collected_episodes += n_episode
print('[MAIN] Current collected: ', collected_episodes, '/', cfg.policy.collect.n_episode)
collector_env.close()
saver.make_index()
print('[MAIN] Making preloads')
post_process(cfg)
if __name__ == '__main__':
main(main_config)
|
from django.test import TestCase
from game.ai import TicTacToeAI
class TicTacToeAITest(TestCase):
def setUp(self):
board_state = [['o', ' ', 'x'],
['x', ' ', ' '],
['x', 'o', 'o']]
self.g = TicTacToeAI(board_state)
def test_possible_moves(self):
self.assertEqual(self.g.possible_moves(), [(0, 1), (1, 1), (1, 2)])
def test_winner_o(self):
self.assertEqual(self.g.board_status(), None)
self.g.move('o', 1, 1)
self.assertEqual(self.g.board_status(), 'o')
def test_winner_x(self):
self.assertEqual(self.g.board_status(), None)
self.g.move('x', 1, 1)
self.assertEqual(self.g.board_status(), 'x')
def test_draw_board_status(self):
draw_board_state = [['o', 'o', 'x'],
['x', 'x', 'o'],
['o', 'x', 'o']]
g = TicTacToeAI(draw_board_state)
self.assertEqual(g.board_status(), 'draw')
def test_score_possible_moves(self):
self.assertEqual(self.g.score_possible_moves(), [None, 1, None])
class TestSpecificGameStates(TestCase):
def test_state_1(self):
bs = [['o', 'x', 'x'],
['x', ' ', 'o'],
['x', 'o', 'o']]
g = TicTacToeAI(bs, 'x')
self.assertEqual(g.get_next_move(), (1, 1))
g = TicTacToeAI(bs, 'o')
self.assertEqual(g.get_next_move(), (1, 1))
def test_state_2(self):
bs = [['o', ' ', 'x'],
['x', ' ', ' '],
['x', 'o', 'o']]
g = TicTacToeAI(bs, 'x')
self.assertEqual(g.get_next_move(), (1, 1))
g = TicTacToeAI(bs, 'o')
self.assertEqual(g.get_next_move(), (1, 1))
def test_state_3(self):
bs = [['o', 'x', 'x'],
['x', ' ', ' '],
['x', 'o', 'o']]
g = TicTacToeAI(bs, 'x')
self.assertEqual(g.get_next_move(), (1, 1))
g = TicTacToeAI(bs, 'o')
self.assertEqual(g.get_next_move(), (1, 1))
def test_state_4(self):
bs = [['x', ' ', ' '],
[' ', ' ', ' '],
[' ', ' ', ' ']]
g = TicTacToeAI(bs, 'o')
self.assertIn(g.get_next_move(), [(0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)])
def test_state_5(self):
# losing position
bs = [['x', ' ', 'x'],
[' ', 'o', 'x'],
['o', ' ', ' ']]
g = TicTacToeAI(bs, 'o')
self.assertIn(g.get_next_move(), [(0, 1), (1, 0), (2, 1), (2, 2)])
def test_state_6(self):
bs = [['x', ' ', ' '],
[' ', 'o', ' '],
[' ', ' ', 'x']]
g = TicTacToeAI(bs, 'o')
for _ in range(10):
self.assertIn(g.get_next_move(), [(0, 1), (1, 0), (1, 2), (2, 1)])
def test_state_7(self):
# draw state
bs = [['x', 'x', 'o'],
['o', 'o', 'x'],
['x', ' ', ' ']]
g = TicTacToeAI(bs, 'o')
self.assertIn(g.get_next_move(), [(2, 1), (2, 2)])
def test_state_8(self):
# draw state
bs = [[' ', ' ', ' '],
[' ', 'o', ' '],
['x', 'o', 'x']]
g = TicTacToeAI(bs, 'x')
self.assertEqual(g.get_next_move(), (0, 1))
def test_state_9(self):
bs = [['x', 'o', 'x'],
[' ', 'x', 'o'],
['o', ' ', ' ']]
g = TicTacToeAI(bs, 'x')
for _ in range(10):
self.assertEqual(g.get_next_move(), (2, 2))
def test_state_10(self):
bs = [['x', 'o', 'x'],
['o', 'x', 'o'],
['o', ' ', ' ']]
g = TicTacToeAI(bs, 'x')
self.assertEqual(g.get_next_move(), (2, 2))
|
# -*- coding: utf-8 -*-
from typing import Union
class PortBindingGuest:
__slots__ = ("port", "protocol")
port: int
protocol: str
def __init__(self, port: Union[int, str], protocol: str):
if isinstance(port, int):
self.port = port
else:
self.port = int(port)
self.protocol = protocol
def __str__(self) -> str:
return f"{self.port}/{self.protocol}"
def __repr__(self):
return f"PortBindingGuest<{self.__str__()}>"
def __hash__(self) -> int:
return hash(self.__str__())
def __eq__(self, other) -> bool:
return hash(self) == hash(other)
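# Usage sketch (illustrative): the port may be given as an int or a numeric
# string, and equality is based on the "port/protocol" string form.
# >>> PortBindingGuest("80", "tcp") == PortBindingGuest(80, "tcp")
# True
# >>> str(PortBindingGuest(53, "udp"))
# '53/udp'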
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.xshard.pandas.preprocessing import read_csv
from zoo.xshard.pandas.preprocessing import read_json
|
# Copyright (c) 2021 Lan Peng and Chase Murray
# Licensed under the MIT License. See LICENSING for details.
from veroviz._common import *
from veroviz._internal import locs2Dict
from veroviz._internal import loc2Dict
from veroviz._geometry import geoDistance2D
def pgrGetSnapToRoadLatLon(gid, loc, databaseName):
"""
A function to get snapped latlng for one coordinate using pgRouting
Parameters
----------
gid: int
The gid of the street in pgRouting database
loc: list
The location to be snapped to the road
databaseName: string, Required
If you are hosting a data provider on your local machine (e.g., pgRouting), you'll need to specify the name of the local database.
Returns
-------
list
The snapped location in the format [lat, lon]; note that this function discards the altitude component of the location.
"""
conn = psycopg2.connect("dbname='%s' user='%s' host='%s' password='%s'" % (
databaseName,
config['VRV_SETTING_PGROUTING_USERNAME'],
config['VRV_SETTING_PGROUTING_HOST'],
config['VRV_SETTING_PGROUTING_PASSWORD']))
cur = conn.cursor()
# For maintainability
dicLoc = loc2Dict(loc)
sqlCommand = " select ST_X(point), ST_Y(point)"
sqlCommand += " from ("
sqlCommand += " select ST_ClosestPoint("
sqlCommand += " ST_GeomFromEWKT(CONCAT('SRID=4326; LINESTRING(',x1,' ',y1,', ',x2,' ',y2,')')),"
sqlCommand += " ST_GeomFromEWKT('SRID=4326;POINT(%s %s)')) as point" % (dicLoc['lon'], dicLoc['lat']) # Be very careful about lon and lat
sqlCommand += " from ways"
sqlCommand += " where gid=%s" % (gid)
sqlCommand += " ) a;"
cur.execute(sqlCommand)
row = cur.fetchone()
snapLoc = [row[1], row[0]]
conn.close()
return snapLoc
def pgrGetNearestStreet(loc, databaseName):
"""
A function to return the details of the nearest street given a known coordinate
Parameters
----------
loc: list
The location for which to find the nearest street
databaseName: string, Required
If you are hosting a data provider on your local machine (e.g., pgRouting), you'll need to specify the name of the local database.
Returns
-------
street: dictionary
A dictionary describing the nearest street from the Ways table, with fields:
'gid' (int): identifier of the street
'source', 'target' (int): identifiers of the source and target vertices
'sourceLoc', 'targetLoc' (list): [lat, lon] of the source and target vertices
'cost_s', 'reverse_cost_s' (float): travel time in seconds from source to target and from target to source
'one_way' (int): indicates whether the street is one-way
"""
conn = psycopg2.connect("dbname='%s' user='%s' host='%s' password='%s'" % (
databaseName,
config['VRV_SETTING_PGROUTING_USERNAME'],
config['VRV_SETTING_PGROUTING_HOST'],
config['VRV_SETTING_PGROUTING_PASSWORD']))
cur = conn.cursor()
# For maintainability
dicLoc = loc2Dict(loc)
try:
sqlCommand = " select gid, source, target, y1, x1, y2, x2, cost_s, reverse_cost_s, one_way"
sqlCommand += " from "
sqlCommand += " ways"
sqlCommand += " where"
sqlCommand += " x1 >= %s - 0.01 and x1 <= %s + 0.01" % (dicLoc['lon'], dicLoc['lon']) # Eliminate most of the ways there
sqlCommand += " order by"
sqlCommand += " ST_Distance("
sqlCommand += " ST_GeogFromText('SRID=4326; POINT(%s %s)')," % (dicLoc['lon'], dicLoc['lat']) # Be very careful about lon and lat
sqlCommand += " ST_GeogFromText(CONCAT('SRID=4326; LINESTRING(',x1,' ',y1,', ',x2,' ',y2,')')))"
sqlCommand += " limit 1;"
cur.execute(sqlCommand)
row = cur.fetchone()
street = {
"gid" : int(row[0]),
"source" : int(row[1]),
"target" : int(row[2]),
"sourceLoc" : [row[3], row[4]],
"targetLoc" : [row[5], row[6]],
"cost_s" : row[7],
"reverse_cost_s" : row[8],
"one_way" : row[9]
}
except:
sqlCommand = " select gid, source, target, y1, x1, y2, x2, length_m, cost_s, reverse_cost_s, one_way"
sqlCommand += " from "
sqlCommand += " ways"
sqlCommand += " order by"
sqlCommand += " ST_Distance("
sqlCommand += " ST_GeogFromText('SRID=4326; POINT(%s %s)')," % (dicLoc['lon'], dicLoc['lat']) # Be very careful about lon and lat
sqlCommand += " ST_GeogFromText(CONCAT('SRID=4326; LINESTRING(',x1,' ',y1,', ',x2,' ',y2,')')))"
sqlCommand += " limit 1;"
cur.execute(sqlCommand)
row = cur.fetchone()
street = {
"gid" : int(row[0]),
"source" : int(row[1]),
"target" : int(row[2]),
"sourceLoc" : [row[3], row[4]],
"targetLoc" : [row[5], row[6]],
"cost_s" : row[7],
"reverse_cost_s" : row[8],
"one_way" : row[9]
}
conn.close()
return street
def pgrGetShapepointsTimeDist(startLoc, endLoc, databaseName):
"""
A function to get a list of shapepoints from start coordinate to end coordinate.
Parameters
----------
startLoc: list
Start location, the format is [lat, lon] (altitude, above sea level, set to be 0) or [lat, lon, alt]
endLoc: list
End location, in the same [lat, lon] or [lat, lon, alt] format as startLoc
databaseName: string, Required
If you are hosting a data provider on your local machine (e.g., pgRouting), you'll need to specify the name of the local database.
Returns
-------
path: list of lists
A list of coordinates in sequence that shape the route from startLoc to endLoc
timeSecs: list
time between current shapepoint and previous shapepoint, the first element should be 0
distMeters: list
distance between current shapepoint and previous shapepoint, the first element should be 0
"""
conn = psycopg2.connect("dbname='%s' user='%s' host='%s' password='%s'" % (
databaseName,
config['VRV_SETTING_PGROUTING_USERNAME'],
config['VRV_SETTING_PGROUTING_HOST'],
config['VRV_SETTING_PGROUTING_PASSWORD']))
conn.autocommit = True
cur = conn.cursor()
# Calculate the distance between snapped location and source/target of closest street for the START coordinate
startStreet = pgrGetNearestStreet(startLoc, databaseName)
snapStartLoc = pgrGetSnapToRoadLatLon(startStreet['gid'], startLoc, databaseName)
dicSnapStartLoc = loc2Dict(snapStartLoc)
distSnapStart2Source = geoDistance2D(snapStartLoc, startStreet['sourceLoc'])
distSnapStart2Target = geoDistance2D(snapStartLoc, startStreet['targetLoc'])
# Calculate the distance between snapped location and source/target of closest street for the END coordinate
endStreet = pgrGetNearestStreet(endLoc, databaseName)
snapEndLoc = pgrGetSnapToRoadLatLon(endStreet['gid'], endLoc, databaseName)
dicSnapEndLoc = loc2Dict(snapEndLoc)
distSnapEnd2Source = geoDistance2D(snapEndLoc, endStreet['sourceLoc'])
distSnapEnd2Target = geoDistance2D(snapEndLoc, endStreet['targetLoc'])
# Find the number of vertices in the pgRouting database
sqlCommand = " select count(*) from ways_vertices_pgr;"
cur.execute(sqlCommand)
row = cur.fetchone()
newlyInsertVidNum = int(row[0]) + 1
# Choose a dummyClassID under which to store the temporary vertices and segments
dummyClassID = 821 # Hard-coded number, no specific meaning
# FIXME! For database safety, we need to verify that class_id = 821 is not already used in the original database
# insert the snapped location for START coordinate, and two segments from the coordinate to source/target of the closest street
sqlCommand = " insert into ways_vertices_pgr (id, lon, lat) values (%s, %s, %s);" % (
newlyInsertVidNum,
dicSnapStartLoc['lon'],
dicSnapStartLoc['lat'])
sqlCommand += " insert into ways (class_id, source, target, length_m, x1, y1, x2, y2, cost_s, reverse_cost_s) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);" % (
dummyClassID,
newlyInsertVidNum,
startStreet['target'],
distSnapStart2Target,
dicSnapStartLoc['lon'],
dicSnapStartLoc['lat'],
startStreet['targetLoc'][1],
startStreet['targetLoc'][0],
startStreet['cost_s'] * distSnapStart2Target / (distSnapStart2Target + distSnapStart2Source),
startStreet['reverse_cost_s'] * distSnapStart2Target / (distSnapStart2Target + distSnapStart2Source))
sqlCommand += " insert into ways (class_id, source, target, length_m, x1, y1, x2, y2, cost_s, reverse_cost_s) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);" % (
dummyClassID,
startStreet['source'],
newlyInsertVidNum,
distSnapStart2Source,
startStreet['sourceLoc'][1],
startStreet['sourceLoc'][0],
dicSnapStartLoc['lon'],
dicSnapStartLoc['lat'],
startStreet['cost_s'] * distSnapStart2Source / (distSnapStart2Target + distSnapStart2Source),
startStreet['reverse_cost_s'] * distSnapStart2Source / (distSnapStart2Target + distSnapStart2Source))
# insert the snapped location for END coordinate, and two segments from the coordinate to source/target of the closest street
sqlCommand += " insert into ways_vertices_pgr (id, lon, lat) values (%s, %s, %s);" % (
newlyInsertVidNum + 1,
dicSnapEndLoc['lon'],
dicSnapEndLoc['lat'])
sqlCommand += " insert into ways (class_id, source, target, length_m, x1, y1, x2, y2, cost_s, reverse_cost_s) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);" % (
dummyClassID,
newlyInsertVidNum + 1,
endStreet['target'],
distSnapEnd2Target,
dicSnapEndLoc['lon'],
dicSnapEndLoc['lat'],
endStreet['targetLoc'][1],
endStreet['targetLoc'][0],
endStreet['cost_s'] * distSnapEnd2Target / (distSnapEnd2Target + distSnapEnd2Source),
endStreet['reverse_cost_s'] * distSnapEnd2Target / (distSnapEnd2Target + distSnapEnd2Source))
sqlCommand += " insert into ways (class_id, source, target, length_m, x1, y1, x2, y2, cost_s, reverse_cost_s) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);" % (
dummyClassID,
endStreet['source'],
newlyInsertVidNum + 1,
distSnapEnd2Source,
endStreet['sourceLoc'][1],
endStreet['sourceLoc'][0],
dicSnapEndLoc['lon'],
dicSnapEndLoc['lat'],
endStreet['cost_s'] * distSnapEnd2Source / (distSnapEnd2Target + distSnapEnd2Source),
endStreet['reverse_cost_s'] * distSnapEnd2Source / (distSnapEnd2Target + distSnapEnd2Source))
# Run Dijkstra's algorithm to find the shortest path
sqlCommand += " select b.gid as gid, b.y1 as lats1, b.x1 as lons1, b.y2 as lats2, b.x2 as lons2, a.cost as secs, b.length_m as dist "
sqlCommand += " from "
sqlCommand += " pgr_dijkstra("
sqlCommand += " 'select gid as id, source, target, cost_s as cost, reverse_cost_s as reverse_cost from ways',"
sqlCommand += " %s," % (newlyInsertVidNum)
sqlCommand += " %s," % (newlyInsertVidNum + 1)
sqlCommand += " directed := true"
sqlCommand += " ) a"
sqlCommand += " left join"
sqlCommand += " ways b"
sqlCommand += " on a.edge = b.gid"
sqlCommand += " order by a.path_seq"
# Return the shapepoint result from Dijkstra's algorithm
cur.execute(sqlCommand)
row = cur.fetchall()
summary = pd.DataFrame(row, columns=['gid', 'lats1', 'lons1', 'lats2', 'lons2', 'secs', 'dist'])
# Delete the temp data
sqlCommand = " delete from ways_vertices_pgr where id = (%s);" % (newlyInsertVidNum)
sqlCommand += " delete from ways_vertices_pgr where id = (%s);" % (newlyInsertVidNum + 1)
sqlCommand += " delete from ways where class_id = %s;" % (dummyClassID)
cur.execute(sqlCommand)
# The last row is junk info, drop it
summary.drop(summary.index[len(summary) - 1], inplace = True)
# Sorting the coordinates so that they can be linked to each other
lats1 = summary['lats1'].tolist()
lons1 = summary['lons1'].tolist()
lats2 = summary['lats2'].tolist()
lons2 = summary['lons2'].tolist()
path = []
path.append(startLoc)
for i in range(1, len(lats1)):
if (lats1[i] != lats1[i - 1] and lats1[i] != lats2[i - 1]):
path.append([lats1[i], lons1[i]])
else:
path.append([lats2[i], lons2[i]])
timeSecs = summary['secs'].tolist()
distMeters = summary['dist'].tolist()
conn.close()
return [path, timeSecs, distMeters]
def pgrGetTimeDist(fromLocs, toLocs, databaseName):
"""
This function generates time and distance matrices using pgRouting
Parameters
----------
fromLocs: list of lists
A list of starting coordinates, in the format of [[lat1, lon1], [lat2, lon2], ...]
toLocs: list of lists
A list of ending coordinates, in the format of [[lat1, lon1], [lat2, lon2], ...]
databaseName: string
If you are hosting a data provider on your local machine (e.g., pgRouting), you'll need to specify the name of the local database.
Returns
-------
timeSecs: dictionary
Keyed by (coordID1, coordID2); the travel time from the first entry to the second, in seconds
distMeters: dictionary
Keyed by (coordID1, coordID2); the travel distance from the first entry to the second, in meters
"""
conn = psycopg2.connect("dbname='%s' user='%s' host='%s' password='%s'" % (
databaseName,
config['VRV_SETTING_PGROUTING_USERNAME'],
config['VRV_SETTING_PGROUTING_HOST'],
config['VRV_SETTING_PGROUTING_PASSWORD']))
conn.autocommit = True
cur = conn.cursor()
dummyClassID = 821 # Hard-coded number, no specific meaning
# FIXME! For database safety, we need to verify that class_id = 821 is not already used in the original database
sqlCommand = " select max(id) from ways_vertices_pgr;"
cur.execute(sqlCommand)
row = cur.fetchone()
newlyInsertVidNum = int(row[0]) + 1
locs = fromLocs.copy()
for i in range(len(toLocs)):
try:
locs.index(toLocs[i])
except ValueError:
locs.append(toLocs[i])
startVidList = []
endVidList = []
for i in range(len(fromLocs)):
startVidList.append(newlyInsertVidNum + locs.index(fromLocs[i]))
for i in range(len(toLocs)):
endVidList.append(newlyInsertVidNum + locs.index(toLocs[i]))
for i in range(len(locs)):
# Add dummy vertices
street = pgrGetNearestStreet(locs[i], databaseName)
snapLoc = pgrGetSnapToRoadLatLon(street['gid'], locs[i], databaseName)
dicSnapLoc = loc2Dict(snapLoc)
sqlCommand = " insert into ways_vertices_pgr (id, lon, lat) values (%s, %s, %s);" % (
newlyInsertVidNum + locs.index(locs[i]),
dicSnapLoc['lon'],
dicSnapLoc['lat'])
cur.execute(sqlCommand)
# Add two road segments
distSource2Snapped = geoDistance2D(street['sourceLoc'], snapLoc)
distSnapped2Target = geoDistance2D(snapLoc, street['targetLoc'])
ratio = distSource2Snapped / (distSource2Snapped + distSnapped2Target)
dicSourceLoc = loc2Dict(street['sourceLoc'])
dicTargetLoc = loc2Dict(street['targetLoc'])
sqlCommand = " insert into ways (class_id, source, target, length_m, x1, y1, x2, y2, cost_s, reverse_cost_s) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);" % (
dummyClassID,
street['source'],
newlyInsertVidNum + locs.index(locs[i]),
distSource2Snapped,
dicSourceLoc['lon'],
dicSourceLoc['lat'],
dicSnapLoc['lon'],
dicSnapLoc['lat'],
street['cost_s'] * ratio,
street['reverse_cost_s'] * ratio)
cur.execute(sqlCommand)
sqlCommand = " insert into ways (class_id, source, target, length_m, x1, y1, x2, y2, cost_s, reverse_cost_s) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);" % (
dummyClassID,
newlyInsertVidNum + locs.index(locs[i]),
street['target'],
distSnapped2Target,
dicSnapLoc['lon'],
dicSnapLoc['lat'],
dicTargetLoc['lon'],
dicTargetLoc['lat'],
street['cost_s'] * (1 - ratio),
street['reverse_cost_s'] * (1 - ratio))
cur.execute(sqlCommand)
sqlCommand = " select "
sqlCommand += " start_vid as start_node, "
sqlCommand += " end_vid as end_node, "
sqlCommand += " sum(cost) as time, "
sqlCommand += " sum(length_m) as distance "
sqlCommand += " from ("
sqlCommand += " select "
sqlCommand += " a.*, "
sqlCommand += " b.length_m"
sqlCommand += " from pgr_dijkstra("
sqlCommand += " 'select gid as id, source, target, cost_s as cost, reverse_cost_s as reverse_cost from ways', "
sqlCommand += " ARRAY%s, " % (startVidList)
sqlCommand += " ARRAY%s, " % (endVidList)
sqlCommand += " directed := true) a "
sqlCommand += " left join "
sqlCommand += " ways b "
sqlCommand += " on "
sqlCommand += " a.edge = b.gid "
sqlCommand += " order by "
sqlCommand += " a.path_seq"
sqlCommand += " ) x "
sqlCommand += " group by "
sqlCommand += " start_vid, "
sqlCommand += " end_vid;"
cur.execute(sqlCommand)
row = cur.fetchall()
for i in range(len(startVidList)):
sqlCommand = " delete from ways_vertices_pgr where id = %s;" % (startVidList[i])
cur.execute(sqlCommand)
for i in range(len(endVidList)):
sqlCommand = " delete from ways_vertices_pgr where id = %s;" % (endVidList[i])
cur.execute(sqlCommand)
sqlCommand = " delete from ways where class_id = %s;" % (dummyClassID)
cur.execute(sqlCommand)
conn.close()
rawDist = {}
rawTime = {}
for i in range(len(row)):
rawTime[row[i][0], row[i][1]] = row[i][2]
rawDist[row[i][0], row[i][1]] = row[i][3]
distMeters = {}
timeSecs = {}
for i in range(len(fromLocs)):
for j in range(len(toLocs)):
try:
distMeters[i, j] = rawDist[startVidList[i], endVidList[j]]
except:
distMeters[i, j] = 0
try:
timeSecs[i, j] = rawTime[startVidList[i], endVidList[j]]
except:
timeSecs[i, j] = 0
return [timeSecs, distMeters]
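# --- Usage sketch (illustrative only) -----------------------------------------
# A minimal sketch of how these helpers chain together. The coordinates and the
# database name below are hypothetical; they assume a local pgRouting database
# built from OSM data, as required by the functions above.
def _example_pgrouting_usage(databaseName='pgrouting_db'):
    depot = [42.80, -78.80]      # hypothetical [lat, lon]
    customer = [42.85, -78.75]   # hypothetical [lat, lon]
    # Snap a point to its nearest street
    street = pgrGetNearestStreet(depot, databaseName)
    snapped = pgrGetSnapToRoadLatLon(street['gid'], depot, databaseName)
    # Route between two points
    path, timeSecs, distMeters = pgrGetShapepointsTimeDist(depot, customer, databaseName)
    # Time/distance matrix between lists of points
    matTime, matDist = pgrGetTimeDist([depot], [customer], databaseName)
    return snapped, path, timeSecs, distMeters, matTime, matDist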
|
"""This module contains the general information for PciEquipSlot ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class PciEquipSlotConsts:
pass
class PciEquipSlot(ManagedObject):
"""This is PciEquipSlot class."""
consts = PciEquipSlotConsts()
naming_props = set([u'id'])
mo_meta = {
"classic": MoMeta("PciEquipSlot", "pciEquipSlot", "equipped-slot-[id]", VersionMeta.Version151f, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'computeRackUnit'], [u'faultInst', u'gpuInventory'], ["Get"]),
"modular": MoMeta("PciEquipSlot", "pciEquipSlot", "equipped-slot-[id]", VersionMeta.Version2013e, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'computeServerNode'], [u'faultInst'], ["Get"])
}
prop_meta = {
"classic": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"controller_reported": MoPropertyMeta("controller_reported", "controllerReported", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version151f, MoPropertyMeta.NAMING, None, None, None, None, [], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"option_rom_status": MoPropertyMeta("option_rom_status", "optionROMStatus", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"smbios_id": MoPropertyMeta("smbios_id", "smbiosId", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"version": MoPropertyMeta("version", "version", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
"modular": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"controller_reported": MoPropertyMeta("controller_reported", "controllerReported", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version2013e, MoPropertyMeta.NAMING, None, None, None, None, [], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"option_rom_status": MoPropertyMeta("option_rom_status", "optionROMStatus", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"smbios_id": MoPropertyMeta("smbios_id", "smbiosId", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"version": MoPropertyMeta("version", "version", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
}
prop_map = {
"classic": {
"childAction": "child_action",
"controllerReported": "controller_reported",
"dn": "dn",
"id": "id",
"model": "model",
"optionROMStatus": "option_rom_status",
"rn": "rn",
"smbiosId": "smbios_id",
"status": "status",
"vendor": "vendor",
"version": "version",
},
"modular": {
"childAction": "child_action",
"controllerReported": "controller_reported",
"dn": "dn",
"id": "id",
"model": "model",
"optionROMStatus": "option_rom_status",
"rn": "rn",
"smbiosId": "smbios_id",
"status": "status",
"vendor": "vendor",
"version": "version",
},
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.controller_reported = None
self.model = None
self.option_rom_status = None
self.smbios_id = None
self.status = None
self.vendor = None
self.version = None
ManagedObject.__init__(self, "PciEquipSlot", parent_mo_or_dn, **kwargs)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 28 13:42:30 2017
@author: hexo
"""
import numpy as np
import pandas as pd
# Read the first sheet
df = pd.read_excel('D:\Tableau_data\示例 - 超市.xls',sheetname=0)
print(type(df))
# Data type of each column
print(df.dtypes)
# Number of columns of each dtype
print(df.get_dtype_counts())
# Still not sure what ftype is actually for; what do sparse|dense indicate?
print(df.ftypes)
print(df.get_ftype_counts())
top_10_data=df.head(10)
#print(top_10_data)
print('----------------------------')
# axis=0 means along the vertical axis (per column), axis=1 along the horizontal axis (per row)
# This is per column: the mean of each column
print(top_10_data.mean(axis=0))
print('----------------------------')
# This is per row: the mean of each row
print(top_10_data.mean(axis=1))
print('----------------------------')
#sort_index
# Tricky: what on earth does this axis mean? (figured out now)
# But I still have not figured out what `level` does
# Sort by the first column in descending order
#print(top_10_data.sort_index(axis=0,level=0,ascending=True))
#print(top_10_data.sort_index(axis=0,level=1,ascending=True))
print(top_10_data)
print('----------------------------')
# Finally managed to sort by 订单日期 (order date) in descending order!!!
# When sorting by multiple columns here, it seems only one sort direction can be applied; both end up descending
print(top_10_data.sort_values(by=['订单日期','行 ID'] , ascending=False).head(2))
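# Note: sort_values can in fact apply a different direction per column by
# passing a list to `ascending`, e.g. (hypothetical):
# print(top_10_data.sort_values(by=['订单日期', '行 ID'], ascending=[False, True]).head(2))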
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 03:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='subtitle',
field=models.CharField(default='test', max_length=255),
preserve_default=False,
),
]
|
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerailizer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
serializer_class = UserSerailizer
class CreateToeknView(ObtainAuthToken):
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
serializer_class = UserSerailizer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
return self.request.user
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import auth
from tempest import clients
from tempest.common import cred_provider
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class IsolatedCreds(cred_provider.CredentialProvider):
def __init__(self, name, tempest_client=True, interface='json',
password='pass', network_resources=None):
super(IsolatedCreds, self).__init__(name, tempest_client, interface,
password, network_resources)
self.network_resources = network_resources
self.isolated_creds = {}
self.isolated_net_resources = {}
self.ports = []
self.tempest_client = tempest_client
self.interface = interface
self.password = password
self.identity_admin_client, self.network_admin_client = (
self._get_admin_clients())
def _get_admin_clients(self):
"""
Returns a tuple with instances of the following admin clients (in this
order):
identity
network
"""
if self.tempest_client:
os = clients.AdminManager(interface=self.interface)
else:
os = clients.OfficialClientManager(
auth.get_default_credentials('identity_admin')
)
return os.identity_client, os.network_client
def _create_tenant(self, name, description):
if self.tempest_client:
_, tenant = self.identity_admin_client.create_tenant(
name=name, description=description)
else:
tenant = self.identity_admin_client.tenants.create(
name,
description=description)
return tenant
def _get_tenant_by_name(self, name):
if self.tempest_client:
_, tenant = self.identity_admin_client.get_tenant_by_name(name)
else:
tenants = self.identity_admin_client.tenants.list()
for ten in tenants:
if ten['name'] == name:
tenant = ten
break
else:
raise exceptions.NotFound('No such tenant')
return tenant
def _create_user(self, username, password, tenant, email):
if self.tempest_client:
_, user = self.identity_admin_client.create_user(username,
password,
tenant['id'],
email)
else:
user = self.identity_admin_client.users.create(username, password,
email,
tenant_id=tenant.id)
return user
def _get_user(self, tenant, username):
if self.tempest_client:
_, user = self.identity_admin_client.get_user_by_username(
tenant['id'],
username)
else:
user = self.identity_admin_client.users.get(username)
return user
def _list_roles(self):
if self.tempest_client:
_, roles = self.identity_admin_client.list_roles()
else:
roles = self.identity_admin_client.roles.list()
return roles
def _assign_user_role(self, tenant, user, role_name):
role = None
try:
roles = self._list_roles()
if self.tempest_client:
role = next(r for r in roles if r['name'] == role_name)
else:
role = next(r for r in roles if r.name == role_name)
except StopIteration:
msg = 'No "%s" role found' % role_name
raise exceptions.NotFound(msg)
if self.tempest_client:
self.identity_admin_client.assign_user_role(tenant['id'],
user['id'], role['id'])
else:
self.identity_admin_client.roles.add_user_role(user.id, role.id,
tenant.id)
def _delete_user(self, user):
if self.tempest_client:
self.identity_admin_client.delete_user(user)
else:
self.identity_admin_client.users.delete(user)
def _delete_tenant(self, tenant):
if self.tempest_client:
self.identity_admin_client.delete_tenant(tenant)
else:
self.identity_admin_client.tenants.delete(tenant)
def _create_creds(self, suffix="", admin=False):
"""Create random credentials under the following schema.
If the name contains a '.', it is the full class path of something, and
we don't really care. If it isn't, it's probably a meaningful name,
so use it.
For logging purposes, -user and -tenant are long and redundant,
don't use them. The user# will be sufficient to figure it out.
"""
if '.' in self.name:
root = ""
else:
root = self.name
tenant_name = data_utils.rand_name(root) + suffix
tenant_desc = tenant_name + "-desc"
tenant = self._create_tenant(name=tenant_name,
description=tenant_desc)
username = data_utils.rand_name(root) + suffix
email = data_utils.rand_name(root) + suffix + "@example.com"
user = self._create_user(username, self.password,
tenant, email)
# NOTE(andrey-mp): user needs this role to create containers in swift
swift_operator_role = CONF.object_storage.operator_role
self._assign_user_role(tenant, user, swift_operator_role)
if admin:
self._assign_user_role(tenant, user, CONF.identity.admin_role)
return self._get_credentials(user, tenant)
def _get_credentials(self, user, tenant):
if self.tempest_client:
user_get = user.get
tenant_get = tenant.get
else:
user_get = user.__dict__.get
tenant_get = tenant.__dict__.get
return auth.get_credentials(
username=user_get('name'), user_id=user_get('id'),
tenant_name=tenant_get('name'), tenant_id=tenant_get('id'),
password=self.password)
def _create_network_resources(self, tenant_id):
network = None
subnet = None
router = None
# Validate the requested network resource settings
if self.network_resources:
if self.network_resources['router']:
if (not self.network_resources['subnet'] or
not self.network_resources['network']):
raise exceptions.InvalidConfiguration(
'A router requires a subnet and network')
elif self.network_resources['subnet']:
if not self.network_resources['network']:
raise exceptions.InvalidConfiguration(
'A subnet requires a network')
elif self.network_resources['dhcp']:
raise exceptions.InvalidConfiguration('DHCP requires a subnet')
data_utils.rand_name_root = data_utils.rand_name(self.name)
if not self.network_resources or self.network_resources['network']:
network_name = data_utils.rand_name_root + "-network"
network = self._create_network(network_name, tenant_id)
try:
if not self.network_resources or self.network_resources['subnet']:
subnet_name = data_utils.rand_name_root + "-subnet"
subnet = self._create_subnet(subnet_name, tenant_id,
network['id'])
if not self.network_resources or self.network_resources['router']:
router_name = data_utils.rand_name_root + "-router"
router = self._create_router(router_name, tenant_id)
self._add_router_interface(router['id'], subnet['id'])
except Exception:
if router:
self._clear_isolated_router(router['id'], router['name'])
if subnet:
self._clear_isolated_subnet(subnet['id'], subnet['name'])
if network:
self._clear_isolated_network(network['id'], network['name'])
raise
return network, subnet, router
def _create_network(self, name, tenant_id):
if self.tempest_client:
resp, resp_body = self.network_admin_client.create_network(
name=name, tenant_id=tenant_id)
else:
body = {'network': {'tenant_id': tenant_id, 'name': name}}
resp_body = self.network_admin_client.create_network(body)
return resp_body['network']
def _create_subnet(self, subnet_name, tenant_id, network_id):
if not self.tempest_client:
body = {'subnet': {'name': subnet_name, 'tenant_id': tenant_id,
'network_id': network_id, 'ip_version': 4}}
if self.network_resources:
body['enable_dhcp'] = self.network_resources['dhcp']
base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
for subnet_cidr in base_cidr.subnet(mask_bits):
try:
if self.tempest_client:
if self.network_resources:
resp, resp_body = self.network_admin_client.\
create_subnet(
network_id=network_id, cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
enable_dhcp=self.network_resources['dhcp'],
ip_version=4)
else:
resp, resp_body = self.network_admin_client.\
create_subnet(network_id=network_id,
cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
ip_version=4)
else:
body['subnet']['cidr'] = str(subnet_cidr)
resp_body = self.network_admin_client.create_subnet(body)
break
except exceptions.BadRequest as e:
if 'overlaps with another subnet' not in str(e):
raise
else:
e = exceptions.BuildErrorException()
e.message = 'Available CIDR for subnet creation could not be found'
raise e
return resp_body['subnet']
def _create_router(self, router_name, tenant_id):
external_net_id = dict(
network_id=CONF.network.public_network_id)
if self.tempest_client:
resp, resp_body = self.network_admin_client.create_router(
router_name,
external_gateway_info=external_net_id,
tenant_id=tenant_id)
else:
body = {'router': {'name': router_name, 'tenant_id': tenant_id,
'external_gateway_info': external_net_id,
'admin_state_up': True}}
resp_body = self.network_admin_client.create_router(body)
return resp_body['router']
def _add_router_interface(self, router_id, subnet_id):
if self.tempest_client:
self.network_admin_client.add_router_interface_with_subnet_id(
router_id, subnet_id)
else:
body = {'subnet_id': subnet_id}
self.network_admin_client.add_interface_router(router_id, body)
def get_primary_network(self):
return self.isolated_net_resources.get('primary')[0]
def get_primary_subnet(self):
return self.isolated_net_resources.get('primary')[1]
def get_primary_router(self):
return self.isolated_net_resources.get('primary')[2]
def get_admin_network(self):
return self.isolated_net_resources.get('admin')[0]
def get_admin_subnet(self):
return self.isolated_net_resources.get('admin')[1]
def get_admin_router(self):
return self.isolated_net_resources.get('admin')[2]
def get_alt_network(self):
return self.isolated_net_resources.get('alt')[0]
def get_alt_subnet(self):
return self.isolated_net_resources.get('alt')[1]
def get_alt_router(self):
return self.isolated_net_resources.get('alt')[2]
def get_credentials(self, credential_type):
if self.isolated_creds.get(credential_type):
credentials = self.isolated_creds[credential_type]
else:
is_admin = (credential_type == 'admin')
credentials = self._create_creds(admin=is_admin)
self.isolated_creds[credential_type] = credentials
# Maintained until tests are ported
LOG.info("Acquired isolated creds:\n credentials: %s"
% credentials)
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled):
network, subnet, router = self._create_network_resources(
credentials.tenant_id)
self.isolated_net_resources[credential_type] = (
network, subnet, router,)
LOG.info("Created isolated network resources for : \n"
+ " credentials: %s" % credentials)
return credentials
def get_primary_creds(self):
return self.get_credentials('primary')
def get_admin_creds(self):
return self.get_credentials('admin')
def get_alt_creds(self):
return self.get_credentials('alt')
def _clear_isolated_router(self, router_id, router_name):
net_client = self.network_admin_client
try:
net_client.delete_router(router_id)
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router_name)
def _clear_isolated_subnet(self, subnet_id, subnet_name):
net_client = self.network_admin_client
try:
net_client.delete_subnet(subnet_id)
except exceptions.NotFound:
LOG.warn('subnet with name: %s not found for delete' %
subnet_name)
def _clear_isolated_network(self, network_id, network_name):
net_client = self.network_admin_client
try:
net_client.delete_network(network_id)
except exceptions.NotFound:
LOG.warn('network with name: %s not found for delete' %
network_name)
def _clear_isolated_net_resources(self):
net_client = self.network_admin_client
for cred in self.isolated_net_resources:
network, subnet, router = self.isolated_net_resources.get(cred)
LOG.debug("Clearing network: %(network)s, "
"subnet: %(subnet)s, router: %(router)s",
{'network': network, 'subnet': subnet, 'router': router})
if (not self.network_resources or
self.network_resources.get('router')):
try:
if self.tempest_client:
net_client.remove_router_interface_with_subnet_id(
router['id'], subnet['id'])
else:
body = {'subnet_id': subnet['id']}
net_client.remove_interface_router(router['id'], body)
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router['name'])
self._clear_isolated_router(router['id'], router['name'])
if (not self.network_resources or
self.network_resources.get('subnet')):
self._clear_isolated_subnet(subnet['id'], subnet['name'])
if (not self.network_resources or
self.network_resources.get('network')):
self._clear_isolated_network(network['id'], network['name'])
def clear_isolated_creds(self):
if not self.isolated_creds:
return
self._clear_isolated_net_resources()
for creds in self.isolated_creds.itervalues():
try:
self._delete_user(creds.user_id)
except exceptions.NotFound:
LOG.warn("user with name: %s not found for delete" %
creds.username)
try:
self._delete_tenant(creds.tenant_id)
except exceptions.NotFound:
LOG.warn("tenant with name: %s not found for delete" %
creds.tenant_name)
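# Usage sketch (illustrative only): how a test fixture might drive this
# provider. The fixture name below is hypothetical; the method names are from
# the class above.
# creds_provider = IsolatedCreds('MyTestCase')
# primary = creds_provider.get_primary_creds()
# admin = creds_provider.get_admin_creds()
# ... run tests with the isolated credentials ...
# creds_provider.clear_isolated_creds()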
|
# pylint: disable=fixme, protected-access
"""The core module contains the SoCo class that implements
the main entry to the SoCo functionality
"""
import datetime
import logging
import re
import socket
from functools import wraps
from xml.sax.saxutils import escape
from xml.parsers.expat import ExpatError
import warnings
import xmltodict
import requests
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import ConnectTimeout, ReadTimeout
from . import config
from .data_structures import (
DidlObject,
DidlPlaylistContainer,
DidlResource,
Queue,
to_didl_string,
)
from .cache import Cache
from .data_structures_entry import from_didl_string
from .exceptions import (
SoCoSlaveException,
SoCoUPnPException,
NotSupportedException,
SoCoNotVisibleException,
)
from .groups import ZoneGroup
from .music_library import MusicLibrary
from .services import (
DeviceProperties,
ContentDirectory,
RenderingControl,
AVTransport,
ZoneGroupTopology,
AlarmClock,
SystemProperties,
MusicServices,
AudioIn,
GroupRenderingControl,
)
from .utils import really_utf8, camel_to_underscore, deprecated
from .xml import XML
_LOG = logging.getLogger(__name__)
class _ArgsSingleton(type):
"""A metaclass which permits only a single instance of each derived class
sharing the same `_class_group` class attribute to exist for any given set
of positional arguments.
Attempts to instantiate a second instance of a derived class, or another
class with the same `_class_group`, with the same args will return the
existing instance.
For example:
>>> class ArgsSingletonBase(object):
... __metaclass__ = _ArgsSingleton
...
>>> class First(ArgsSingletonBase):
... _class_group = "greeting"
... def __init__(self, param):
... pass
...
>>> class Second(ArgsSingletonBase):
... _class_group = "greeting"
... def __init__(self, param):
... pass
>>> assert First('hi') is First('hi')
>>> assert First('hi') is First('bye')
AssertionError
>>> assert First('hi') is Second('hi')
"""
_instances = {}
def __call__(cls, *args, **kwargs):
key = cls._class_group if hasattr(cls, "_class_group") else cls
if key not in cls._instances:
cls._instances[key] = {}
if args not in cls._instances[key]:
cls._instances[key][args] = super().__call__(*args, **kwargs)
return cls._instances[key][args]
class _SocoSingletonBase( # pylint: disable=too-few-public-methods,no-init
_ArgsSingleton("ArgsSingletonMeta", (object,), {})
):
"""The base class for the SoCo class.
Uses a Python 2 and 3 compatible method of declaring a metaclass. See, eg,
here: http://www.artima.com/weblogs/viewpost.jsp?thread=236234 and
here: http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/
"""
def only_on_master(function):
"""Decorator that raises SoCoSlaveException on master call on slave."""
@wraps(function)
def inner_function(self, *args, **kwargs):
"""Master checking inner function."""
if not self.is_coordinator:
message = (
'The method or property "{}" can only be called/used '
"on the coordinator in a group".format(function.__name__)
)
raise SoCoSlaveException(message)
return function(self, *args, **kwargs)
return inner_function
# pylint: disable=R0904,too-many-instance-attributes
class SoCo(_SocoSingletonBase):
"""A simple class for controlling a Sonos speaker.
For any given set of arguments to __init__, only one instance of this class
may be created. Subsequent attempts to create an instance with the same
arguments will return the previously created instance. This means that all
SoCo instances created with the same ip address are in fact the *same* SoCo
instance, reflecting the real world position.
.. rubric:: Basic Methods
.. autosummary::
play_from_queue
play
play_uri
pause
stop
end_direct_control_session
seek
next
previous
mute
volume
play_mode
shuffle
repeat
cross_fade
ramp_to_volume
set_relative_volume
get_current_track_info
get_current_media_info
get_speaker_info
get_current_transport_info
.. rubric:: Queue Management
.. autosummary::
get_queue
queue_size
add_to_queue
add_uri_to_queue
add_multiple_to_queue
remove_from_queue
clear_queue
.. rubric:: Group Management
.. autosummary::
group
partymode
join
unjoin
all_groups
all_zones
visible_zones
.. rubric:: Player Identity and Settings
.. autosummary::
player_name
uid
household_id
is_visible
is_bridge
is_coordinator
is_soundbar
bass
treble
loudness
balance
night_mode
dialog_mode
supports_fixed_volume
fixed_volume
trueplay
status_light
buttons_enabled
.. rubric:: Playlists and Favorites
.. autosummary::
get_sonos_playlists
create_sonos_playlist
create_sonos_playlist_from_queue
remove_sonos_playlist
add_item_to_sonos_playlist
reorder_sonos_playlist
clear_sonos_playlist
move_in_sonos_playlist
remove_from_sonos_playlist
get_sonos_playlist_by_attr
get_favorite_radio_shows
get_favorite_radio_stations
get_sonos_favorites
.. rubric:: Miscellaneous
.. autosummary::
music_source
music_source_from_uri
is_playing_radio
is_playing_tv
is_playing_line_in
switch_to_line_in
switch_to_tv
available_actions
set_sleep_timer
get_sleep_timer
create_stereo_pair
separate_stereo_pair
get_battery_info
.. warning::
Properties on this object are not generally cached and may obtain
information over the network, so may take longer than expected to set
or return a value. It may be a good idea for you to cache the value in
your own code.
.. note::
Since all methods/properties on this object will result in an UPnP
request, they might result in an exception without it being mentioned
in the Raises section.
In most cases, the exception will be a
:class:`soco.exceptions.SoCoUPnPException`
(if the player returns an UPnP error code), but in special cases
it might also be another :class:`soco.exceptions.SoCoException`
or even a `requests` exception.
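.. rubric:: Example
A minimal usage sketch (the IP address below is a placeholder for a
speaker on your own network; any call shown may raise the exceptions
described above)::
from soco import SoCo
device = SoCo("192.168.1.68")  # placeholder IP
device.volume = 25
device.play()
print(device.get_current_track_info()["title"])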
"""
_class_group = "SoCo"
# pylint: disable=super-on-old-class
def __init__(self, ip_address):
# Note: Creation of a SoCo instance should be as cheap and quick as
# possible. Do not make any network calls here
super().__init__()
# Check if ip_address is a valid IPv4 representation.
# Sonos does not (yet) support IPv6
try:
socket.inet_aton(ip_address)
except OSError as error:
raise ValueError("Not a valid IP address string") from error
#: The speaker's ip address
self.ip_address = ip_address
self.speaker_info = {} # Stores information about the current speaker
# The services which we use
# pylint: disable=invalid-name
self.avTransport = AVTransport(self)
self.contentDirectory = ContentDirectory(self)
self.deviceProperties = DeviceProperties(self)
self.renderingControl = RenderingControl(self)
self.groupRenderingControl = GroupRenderingControl(self)
self.zoneGroupTopology = ZoneGroupTopology(self)
self.alarmClock = AlarmClock(self)
self.systemProperties = SystemProperties(self)
self.musicServices = MusicServices(self)
self.audioIn = AudioIn(self)
self.music_library = MusicLibrary(self)
# Some private attributes
self._all_zones = set()
self._groups = set()
self._is_bridge = None
self._is_coordinator = False
self._is_soundbar = None
self._player_name = None
self._uid = None
self._household_id = None
self._visible_zones = set()
self._zgs_cache = Cache(default_timeout=5)
self._zgs_result = None
_LOG.debug("Created SoCo instance for ip: %s", ip_address)
def __str__(self):
return "<{} object at ip {}>".format(self.__class__.__name__, self.ip_address)
def __repr__(self):
return '{}("{}")'.format(self.__class__.__name__, self.ip_address)
@property
def player_name(self):
"""str: The speaker's name."""
# We could get the name like this:
# result = self.deviceProperties.GetZoneAttributes()
# return result["CurrentZoneName"]
# but it is probably quicker to get it from the group topology
# and take advantage of any caching
self._parse_zone_group_state()
return self._player_name
@player_name.setter
def player_name(self, playername):
"""Set the speaker's name."""
self.deviceProperties.SetZoneAttributes(
[
("DesiredZoneName", playername),
("DesiredIcon", ""),
("DesiredConfiguration", ""),
]
)
@property
def uid(self):
"""str: A unique identifier.
Looks like: ``'RINCON_000XXXXXXXXXX1400'``
"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._uid is not None:
return self._uid
# if not, we have to get it from the zone topology, which
# is probably quicker than any alternative, since the zgt is probably
# cached. This will set self._uid for us for next time, so we won't
# have to do this again
self._parse_zone_group_state()
return self._uid
# An alternative way of getting the uid is as follows:
# self.device_description_url = \
# 'http://{0}:1400/xml/device_description.xml'.format(
# self.ip_address)
# response = requests.get(self.device_description_url).text
# tree = XML.fromstring(response.encode('utf-8'))
# udn = tree.findtext('.//{urn:schemas-upnp-org:device-1-0}UDN')
# # the udn has a "uuid:" prefix before the uid, so we need to strip it
# self._uid = uid = udn[5:]
# return uid
@property
def household_id(self):
"""str: A unique identifier for all players in a household.
Looks like: ``'Sonos_asahHKgjgJGjgjGjggjJgjJG34'``
"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, return the cached version
if self._household_id is None:
self._household_id = self.deviceProperties.GetHouseholdID()[
"CurrentHouseholdID"
]
return self._household_id
@property
def is_visible(self):
"""bool: Is this zone visible?
A zone might be invisible if, for example, it is a bridge, or the slave
part of a stereo pair.
"""
# We could do this:
# invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
# but it is better to do it in the following way, which uses the
# zone group topology, to capitalise on any caching.
return self in self.visible_zones
@property
def is_bridge(self):
"""bool: Is this zone a bridge?"""
# Since this does not change over time (?) check whether we already
# know the answer. If so, there is no need to go further
if self._is_bridge is not None:
return self._is_bridge
# if not, we have to get it from the zone topology. This will set
# self._is_bridge for us for next time, so we won't have to do this
# again
self._parse_zone_group_state()
return self._is_bridge
@property
def is_coordinator(self):
"""bool: Is this zone a group coordinator?"""
# We could do this:
# invisible = self.deviceProperties.GetInvisible()['CurrentInvisible']
# but it is better to do it in the following way, which uses the
# zone group topology, to capitalise on any caching.
self._parse_zone_group_state()
return self._is_coordinator
@property
def is_soundbar(self):
"""bool: Is this zone a soundbar (i.e. has night mode etc.)?"""
if self._is_soundbar is None:
if not self.speaker_info:
self.get_speaker_info()
model_name = self.speaker_info["model_name"].lower()
self._is_soundbar = any(model_name.endswith(s) for s in SOUNDBARS)
return self._is_soundbar
@property
def play_mode(self):
"""str: The queue's play mode.
Case-insensitive options are:
* ``'NORMAL'`` -- Turns off shuffle and repeat.
* ``'REPEAT_ALL'`` -- Turns on repeat and turns off shuffle.
* ``'SHUFFLE'`` -- Turns on shuffle *and* repeat. (It's
strange, I know.)
* ``'SHUFFLE_NOREPEAT'`` -- Turns on shuffle and turns off
repeat.
* ``'REPEAT_ONE'`` -- Turns on repeat one and turns off shuffle.
* ``'SHUFFLE_REPEAT_ONE'`` -- Turns on shuffle *and* repeat one. (It's
strange, I know.)
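Example (``device`` is assumed to be a ``SoCo`` instance for a speaker
on your network)::
device.play_mode = "SHUFFLE_NOREPEAT"  # shuffle on, repeat off
device.play_mode = "NORMAL"            # plain playback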
"""
result = self.avTransport.GetTransportSettings(
[
("InstanceID", 0),
]
)
return result["PlayMode"]
@play_mode.setter
def play_mode(self, playmode):
"""Set the speaker's mode."""
playmode = playmode.upper()
if playmode not in PLAY_MODES.keys():
raise KeyError("'%s' is not a valid play mode" % playmode)
self.avTransport.SetPlayMode([("InstanceID", 0), ("NewPlayMode", playmode)])
@property
def shuffle(self):
"""bool: The queue's shuffle option.
True if enabled, False otherwise.
"""
return PLAY_MODES[self.play_mode][0]
@shuffle.setter
def shuffle(self, shuffle):
"""Set the queue's shuffle option."""
repeat = self.repeat
self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]
@property
def repeat(self):
"""bool: The queue's repeat option.
True if enabled, False otherwise.
Can also be the string ``'ONE'`` for play mode
``'REPEAT_ONE'``.
"""
return PLAY_MODES[self.play_mode][1]
@repeat.setter
def repeat(self, repeat):
"""Set the queue's repeat option"""
shuffle = self.shuffle
self.play_mode = PLAY_MODE_BY_MEANING[(shuffle, repeat)]
@property
@only_on_master # Only for symmetry with the setter
def cross_fade(self):
"""bool: The speaker's cross fade state.
True if enabled, False otherwise
"""
response = self.avTransport.GetCrossfadeMode(
[
("InstanceID", 0),
]
)
cross_fade_state = response["CrossfadeMode"]
return bool(int(cross_fade_state))
@cross_fade.setter
@only_on_master
def cross_fade(self, crossfade):
"""Set the speaker's cross fade state."""
crossfade_value = "1" if crossfade else "0"
self.avTransport.SetCrossfadeMode(
[("InstanceID", 0), ("CrossfadeMode", crossfade_value)]
)
def ramp_to_volume(self, volume, ramp_type="SLEEP_TIMER_RAMP_TYPE"):
"""Smoothly change the volume.
There are three ramp types available:
* ``'SLEEP_TIMER_RAMP_TYPE'`` (default): Linear ramp from the
current volume up or down to the new volume. The ramp rate is
1.25 steps per second. For example: To change from volume 50 to
volume 30 would take 16 seconds.
* ``'ALARM_RAMP_TYPE'``: Resets the volume to zero, waits for about
30 seconds, and then ramps the volume up to the desired value at
a rate of 2.5 steps per second. For example: Volume 30 would take
12 seconds for the ramp up (not considering the wait time).
* ``'AUTOPLAY_RAMP_TYPE'``: Resets the volume to zero and then
quickly ramps up at a rate of 50 steps per second. For example:
Volume 30 will take only 0.6 seconds.
The ramp rate is selected by Sonos based on the chosen ramp type, and
the resulting transition time is returned.
This method is non-blocking and has no network overhead once sent.
Args:
volume (int): The new volume.
ramp_type (str, optional): The desired ramp type, as described
above.
Returns:
int: The ramp time in seconds, rounded down. Note that this does
not include the wait time.
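Example (``device`` is assumed to be a ``SoCo`` instance)::
seconds = device.ramp_to_volume(30)  # default sleep-timer style ramp
device.ramp_to_volume(30, ramp_type="AUTOPLAY_RAMP_TYPE")  # quick ramp from zero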
"""
response = self.renderingControl.RampToVolume(
[
("InstanceID", 0),
("Channel", "Master"),
("RampType", ramp_type),
("DesiredVolume", volume),
("ResetVolumeAfter", False),
("ProgramURI", ""),
]
)
return int(response["RampTime"])
def set_relative_volume(self, relative_volume):
"""Adjust the volume up or down by a relative amount.
If the adjustment causes the volume to overshoot the maximum value
of 100, the volume will be set to 100. If the adjustment causes the
volume to undershoot the minimum value of 0, the volume will be set
to 0.
Note that this method is an alternative to using addition and
subtraction assignment operators (+=, -=) on the `volume` property
of a `SoCo` instance. These operators perform the same function as
`set_relative_volume` but require two network calls per operation
instead of one.
Args:
relative_volume (int): The relative volume adjustment. Can be
positive or negative.
Returns:
int: The new volume setting.
Raises:
ValueError: If ``relative_volume`` cannot be cast as an integer.
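Example (``device`` is assumed to be a ``SoCo`` instance)::
device.set_relative_volume(-5)               # down by 5
new_volume = device.set_relative_volume(10)  # up by 10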
"""
relative_volume = int(relative_volume)
# Sonos will automatically handle out-of-range adjustments
response = self.renderingControl.SetRelativeVolume(
[("InstanceID", 0), ("Channel", "Master"), ("Adjustment", relative_volume)]
)
return int(response["NewVolume"])
@only_on_master
def play_from_queue(self, index, start=True):
"""Play a track from the queue by index.
The index number is required as an argument, where the first index
is 0.
Args:
index (int): 0-based index of the track to play
start (bool): If the item that has been set should start playing
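Example (``device`` is assumed to be a ``SoCo`` instance with a
non-empty queue)::
device.play_from_queue(0)               # play the first track in the queue
device.play_from_queue(3, start=False)  # cue up the fourth track without playing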
"""
# Grab the speaker's information if we haven't already since we'll need
# it in the next step.
if not self.speaker_info:
self.get_speaker_info()
# first, set the queue itself as the source URI
uri = "x-rincon-queue:{}#0".format(self.uid)
self.avTransport.SetAVTransportURI(
[("InstanceID", 0), ("CurrentURI", uri), ("CurrentURIMetaData", "")]
)
# second, set the track number with a seek command
self.avTransport.Seek(
[("InstanceID", 0), ("Unit", "TRACK_NR"), ("Target", index + 1)]
)
# finally, just play what's set if needed
if start:
self.play()
@only_on_master
def play(self):
"""Play the currently selected track."""
self.avTransport.Play([("InstanceID", 0), ("Speed", 1)])
@only_on_master
# pylint: disable=too-many-arguments
def play_uri(self, uri="", meta="", title="", start=True, force_radio=False):
"""Play a URI.
Playing a URI will replace what was playing with the stream
given by the URI. For some streams at least a title is
required as metadata. This can be provided using the ``meta``
argument or the ``title`` argument. If the ``title`` argument
is provided minimal metadata will be generated. If ``meta``
argument is provided the ``title`` argument is ignored.
Args:
uri (str): URI of the stream to be played.
meta (str): The metadata to show in the player, DIDL format.
title (str): The title to show in the player (if no meta).
start (bool): If the URI that has been set should start playing.
force_radio (bool): forces a uri to play as a radio stream.
On a Sonos controller music is shown with one of the following display
formats and controls:
* Radio format: Shows the name of the radio station and other available
data. No seek, next, previous, or voting capability.
Examples: TuneIn, radioPup
* Smart Radio: Shows track name, artist, and album. Limited seek, next
and sometimes voting capability depending on the Music Service.
Examples: Amazon Prime Stations, Pandora Radio Stations.
* Track format: Shows track name, artist, and album the same as when
playing from a queue. Full seek, next and previous capabilities.
Examples: Spotify, Napster, Rhapsody.
How it is displayed is determined by the URI prefix:
``x-sonosapi-stream:``, ``x-sonosapi-radio:``,
``x-rincon-mp3radio:``, ``hls-radio:`` default to radio or
smart radio format depending on the stream. Others default to
track format: ``x-file-cifs:``, ``aac:``, ``http:``,
``https:``, ``x-sonos-spotify:`` (used by Spotify),
``x-sonosapi-hls-static:`` (Amazon Prime), ``x-sonos-http:``
(Google Play & Napster).
Some URIs that default to track format could be radio streams,
typically ``http:``, ``https:`` or ``aac:``. To force display
and controls to Radio format, set ``force_radio=True``.
.. note:: Other URI prefixes exist but are less common.
If you have information on these please add to this doc string.
.. note:: A change in Sonos® (as of at least version 6.4.2)
means that the devices no longer accept ordinary ``http:``
and ``https:`` URIs for radio stations. This method has the
option to replace these prefixes with the one that Sonos®
expects, ``x-rincon-mp3radio:``, by using the
``force_radio=True`` parameter. A few streams may fail if
not forced to Radio format.
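Example (illustrative only; the stream URL below is a placeholder)::
device.play_uri("http://example.com/stream.mp3", title="Example radio", force_radio=True)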
"""
if meta == "" and title != "":
meta_template = (
'<DIDL-Lite xmlns:dc="http://purl.org/dc/elements'
'/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" '
'xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/" '
'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">'
'<item id="R:0/0/0" parentID="R:0/0" restricted="true">'
"<dc:title>{title}</dc:title><upnp:class>"
"object.item.audioItem.audioBroadcast</upnp:class><desc "
'id="cdudn" nameSpace="urn:schemas-rinconnetworks-com:'
'metadata-1-0/">{service}</desc></item></DIDL-Lite>'
)
tunein_service = "SA_RINCON65031_"
# Radio stations need to have at least a title to play
meta = meta_template.format(title=escape(title), service=tunein_service)
# change uri prefix to force radio style display and commands
if force_radio:
colon = uri.find(":")
if colon > 0:
uri = "x-rincon-mp3radio{}".format(uri[colon:])
self.avTransport.SetAVTransportURI(
[("InstanceID", 0), ("CurrentURI", uri), ("CurrentURIMetaData", meta)]
)
# The track is enqueued, now play it if needed
if start:
return self.play()
return False
@only_on_master
def pause(self):
"""Pause the currently playing track."""
self.avTransport.Pause([("InstanceID", 0), ("Speed", 1)])
@only_on_master
def stop(self):
"""Stop the currently playing track."""
self.avTransport.Stop([("InstanceID", 0), ("Speed", 1)])
@only_on_master
def end_direct_control_session(self):
"""Ends all third-party controlled streaming sessions."""
self.avTransport.EndDirectControlSession([("InstanceID", 0)])
@only_on_master
def seek(self, position=None, track=None):
"""Seek to a given position.
You can seek both a relative position in the current track and a track
number in the queue.
It is even possible to seek to a tuple or dict containing the absolute
position (relative pos. and track nr.)::
t = ('0:00:00', 0)
player.seek(*t)
d = {'position': '0:00:00', 'track': 0}
player.seek(**d)
Args:
position (str): The desired timestamp in the current track,
specified in the format of HH:MM:SS or H:MM:SS
track (int): The (zero-based) track index in the queue
Raises:
ValueError: If neither position nor track are specified.
SoCoUPnPException: UPnP Error 701 if seeking is not supported,
UPnP Error 711 if the target is invalid.
Note:
The 'track' parameter can only be used if the queue is currently
playing. If not, use :py:meth:`play_from_queue`.
This is currently faster than :py:meth:`play_from_queue` if already
using the queue, as it does not reinstate the queue.
If speaker is already playing it will continue to play after
seek. If paused it will remain paused.
"""
if track is None and position is None:
raise ValueError("No position or track information given")
if track is not None:
self.avTransport.Seek(
[("InstanceID", 0), ("Unit", "TRACK_NR"), ("Target", track + 1)]
)
if position is not None:
if not re.match(r"^[0-9][0-9]?:[0-9][0-9]:[0-9][0-9]$", position):
raise ValueError("invalid timestamp, use HH:MM:SS format")
self.avTransport.Seek(
[("InstanceID", 0), ("Unit", "REL_TIME"), ("Target", position)]
)
@only_on_master
def next(self):
"""Go to the next track.
Keep in mind that next() can return errors
for a variety of reasons. For example, if the Sonos is streaming
Pandora and you call next() several times in quick succession an error
code will likely be returned (since Pandora has limits on how many
songs can be skipped).
"""
self.avTransport.Next([("InstanceID", 0), ("Speed", 1)])
@only_on_master
def previous(self):
"""Go back to the previously played track.
Keep in mind that previous() can return errors
for a variety of reasons. For example, previous() will return an error
code (error code 701) if the Sonos is streaming Pandora since you can't
go back on tracks.
"""
self.avTransport.Previous([("InstanceID", 0), ("Speed", 1)])
@property
def mute(self):
"""bool: The speaker's mute state.
True if muted, False otherwise.
"""
response = self.renderingControl.GetMute(
[("InstanceID", 0), ("Channel", "Master")]
)
mute_state = response["CurrentMute"]
return bool(int(mute_state))
@mute.setter
def mute(self, mute):
"""Mute (or unmute) the speaker."""
mute_value = "1" if mute else "0"
self.renderingControl.SetMute(
[("InstanceID", 0), ("Channel", "Master"), ("DesiredMute", mute_value)]
)
@property
def volume(self):
"""int: The speaker's volume.
An integer between 0 and 100.
"""
response = self.renderingControl.GetVolume(
[
("InstanceID", 0),
("Channel", "Master"),
]
)
volume = response["CurrentVolume"]
return int(volume)
@volume.setter
def volume(self, volume):
"""Set the speaker's volume."""
volume = int(volume)
volume = max(0, min(volume, 100)) # Coerce in range
self.renderingControl.SetVolume(
[("InstanceID", 0), ("Channel", "Master"), ("DesiredVolume", volume)]
)
@property
def bass(self):
"""int: The speaker's bass EQ.
An integer between -10 and 10.
"""
response = self.renderingControl.GetBass(
[
("InstanceID", 0),
("Channel", "Master"),
]
)
bass = response["CurrentBass"]
return int(bass)
@bass.setter
def bass(self, bass):
"""Set the speaker's bass."""
bass = int(bass)
bass = max(-10, min(bass, 10)) # Coerce in range
self.renderingControl.SetBass([("InstanceID", 0), ("DesiredBass", bass)])
@property
def treble(self):
"""int: The speaker's treble EQ.
An integer between -10 and 10.
"""
response = self.renderingControl.GetTreble(
[
("InstanceID", 0),
("Channel", "Master"),
]
)
treble = response["CurrentTreble"]
return int(treble)
@treble.setter
def treble(self, treble):
"""Set the speaker's treble."""
treble = int(treble)
treble = max(-10, min(treble, 10)) # Coerce in range
self.renderingControl.SetTreble([("InstanceID", 0), ("DesiredTreble", treble)])
@property
def loudness(self):
"""bool: The speaker's loudness compensation.
True if on, False otherwise.
Loudness is a complicated topic. You can read about it on
Wikipedia: https://en.wikipedia.org/wiki/Loudness
"""
response = self.renderingControl.GetLoudness(
[
("InstanceID", 0),
("Channel", "Master"),
]
)
loudness = response["CurrentLoudness"]
return bool(int(loudness))
@loudness.setter
def loudness(self, loudness):
"""Switch on/off the speaker's loudness compensation."""
loudness_value = "1" if loudness else "0"
self.renderingControl.SetLoudness(
[
("InstanceID", 0),
("Channel", "Master"),
("DesiredLoudness", loudness_value),
]
)
@property
def balance(self):
"""The left/right balance for the speaker(s).
Returns:
tuple: A 2-tuple (left_channel, right_channel) of integers
between 0 and 100, representing the volume of each channel.
E.g., (100, 100) represents full volume to both channels,
whereas (100, 0) represents left channel at full volume,
right channel at zero volume.
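Example (``device`` is assumed to be a ``SoCo`` instance)::
left, right = device.balance  # e.g. (100, 100)
device.balance = (100, 50)    # shift the balance towards the left channel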
"""
response_lf = self.renderingControl.GetVolume(
[
("InstanceID", 0),
("Channel", "LF"),
]
)
response_rf = self.renderingControl.GetVolume(
[
("InstanceID", 0),
("Channel", "RF"),
]
)
volume_lf = response_lf["CurrentVolume"]
volume_rf = response_rf["CurrentVolume"]
return int(volume_lf), int(volume_rf)
@balance.setter
def balance(self, left_right_tuple):
"""Set the left/right balance for the speaker(s)."""
left, right = left_right_tuple
left = int(left)
right = int(right)
left = max(0, min(left, 100)) # Coerce in range
right = max(0, min(right, 100)) # Coerce in range
self.renderingControl.SetVolume(
[("InstanceID", 0), ("Channel", "LF"), ("DesiredVolume", left)]
)
self.renderingControl.SetVolume(
[("InstanceID", 0), ("Channel", "RF"), ("DesiredVolume", right)]
)
@property
def night_mode(self):
"""bool: The speaker's night mode.
True if on, False if off, None if not supported.
"""
if not self.is_soundbar:
return None
response = self.renderingControl.GetEQ(
[("InstanceID", 0), ("EQType", "NightMode")]
)
return bool(int(response["CurrentValue"]))
@night_mode.setter
def night_mode(self, night_mode):
"""Switch on/off the speaker's night mode.
:param night_mode: Enable or disable night mode
:type night_mode: bool
:raises NotSupportedException: If the device does not support
night mode.
"""
if not self.is_soundbar:
message = "This device does not support night mode"
raise NotSupportedException(message)
self.renderingControl.SetEQ(
[
("InstanceID", 0),
("EQType", "NightMode"),
("DesiredValue", int(night_mode)),
]
)
@property
def dialog_mode(self):
"""bool: The speaker's dialog mode.
True if on, False if off, None if not supported.
"""
if not self.is_soundbar:
return None
response = self.renderingControl.GetEQ(
[("InstanceID", 0), ("EQType", "DialogLevel")]
)
return bool(int(response["CurrentValue"]))
@dialog_mode.setter
def dialog_mode(self, dialog_mode):
"""Switch on/off the speaker's dialog mode.
:param dialog_mode: Enable or disable dialog mode
:type dialog_mode: bool
:raises NotSupportedException: If the device does not support
dialog mode.
"""
if not self.is_soundbar:
message = "This device does not support dialog mode"
raise NotSupportedException(message)
self.renderingControl.SetEQ(
[
("InstanceID", 0),
("EQType", "DialogLevel"),
("DesiredValue", int(dialog_mode)),
]
)
@property
def trueplay(self):
"""bool: Whether Trueplay is enabled on this device.
True if on, False if off.
Devices that do not support Trueplay, or which do not have
a current Trueplay calibration, will return `None` on getting
the property, and raise a `NotSupportedException` when
setting the property.
Can only be set on visible devices. Attempting to set on non-visible
devices will raise a `SoCoNotVisibleException`.
"""
response = self.renderingControl.GetRoomCalibrationStatus([("InstanceID", 0)])
if response["RoomCalibrationAvailable"] == "0":
return None
else:
return response["RoomCalibrationEnabled"] == "1"
@trueplay.setter
def trueplay(self, trueplay):
"""Toggle the device's TruePlay setting. Only available to
Sonos speakers, not the Connect, Amp, etc., and only available to
speakers that have a current Trueplay calibration.
:param trueplay: Enable or disable Trueplay.
:type trueplay: bool
:raises NotSupportedException: If the device does not support
Trueplay or doesn't have a current calibration.
:raises SoCoNotVisibleException: If the device is not visible.
"""
response = self.renderingControl.GetRoomCalibrationStatus([("InstanceID", 0)])
if response["RoomCalibrationAvailable"] == "0":
raise NotSupportedException
if not self.is_visible:
raise SoCoNotVisibleException
trueplay_value = "1" if trueplay else "0"
self.renderingControl.SetRoomCalibrationStatus(
[
("InstanceID", 0),
("RoomCalibrationEnabled", trueplay_value),
]
)
@property
def supports_fixed_volume(self):
"""bool: Whether the device supports fixed volume output."""
response = self.renderingControl.GetSupportsOutputFixed([("InstanceID", 0)])
return response["CurrentSupportsFixed"] == "1"
@property
def fixed_volume(self):
"""bool: The device's fixed volume output setting.
True if on, False if off. Only applicable to certain
Sonos devices (Connect and Port at the time of writing).
All other devices always return False.
Attempting to set this property for a non-applicable
device will raise a `NotSupportedException`.
"""
response = self.renderingControl.GetOutputFixed([("InstanceID", 0)])
return response["CurrentFixed"] == "1"
@fixed_volume.setter
def fixed_volume(self, fixed_volume):
"""Switch on/off the device's fixed volume output setting.
Only applicable to certain Sonos devices.
:param fixed_volume: Enable or disable fixed volume output mode.
:type fixed_volume: bool
:raises NotSupportedException: If the device does not support
fixed volume output mode.
"""
try:
self.renderingControl.SetOutputFixed(
[
("InstanceID", 0),
("DesiredFixed", "1" if fixed_volume else "0"),
]
)
except SoCoUPnPException as error:
raise NotSupportedException from error
def _parse_zone_group_state(self):
"""The Zone Group State contains a lot of useful information.
Retrieve and parse it, and populate the relevant properties.
"""
# zoneGroupTopology.GetZoneGroupState()['ZoneGroupState'] returns XML like
# this:
#
# <ZoneGroups>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXXX1400:0">
# <ZoneGroupMember
# BootSeq="33"
# Configuration="1"
# Icon="x-rincon-roomicon:zoneextender"
# Invisible="1"
# IsZoneBridge="1"
# Location="http://192.168.1.100:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000ZZZ1400"
# ZoneName="BRIDGE"/>
# </ZoneGroup>
# <ZoneGroup Coordinator="RINCON_000XXX1400" ID="RINCON_000XXX1400:46">
# <ZoneGroupMember
# BootSeq="44"
# Configuration="1"
# Icon="x-rincon-roomicon:living"
# Location="http://192.168.1.101:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000XXX1400"
# ZoneName="Living Room"/>
# <ZoneGroupMember
# BootSeq="52"
# Configuration="1"
# Icon="x-rincon-roomicon:kitchen"
# Location="http://192.168.1.102:1400/xml/device_description.xml"
# MinCompatibleVersion="22.0-00000"
# SoftwareVersion="24.1-74200"
# UUID="RINCON_000YYY1400"
# ZoneName="Kitchen"/>
# </ZoneGroup>
# </ZoneGroups>
#
def parse_zone_group_member(member_element):
"""Parse a ZoneGroupMember or Satellite element from Zone Group
State, create a SoCo instance for the member, set basic attributes
and return it."""
# Create a SoCo instance for each member. Because SoCo
# instances are singletons, this is cheap if they have already
# been created, and useful if they haven't. We can then
# update various properties for that instance.
member_attribs = member_element.attrib
ip_addr = member_attribs["Location"].split("//")[1].split(":")[0]
zone = config.SOCO_CLASS(ip_addr)
# share our cache
zone._zgs_cache = self._zgs_cache
# uid doesn't change, but it's not harmful to (re)set it, in case
# the zone is as yet unseen.
zone._uid = member_attribs["UUID"]
zone._player_name = member_attribs["ZoneName"]
# add the zone to the set of all members, and to the set
# of visible members if appropriate
is_visible = member_attribs.get("Invisible") != "1"
if is_visible:
self._visible_zones.add(zone)
self._all_zones.add(zone)
return zone
# This is called quite frequently, so it is worth optimising it.
# Maintain a private cache. If the zgt has not changed, there is no
# need to repeat all the XML parsing. In addition, switch on network
# caching for a short interval (5 secs).
zgs = self.zoneGroupTopology.GetZoneGroupState(cache=self._zgs_cache)[
"ZoneGroupState"
]
if zgs == self._zgs_result:
return
self._zgs_result = zgs
tree = XML.fromstring(zgs.encode("utf-8"))
# Empty the set of all zone_groups
self._groups.clear()
# and the set of all members
self._all_zones.clear()
self._visible_zones.clear()
# Loop over each ZoneGroup Element
for group_element in tree.find("ZoneGroups").findall("ZoneGroup"):
coordinator_uid = group_element.attrib["Coordinator"]
group_uid = group_element.attrib["ID"]
group_coordinator = None
members = set()
for member_element in group_element.findall("ZoneGroupMember"):
zone = parse_zone_group_member(member_element)
# Perform extra processing relevant to direct zone group
# members
#
# If this element has the same UUID as the coordinator, it is
# the coordinator
if zone._uid == coordinator_uid:
group_coordinator = zone
zone._is_coordinator = True
else:
zone._is_coordinator = False
# is_bridge doesn't change, but it does no real harm to
# set/reset it here, just in case the zone has not been seen
# before
zone._is_bridge = member_element.attrib.get("IsZoneBridge") == "1"
# add the zone to the members for this group
members.add(zone)
# Loop over Satellite elements if present, and process as for
# ZoneGroup elements
for satellite_element in member_element.findall("Satellite"):
zone = parse_zone_group_member(satellite_element)
# Assume a satellite can't be a bridge or coordinator, so
# no need to check.
#
# Add the zone to the members for this group.
members.add(zone)
# Now create a ZoneGroup with this info and add it to the list
# of groups
self._groups.add(ZoneGroup(group_uid, group_coordinator, members))
@property
def all_groups(self):
"""set of :class:`soco.groups.ZoneGroup`: All available groups."""
self._parse_zone_group_state()
return self._groups.copy()
@property
def group(self):
""":class:`soco.groups.ZoneGroup`: The Zone Group of which this device
is a member.
None if this zone is a slave in a stereo pair.
"""
for group in self.all_groups:
if self in group:
return group
return None
# To get the group directly from the network, try the code below
# though it is probably slower than that above
# current_group_id = self.zoneGroupTopology.GetZoneGroupAttributes()[
# 'CurrentZoneGroupID']
# if current_group_id:
# for group in self.all_groups:
# if group.uid == current_group_id:
# return group
# else:
# return None
@property
def all_zones(self):
"""set of :class:`soco.groups.ZoneGroup`: All available zones."""
self._parse_zone_group_state()
return self._all_zones.copy()
@property
def visible_zones(self):
"""set of :class:`soco.groups.ZoneGroup`: All visible zones."""
self._parse_zone_group_state()
return self._visible_zones.copy()
def partymode(self):
"""Put all the speakers in the network in the same group, a.k.a Party
Mode.
This blog shows the initial research responsible for this:
http://blog.travelmarx.com/2010/06/exploring-sonos-via-upnp.html
The trick seems to be (only tested on a two-speaker setup) to tell each
speaker which to join. There's probably a bit more to it if multiple
groups have been defined.
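Example (``living_room`` and ``kitchen`` are placeholder ``SoCo``
instances on the same network)::
living_room.partymode()    # group every visible zone behind living_room
kitchen.join(living_room)  # or join a single speaker instead
kitchen.unjoin()           # and leave the group again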
"""
# Tell every other visible zone to join this one
# pylint: disable = expression-not-assigned
[zone.join(self) for zone in self.visible_zones if zone is not self]
def join(self, master):
"""Join this speaker to another "master" speaker."""
self.avTransport.SetAVTransportURI(
[
("InstanceID", 0),
("CurrentURI", "x-rincon:{}".format(master.uid)),
("CurrentURIMetaData", ""),
]
)
self._zgs_cache.clear()
def unjoin(self):
"""Remove this speaker from a group.
Seems to work OK even if you remove what was previously the group
master from its own group. If the speaker was not in a group, the call
also returns OK.
"""
self.avTransport.BecomeCoordinatorOfStandaloneGroup([("InstanceID", 0)])
self._zgs_cache.clear()
def create_stereo_pair(self, rh_slave_speaker):
"""Create a stereo pair.
This speaker becomes the master, left-hand speaker of the stereo
pair. The ``rh_slave_speaker`` becomes the right-hand speaker.
Note that this operation will succeed on dissimilar speakers, unlike
when using the official Sonos apps.
Args:
rh_slave_speaker (SoCo): The speaker that will be added as
the right-hand, slave speaker of the stereo pair.
Raises:
SoCoUPnPException: if either speaker is already part of a
stereo pair.
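Example (the IP addresses below are placeholders for two speakers of
the same model)::
left = SoCo("192.168.1.68")
right = SoCo("192.168.1.69")
left.create_stereo_pair(right)  # 'left' becomes the master of the pair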
"""
# The pairing operation must be applied to the speaker that will
# become the master (the left-hand speaker of the pair).
# Note that if either speaker is part of a group, the call will
# succeed.
param = self.uid + ":LF,LF;" + rh_slave_speaker.uid + ":RF,RF"
self.deviceProperties.AddBondedZones([("ChannelMapSet", param)])
def separate_stereo_pair(self):
"""Separate a stereo pair.
This can be called on either the master (left-hand) speaker, or on the
slave (right-hand) speaker, to create two independent zones.
Raises:
SoCoUPnPException: if the speaker is not a member of a stereo pair.
"""
self.deviceProperties.RemoveBondedZones(
[("ChannelMapSet", ""), ("KeepGrouped", "0")]
)
def switch_to_line_in(self, source=None):
"""Switch the speaker's input to line-in.
Args:
source (SoCo): The speaker whose line-in should be played.
Default is line-in from the speaker itself.
"""
if source:
uid = source.uid
else:
uid = self.uid
self.avTransport.SetAVTransportURI(
[
("InstanceID", 0),
("CurrentURI", "x-rincon-stream:{}".format(uid)),
("CurrentURIMetaData", ""),
]
)
@property
def is_playing_radio(self):
"""bool: Is the speaker playing radio?"""
return self.music_source == MUSIC_SRC_RADIO
@property
def is_playing_line_in(self):
"""bool: Is the speaker playing line-in?"""
return self.music_source == MUSIC_SRC_LINE_IN
@property
def is_playing_tv(self):
"""bool: Is the playbar speaker input from TV?"""
return self.music_source == MUSIC_SRC_TV
@staticmethod
def music_source_from_uri(uri):
"""Determine a music source from a URI.
Arguments:
uri (str) : The URI representing the music source
Returns:
str: The current source of music.
Possible return values are:
* ``'NONE'`` -- speaker has no music to play.
* ``'LIBRARY'`` -- speaker is playing queued titles from the music
library.
* ``'RADIO'`` -- speaker is playing radio.
* ``'WEB_FILE'`` -- speaker is playing a music file via http/https.
* ``'LINE_IN'`` -- speaker is playing music from line-in.
* ``'TV'`` -- speaker is playing input from TV.
* ``'AIRPLAY'`` -- speaker is playing from AirPlay.
* ``'UNKNOWN'`` -- any other input.
The strings above can be imported as ``MUSIC_SRC_LIBRARY``,
``MUSIC_SRC_RADIO``, etc.
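Example (illustrative; the UID in the URIs is a placeholder, and the
return values follow the URI forms used by `switch_to_line_in` and
`switch_to_tv`)::
SoCo.music_source_from_uri("x-rincon-stream:RINCON_000XXX1400")
# -> 'LINE_IN'
SoCo.music_source_from_uri("x-sonos-htastream:RINCON_000XXX1400:spdif")
# -> 'TV'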
"""
for regex, source in SOURCES.items():
if re.match(regex, uri) is not None:
return source
return MUSIC_SRC_UNKNOWN
@property
def music_source(self):
"""str: The current music source (radio, TV, line-in, etc.).
Possible return values are the same as used in `music_source_from_uri()`.
"""
response = self.avTransport.GetPositionInfo(
[("InstanceID", 0), ("Channel", "Master")]
)
return self.music_source_from_uri(response["TrackURI"])
def switch_to_tv(self):
"""Switch the playbar speaker's input to TV."""
self.avTransport.SetAVTransportURI(
[
("InstanceID", 0),
("CurrentURI", "x-sonos-htastream:{}:spdif".format(self.uid)),
("CurrentURIMetaData", ""),
]
)
@property
def status_light(self):
"""bool: The white Sonos status light between the mute button and the
volume up button on the speaker.
True if on, otherwise False.
"""
result = self.deviceProperties.GetLEDState()
LEDState = result["CurrentLEDState"] # pylint: disable=invalid-name
return LEDState == "On"
@status_light.setter
def status_light(self, led_on):
"""Switch on/off the speaker's status light."""
led_state = "On" if led_on else "Off"
self.deviceProperties.SetLEDState(
[
("DesiredLEDState", led_state),
]
)
@property
def buttons_enabled(self):
"""bool: Whether the control buttons on the device are enabled.
`True` if the control buttons are enabled, `False` if disabled.
This property can only be set on visible speakers, and will enable
or disable the buttons for all speakers in any bonded set (e.g., a
stereo pair). Attempting to set it on invisible speakers
(e.g., the RH speaker of a stereo pair) will raise a
`SoCoNotVisibleException`.
"""
lock_state = self.deviceProperties.GetButtonLockState()[
"CurrentButtonLockState"
]
return lock_state == "Off"
@buttons_enabled.setter
def buttons_enabled(self, enabled):
"""Enable or disable the device's control buttons.
Args:
enabled (bool): True to enable the buttons, False to disable.
Raises:
SoCoNotVisibleException: If the speaker is not visible.
"""
if not self.is_visible:
raise SoCoNotVisibleException
lock_state = "Off" if enabled else "On"
self.deviceProperties.SetButtonLockState(
[
("DesiredButtonLockState", lock_state),
]
)
def get_current_track_info(self):
"""Get information about the currently playing track.
Returns:
dict: A dictionary containing information about the currently
playing track: playlist_position, duration, title, artist, album,
position and an album_art link.
If we're unable to return data for a field, we'll return an empty
string. This can happen for all kinds of reasons, so be sure to check
values. For example, a track may not have complete metadata and be
missing an album name. In this case track['album'] will be an empty
string.
.. note:: Calling this method on a slave in a group will not
return the track the group is playing, but the last track
this speaker was playing.
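Example (``device`` is assumed to be a ``SoCo`` instance that is
currently playing)::
track = device.get_current_track_info()
print("{artist} - {title} [{position}/{duration}]".format(**track))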
"""
response = self.avTransport.GetPositionInfo(
[("InstanceID", 0), ("Channel", "Master")]
)
track = {
"title": "",
"artist": "",
"album": "",
"album_art": "",
"position": "",
}
track["playlist_position"] = response["Track"]
track["duration"] = response["TrackDuration"]
track["uri"] = response["TrackURI"]
track["position"] = response["RelTime"]
metadata = response["TrackMetaData"]
# Store the entire Metadata entry in the track, this can then be
# used if needed by the client to restart a given URI
track["metadata"] = metadata
def _parse_radio_metadata(metadata):
"""Try to parse trackinfo from radio metadata."""
radio_track = {}
trackinfo = (
metadata.findtext(
".//{urn:schemas-rinconnetworks-com:" "metadata-1-0/}streamContent"
)
or ""
)
index = trackinfo.find(" - ")
if index > -1:
radio_track["artist"] = trackinfo[:index]
radio_track["title"] = trackinfo[index + 3 :]
elif "TYPE=SNG|" in trackinfo:
# Examples from services:
# Apple Music radio:
# "TYPE=SNG|TITLE Couleurs|ARTIST M83|ALBUM Saturdays = Youth"
# SiriusXM:
# "BR P|TYPE=SNG|TITLE 7.15.17 LA|ARTIST Eagles|ALBUM "
tags = dict([p.split(" ", 1) for p in trackinfo.split("|") if " " in p])
if tags.get("TITLE"):
radio_track["title"] = tags["TITLE"]
if tags.get("ARTIST"):
radio_track["artist"] = tags["ARTIST"]
if tags.get("ALBUM"):
radio_track["album"] = tags["ALBUM"]
else:
# Might find some kind of title anyway in metadata
radio_track["title"] = metadata.findtext(
".//{http://purl.org/dc/" "elements/1.1/}title"
)
if not radio_track["title"]:
radio_track["title"] = trackinfo
return radio_track
# Duration seems to be '0:00:00' when listening to radio
if metadata != "" and track["duration"] == "0:00:00":
metadata = XML.fromstring(really_utf8(metadata))
track.update(_parse_radio_metadata(metadata))
# If the speaker is playing from the line-in source, querying for track
# metadata will return "NOT_IMPLEMENTED".
elif metadata not in ("", "NOT_IMPLEMENTED", None):
# Track metadata is returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(metadata))
md_title = metadata.findtext(".//{http://purl.org/dc/elements/1.1/}title")
md_artist = metadata.findtext(
".//{http://purl.org/dc/elements/1.1/}creator"
)
md_album = metadata.findtext(
".//{urn:schemas-upnp-org:metadata-1-0/upnp/}album"
)
track["title"] = ""
if md_title:
track["title"] = md_title
track["artist"] = ""
if md_artist:
track["artist"] = md_artist
track["album"] = ""
if md_album:
track["album"] = md_album
album_art_url = metadata.findtext(
".//{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI"
)
if album_art_url is not None:
track["album_art"] = self.music_library.build_album_art_full_uri(
album_art_url
)
return track
def get_current_media_info(self):
"""Get information about the currently playing media.
Returns:
dict: A dictionary containing information about the currently
playing media: uri, channel.
If we're unable to return data for a field, we'll return an empty
string.
"""
response = self.avTransport.GetMediaInfo([("InstanceID", 0)])
media = {"uri": "", "channel": ""}
media["uri"] = response["CurrentURI"]
metadata = response.get("CurrentURIMetaData")
if metadata:
metadata = XML.fromstring(really_utf8(metadata))
md_title = metadata.findtext(".//{http://purl.org/dc/elements/1.1/}title")
if md_title:
media["channel"] = md_title
return media
def get_speaker_info(self, refresh=False, timeout=None):
"""Get information about the Sonos speaker.
Arguments:
refresh(bool): Refresh the speaker info cache.
timeout: How long to wait for the server to send
data before giving up, as a float, or a
``(connect timeout, read timeout)`` tuple
e.g. (3, 5). Default is no timeout.
Returns:
dict: Information about the Sonos speaker, such as the UID,
MAC Address, and Zone Name.
"""
if self.speaker_info and refresh is False:
return self.speaker_info
else:
response = requests.get(
"http://" + self.ip_address + ":1400/xml/device_description.xml",
timeout=timeout,
)
dom = XML.fromstring(response.content)
device = dom.find("{urn:schemas-upnp-org:device-1-0}device")
if device is not None:
self.speaker_info["zone_name"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}roomName"
)
# no zone icon in device_description.xml -> player icon
self.speaker_info["player_icon"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}iconList/"
"{urn:schemas-upnp-org:device-1-0}icon/"
"{urn:schemas-upnp-org:device-1-0}url"
)
self.speaker_info["uid"] = self.uid
self.speaker_info["serial_number"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}serialNum"
)
self.speaker_info["software_version"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}softwareVersion"
)
self.speaker_info["hardware_version"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}hardwareVersion"
)
self.speaker_info["model_number"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}modelNumber"
)
self.speaker_info["model_name"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}modelName"
)
self.speaker_info["display_version"] = device.findtext(
"{urn:schemas-upnp-org:device-1-0}displayVersion"
)
# no mac address - extract from serial number
mac = self.speaker_info["serial_number"].split(":")[0]
self.speaker_info["mac_address"] = mac
return self.speaker_info
return None
def get_current_transport_info(self):
"""Get the current playback state.
Returns:
dict: The following information about the
speaker's playing state:
* current_transport_state (``PLAYING``, ``TRANSITIONING``,
``PAUSED_PLAYBACK``, ``STOPPED``)
* current_transport_status (OK, ?)
* current_speed (1, ?)
This allows us to know whether the speaker is playing or not. Other
possible values of CurrentTransportStatus and CurrentSpeed are unknown.
"""
response = self.avTransport.GetTransportInfo(
[
("InstanceID", 0),
]
)
playstate = {
"current_transport_status": "",
"current_transport_state": "",
"current_transport_speed": "",
}
playstate["current_transport_state"] = response["CurrentTransportState"]
playstate["current_transport_status"] = response["CurrentTransportStatus"]
playstate["current_transport_speed"] = response["CurrentSpeed"]
return playstate
@property
@only_on_master
def available_actions(self):
"""The transport actions that are currently available on the
speaker.
:returns: list: A list of strings representing the available actions, such as
['Set', 'Stop', 'Play'].
Possible list items are: 'Set', 'Stop', 'Pause', 'Play',
'Next', 'Previous', 'SeekTime', 'SeekTrackNr'.
"""
result = self.avTransport.GetCurrentTransportActions([("InstanceID", 0)])
actions = result["Actions"]
# The actions might look like 'X_DLNA_SeekTime', but we only want the
# last part
return [action.split("_")[-1] for action in actions.split(", ")]
def get_queue(self, start=0, max_items=100, full_album_art_uri=False):
"""Get information about the queue.
:param start: Starting number of returned matches
:param max_items: Maximum number of returned matches
:param full_album_art_uri: If the album art URI should include the
IP address
:returns: A :py:class:`~.soco.data_structures.Queue` object
This method is heavily based on Sam Soffes (aka soffes) ruby
implementation
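Example (``device`` is assumed to be a ``SoCo`` instance; the returned
queue behaves like a list of DIDL items)::
queue = device.get_queue(max_items=50, full_album_art_uri=True)
titles = [item.title for item in queue]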
"""
queue = []
response = self.contentDirectory.Browse(
[
("ObjectID", "Q:0"),
("BrowseFlag", "BrowseDirectChildren"),
("Filter", "*"),
("StartingIndex", start),
("RequestedCount", max_items),
("SortCriteria", ""),
]
)
result = response["Result"]
metadata = {}
for tag in ["NumberReturned", "TotalMatches", "UpdateID"]:
metadata[camel_to_underscore(tag)] = int(response[tag])
# I'm not sure this is necessary (any more). Even with an empty queue,
# there is still a result object. This should be investigated.
if not result:
# pylint: disable=star-args
return Queue(queue, **metadata)
items = from_didl_string(result)
for item in items:
# Check if the album art URI should be fully qualified
if full_album_art_uri:
self.music_library._update_album_art_to_full_uri(item)
queue.append(item)
# pylint: disable=star-args
return Queue(queue, **metadata)
@property
def queue_size(self):
"""int: Size of the queue."""
response = self.contentDirectory.Browse(
[
("ObjectID", "Q:0"),
("BrowseFlag", "BrowseMetadata"),
("Filter", "*"),
("StartingIndex", 0),
("RequestedCount", 1),
("SortCriteria", ""),
]
)
dom = XML.fromstring(really_utf8(response["Result"]))
queue_size = None
container = dom.find("{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container")
if container is not None:
child_count = container.get("childCount")
if child_count is not None:
queue_size = int(child_count)
return queue_size
def get_sonos_playlists(self, *args, **kwargs):
"""Convenience method for calling
``soco.music_library.get_music_library_information('sonos_playlists')``
Refer to the docstring for that method: `get_music_library_information`
"""
args = tuple(["sonos_playlists"] + list(args))
return self.music_library.get_music_library_information(*args, **kwargs)
@only_on_master
def add_uri_to_queue(self, uri, position=0, as_next=False):
"""Add the URI to the queue.
For arguments and return value see `add_to_queue`.
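Example (the URI below is a placeholder for a track on your music
share; the returned position is 1-based, while `play_from_queue` is
0-based)::
index = device.add_uri_to_queue("x-file-cifs://server/share/song.mp3")
device.play_from_queue(index - 1)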
"""
# FIXME: The res.protocol_info should probably represent the mime type
# etc of the uri. But this seems OK.
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
item = DidlObject(resources=res, title="", parent_id="", item_id="")
return self.add_to_queue(item, position, as_next)
@only_on_master
def add_to_queue(self, queueable_item, position=0, as_next=False):
"""Add a queueable item to the queue.
Args:
queueable_item (DidlObject or MusicServiceItem): The item to be
added to the queue
position (int): The index (1-based) at which the URI should be
added. Default is 0 (add URI at the end of the queue).
as_next (bool): Whether this URI should be played as the next
track in shuffle mode. This only works if ``play_mode=SHUFFLE``.
Returns:
int: The index of the new item in the queue.
"""
metadata = to_didl_string(queueable_item)
response = self.avTransport.AddURIToQueue(
[
("InstanceID", 0),
("EnqueuedURI", queueable_item.resources[0].uri),
("EnqueuedURIMetaData", metadata),
("DesiredFirstTrackNumberEnqueued", position),
("EnqueueAsNext", int(as_next)),
]
)
qnumber = response["FirstTrackNumberEnqueued"]
return int(qnumber)
def add_multiple_to_queue(self, items, container=None):
"""Add a sequence of items to the queue.
Args:
items (list): A sequence of items to be added to the queue
container (DidlObject, optional): A container object which
includes the items.
"""
if container is not None:
container_uri = container.resources[0].uri
container_metadata = to_didl_string(container)
else:
container_uri = "" # Sonos seems to accept this as well
container_metadata = "" # pylint: disable=redefined-variable-type
chunk_size = 16 # With each request, we can only add 16 items
item_list = list(items) # List for slicing
for index in range(0, len(item_list), chunk_size):
chunk = item_list[index : index + chunk_size]
uris = " ".join([item.resources[0].uri for item in chunk])
uri_metadata = " ".join([to_didl_string(item) for item in chunk])
self.avTransport.AddMultipleURIsToQueue(
[
("InstanceID", 0),
("UpdateID", 0),
("NumberOfURIs", len(chunk)),
("EnqueuedURIs", uris),
("EnqueuedURIsMetaData", uri_metadata),
("ContainerURI", container_uri),
("ContainerMetaData", container_metadata),
("DesiredFirstTrackNumberEnqueued", 0),
("EnqueueAsNext", 0),
]
)
@only_on_master
def remove_from_queue(self, index):
"""Remove a track from the queue by index. The index number is
required as an argument, where the first index is 0.
Args:
index (int): The (0-based) index of the track to remove
"""
# TODO: what do these parameters actually do?
updid = "0"
objid = "Q:0/" + str(index + 1)
self.avTransport.RemoveTrackFromQueue(
[
("InstanceID", 0),
("ObjectID", objid),
("UpdateID", updid),
]
)
@only_on_master
def clear_queue(self):
"""Remove all tracks from the queue."""
self.avTransport.RemoveAllTracksFromQueue(
[
("InstanceID", 0),
]
)
@deprecated("0.13", "soco.music_library.get_favorite_radio_shows", "0.15", True)
def get_favorite_radio_shows(self, start=0, max_items=100):
"""Get favorite radio shows from Sonos' Radio app.
Returns:
dict: A dictionary containing the total number of favorites, the
number of favorites returned, and the actual list of favorite radio
shows, represented as a dictionary with ``'title'`` and ``'uri'``
keys.
Depending on what you're building, you may want to check whether the
total number of favorites is greater than the amount you
requested (``max_items``); if it is, use ``start`` to page through and
retrieve the entire list of favorites.
"""
message = (
"The output type of this method will probably change in "
"the future to use SoCo data structures"
)
warnings.warn(message, stacklevel=2)
return self.__get_favorites(RADIO_SHOWS, start, max_items)
@deprecated("0.13", "soco.music_library.get_favorite_radio_stations", "0.15", True)
def get_favorite_radio_stations(self, start=0, max_items=100):
"""Get favorite radio stations from Sonos' Radio app.
See :meth:`get_favorite_radio_shows` for return type and remarks.
"""
message = (
"The output type of this method will probably change in "
"the future to use SoCo data structures"
)
warnings.warn(message, stacklevel=2)
return self.__get_favorites(RADIO_STATIONS, start, max_items)
@deprecated("0.13", "soco.music_library.get_sonos_favorites", "0.15", True)
def get_sonos_favorites(self, start=0, max_items=100):
"""Get Sonos favorites.
See :meth:`get_favorite_radio_shows` for return type and remarks.
"""
message = (
"The output type of this method will probably change in "
"the future to use SoCo data structures"
)
warnings.warn(message, stacklevel=2)
return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def __get_favorites(self, favorite_type, start=0, max_items=100):
"""Helper method for `get_favorite_radio_*` methods.
Args:
favorite_type (str): Specify either `RADIO_STATIONS` or
`RADIO_SHOWS`.
start (int): Which number to start the retrieval from. Used for
paging.
max_items (int): The total number of results to return.
"""
if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
favorite_type = SONOS_FAVORITES
response = self.contentDirectory.Browse(
[
(
"ObjectID",
"FV:2"
if favorite_type is SONOS_FAVORITES
else "R:0/{}".format(favorite_type),
),
("BrowseFlag", "BrowseDirectChildren"),
("Filter", "*"),
("StartingIndex", start),
("RequestedCount", max_items),
("SortCriteria", ""),
]
)
result = {}
favorites = []
results_xml = response["Result"]
if results_xml != "":
# Favorites are returned in DIDL-Lite format
metadata = XML.fromstring(really_utf8(results_xml))
for item in metadata.findall(
"{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container"
if favorite_type == RADIO_SHOWS
else "{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item"
):
favorite = {}
favorite["title"] = item.findtext(
"{http://purl.org/dc/elements/1.1/}title"
)
favorite["uri"] = item.findtext(
"{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res"
)
if favorite_type == SONOS_FAVORITES:
favorite["meta"] = item.findtext(
"{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD"
)
favorites.append(favorite)
result["total"] = response["TotalMatches"]
result["returned"] = len(favorites)
result["favorites"] = favorites
return result
def create_sonos_playlist(self, title):
"""Create a new empty Sonos playlist.
Args:
title: Name of the playlist
:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
response = self.avTransport.CreateSavedQueue(
[
("InstanceID", 0),
("Title", title),
("EnqueuedURI", ""),
("EnqueuedURIMetaData", ""),
]
)
item_id = response["AssignedObjectID"]
obj_id = item_id.split(":", 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id="SQ:", item_id=item_id
)
@only_on_master
# pylint: disable=invalid-name
def create_sonos_playlist_from_queue(self, title):
"""Create a new Sonos playlist from the current queue.
Args:
title: Name of the playlist
:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
# Note: probably same as Queue service method SaveAsSonosPlaylist
# but this has not been tested. This method is what the
# controller uses.
response = self.avTransport.SaveQueue(
[("InstanceID", 0), ("Title", title), ("ObjectID", "")]
)
item_id = response["AssignedObjectID"]
obj_id = item_id.split(":", 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id="SQ:", item_id=item_id
)
@only_on_master
def remove_sonos_playlist(self, sonos_playlist):
"""Remove a Sonos playlist.
Args:
sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
or the item_id (str).
Returns:
bool: True if successful, False otherwise
Raises:
SoCoUPnPException: If sonos_playlist does not point to a valid
object.
"""
object_id = getattr(sonos_playlist, "item_id", sonos_playlist)
return self.contentDirectory.DestroyObject([("ObjectID", object_id)])
def add_item_to_sonos_playlist(self, queueable_item, sonos_playlist):
"""Adds a queueable item to a Sonos' playlist.
Args:
queueable_item (DidlObject): the item to add to the Sonos' playlist
sonos_playlist (DidlPlaylistContainer): the Sonos' playlist to
which the item should be added
"""
# Get the update_id for the playlist
response, _ = self.music_library._music_lib_search(sonos_playlist.item_id, 0, 1)
update_id = response["UpdateID"]
# Form the metadata for queueable_item
metadata = to_didl_string(queueable_item)
# Make the request
self.avTransport.AddURIToSavedQueue(
[
("InstanceID", 0),
("UpdateID", update_id),
("ObjectID", sonos_playlist.item_id),
("EnqueuedURI", queueable_item.resources[0].uri),
("EnqueuedURIMetaData", metadata),
# 2 ** 32 - 1 = 4294967295, this field always has this value. Most
# likely, playlist positions are represented as a 32-bit uint and
# this is therefore the largest index possible. Asking to add at
# this index therefore probably amounts to adding it "at the end"
("AddAtIndex", 4294967295),
]
)
@only_on_master
def set_sleep_timer(self, sleep_time_seconds):
"""Sets the sleep timer.
Args:
sleep_time_seconds (int or NoneType): How long to wait before
turning off speaker in seconds, None to cancel a sleep timer.
Maximum value of 86399
Raises:
SoCoException: Upon errors interacting with Sonos controller
ValueError: Argument/Syntax errors
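Example (``device`` is assumed to be a ``SoCo`` instance)::
device.set_sleep_timer(30 * 60)       # sleep after 30 minutes
remaining = device.get_sleep_timer()  # seconds left, or None
device.set_sleep_timer(None)          # cancel the timer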
"""
# Note: A value of None for sleep_time_seconds is valid, and needs to
# be kept distinct from 0. 0 means go to sleep now,
# which will immediately start the sound tapering, and could be a
# useful feature, while None means cancel the current timer
try:
if sleep_time_seconds is None:
sleep_time = ""
else:
sleep_time = format(datetime.timedelta(seconds=int(sleep_time_seconds)))
self.avTransport.ConfigureSleepTimer(
[
("InstanceID", 0),
("NewSleepTimerDuration", sleep_time),
]
)
except SoCoUPnPException as err:
if "Error 402 received" in str(err):
raise ValueError(
"invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None"
) from err
raise
except ValueError as error:
raise ValueError(
"invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None"
) from error
@only_on_master
def get_sleep_timer(self):
"""Retrieves remaining sleep time, if any
Returns:
int or NoneType: Number of seconds left in timer. If there is no
sleep timer currently set it will return None.
"""
resp = self.avTransport.GetRemainingSleepTimerDuration(
[
("InstanceID", 0),
]
)
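        # The service reports "RemainingSleepTimerDuration" as an "H:MM:SS"
        # string; e.g. "0:29:59" converts to 1799 seconds below. A falsy value
        # (e.g. an empty string) means no timer is set.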
if resp["RemainingSleepTimerDuration"]:
times = resp["RemainingSleepTimerDuration"].split(":")
return int(times[0]) * 3600 + int(times[1]) * 60 + int(times[2])
else:
return None
@only_on_master
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos, update_id=0):
"""Reorder and/or Remove tracks in a Sonos playlist.
        The underlying call is quite complex as it can either move a track
        within the list or delete a track from the playlist. All of this
depends on what tracks and new_pos specify.
If a list is specified for tracks, then a list must be used for
new_pos. Each list element is a discrete modification and the next
list operation must anticipate the new state of the playlist.
        If a comma-formatted string is specified for tracks, then use
        a similar string to specify new_pos. Those operations should be
        ordered from the end of the list to the beginning.
See the helper methods
:py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
:py:meth:`remove_from_sonos_playlist` for simplified usage.
        update_id - If you have a series of operations, tracking the update_id
        and setting it will save a lookup operation.
Examples:
To reorder the first two tracks::
# sonos_playlist specified by the DidlPlaylistContainer object
sonos_playlist = device.get_sonos_playlists()[0]
device.reorder_sonos_playlist(sonos_playlist,
tracks=[0, ], new_pos=[1, ])
# OR specified by the item_id
device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])
To delete the second track::
# tracks/new_pos are a list of int
device.reorder_sonos_playlist(sonos_playlist,
tracks=[1, ], new_pos=[None, ])
# OR tracks/new_pos are a list of int-like
device.reorder_sonos_playlist(sonos_playlist,
tracks=['1', ], new_pos=['', ])
# OR tracks/new_pos are strings - no transform is done
device.reorder_sonos_playlist(sonos_playlist, tracks='1',
new_pos='')
To reverse the order of a playlist with 4 items::
device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
new_pos='0,1,2,3')
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
Sonos playlist object or the item_id (str) of the Sonos
playlist.
            tracks (list): list of track indices (int) to reorder. May also be
                a list of int-like things, e.g. ``['0', '1',]``, OR it may be a
                str of comma-separated int-like things, e.g. ``"0,1"``. Tracks
                are **0**-based, meaning the first track is track 0, just like
                indexing into a Python list.
new_pos (list): list of new positions (int|None)
corresponding to track_list. MUST be the same type as
``tracks``. **0**-based, see tracks above. ``None`` is the
indicator to remove the track. If using a list of strings,
then a remove is indicated by an empty string.
update_id (int): operation id (default: 0) If set to 0, a lookup
is done to find the correct value.
Returns:
dict: Which contains 3 elements: change, length and update_id.
Change in size between original playlist and the resulting
playlist, the length of resulting playlist, and the new
update_id.
Raises:
SoCoUPnPException: If playlist does not exist or if your tracks
and/or new_pos arguments are invalid.
"""
# allow either a string 'SQ:10' or an object with item_id attribute.
object_id = getattr(sonos_playlist, "item_id", sonos_playlist)
if isinstance(tracks, str):
track_list = [
tracks,
]
position_list = [
new_pos,
]
elif isinstance(tracks, int):
track_list = [
tracks,
]
if new_pos is None:
new_pos = ""
position_list = [
new_pos,
]
else:
track_list = [str(x) for x in tracks]
position_list = [str(x) if x is not None else "" for x in new_pos]
# track_list = ','.join(track_list)
# position_list = ','.join(position_list)
if update_id == 0: # retrieve the update id for the object
response, _ = self.music_library._music_lib_search(object_id, 0, 1)
update_id = response["UpdateID"]
change = 0
for track, position in zip(track_list, position_list):
if track == position: # there is no move, a no-op
continue
response = self.avTransport.ReorderTracksInSavedQueue(
[
("InstanceID", 0),
("ObjectID", object_id),
("UpdateID", update_id),
("TrackList", track),
("NewPositionList", position),
]
)
change += int(response["QueueLengthChange"])
update_id = int(response["NewUpdateID"])
length = int(response["NewQueueLength"])
response = {"change": change, "update_id": update_id, "length": length}
return response
@only_on_master
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
"""Clear all tracks from a Sonos playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.clear_sonos_playlist(sonos_playlist)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
            ValueError: If sonos_playlist is specified by string and is not found.
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
"""
if not isinstance(sonos_playlist, DidlPlaylistContainer):
sonos_playlist = self.get_sonos_playlist_by_attr("item_id", sonos_playlist)
count = self.music_library.browse(ml_item=sonos_playlist).total_matches
tracks = ",".join([str(x) for x in range(count)])
if tracks:
return self.reorder_sonos_playlist(
sonos_playlist, tracks=tracks, new_pos="", update_id=update_id
)
else:
return {"change": 0, "update_id": update_id, "length": count}
@only_on_master
def move_in_sonos_playlist(self, sonos_playlist, track, new_pos, update_id=0):
"""Move a track to a new position within a Sonos Playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.move_in_sonos_playlist(sonos_playlist, track=0, new_pos=1)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
track (int): **0**-based position of the track to move. The first
track is track 0, just like indexing into a Python list.
new_pos (int): **0**-based location to move the track.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
"""
return self.reorder_sonos_playlist(
sonos_playlist, int(track), int(new_pos), update_id
)
@only_on_master
def remove_from_sonos_playlist(self, sonos_playlist, track, update_id=0):
"""Remove a track from a Sonos Playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.remove_from_sonos_playlist(sonos_playlist, track=0)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
            track (int): **0**-based position of the track to remove. The first
                track is track 0, just like indexing into a Python list.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
"""
return self.reorder_sonos_playlist(sonos_playlist, int(track), None, update_id)
@only_on_master
def get_sonos_playlist_by_attr(self, attr_name, match):
"""Return the first Sonos Playlist DidlPlaylistContainer that
matches the attribute specified.
Args:
attr_name (str): DidlPlaylistContainer attribute to compare. The
most useful being: 'title' and 'item_id'.
match (str): Value to match.
Returns:
(:class:`~.soco.data_structures.DidlPlaylistContainer`): The
first matching playlist object.
Raises:
(AttributeError): If indicated attribute name does not exist.
(ValueError): If a match can not be found.
Example::
device.get_sonos_playlist_by_attr('title', 'Foo')
device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
"""
for sonos_playlist in self.get_sonos_playlists():
if getattr(sonos_playlist, attr_name) == match:
return sonos_playlist
raise ValueError('No match on "{}" for value "{}"'.format(attr_name, match))
def get_battery_info(self, timeout=3.0):
"""Get battery information for a Sonos speaker.
Obtains battery information for Sonos speakers that report it. This only
applies to Sonos Move speakers at the time of writing.
This method may only work on Sonos 'S2' systems.
Args:
timeout (float, optional): The timeout to use when making the
HTTP request.
Returns:
dict: A `dict` containing battery status data.
Example return value::
{'Health': 'GREEN',
'Level': 100,
'Temperature': 'NORMAL',
'PowerSource': 'SONOS_CHARGING_RING'}
Raises:
NotSupportedException: If the speaker does not report battery
information.
ConnectionError: If the HTTP connection failed, or returned an
unsuccessful status code.
TimeoutError: If making the HTTP connection, or reading the
response, timed out.
"""
# Retrieve information from the speaker's status URL
try:
response = requests.get(
"http://" + self.ip_address + ":1400/status/batterystatus",
timeout=timeout,
)
except (ConnectTimeout, ReadTimeout) as error:
raise TimeoutError from error
except RequestsConnectionError as error:
raise ConnectionError from error
if response.status_code != 200:
raise ConnectionError
# Convert the XML response and traverse to obtain the battery information
battery_info = {}
try:
zp_info = xmltodict.parse(response.text)["ZPSupportInfo"]
for info_item in zp_info["LocalBatteryStatus"]["Data"]:
battery_info[info_item["@name"]] = info_item["#text"]
try:
battery_info["Level"] = int(battery_info["Level"])
except (KeyError, ValueError):
pass
except (KeyError, ExpatError, TypeError) as error:
# Battery information not supported
raise NotSupportedException from error
return battery_info
# definition section
RADIO_STATIONS = 0
RADIO_SHOWS = 1
SONOS_FAVORITES = 2
NS = {
"dc": "{http://purl.org/dc/elements/1.1/}",
"upnp": "{urn:schemas-upnp-org:metadata-1-0/upnp/}",
"": "{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}",
}
# Valid play modes and their meanings as (shuffle, repeat) tuples
PLAY_MODES = {
"NORMAL": (False, False),
"SHUFFLE_NOREPEAT": (True, False),
"SHUFFLE": (True, True),
"REPEAT_ALL": (False, True),
"SHUFFLE_REPEAT_ONE": (True, "ONE"),
"REPEAT_ONE": (False, "ONE"),
}
# Inverse mapping of PLAY_MODES
PLAY_MODE_BY_MEANING = {meaning: mode for mode, meaning in PLAY_MODES.items()}
# Music source names
MUSIC_SRC_LIBRARY = "LIBRARY"
MUSIC_SRC_RADIO = "RADIO"
MUSIC_SRC_WEB_FILE = "WEB_FILE"
MUSIC_SRC_LINE_IN = "LINE_IN"
MUSIC_SRC_TV = "TV"
MUSIC_SRC_AIRPLAY = "AIRPLAY"
MUSIC_SRC_UNKNOWN = "UNKNOWN"
MUSIC_SRC_NONE = "NONE"
# URI prefixes for music sources
SOURCES = {
r"^$": MUSIC_SRC_NONE,
r"^x-file-cifs:": MUSIC_SRC_LIBRARY,
r"^x-rincon-mp3radio:": MUSIC_SRC_RADIO,
r"^x-sonosapi-stream:": MUSIC_SRC_RADIO,
r"^x-sonosapi-radio:": MUSIC_SRC_RADIO,
r"^x-sonosapi-hls:": MUSIC_SRC_RADIO,
r"^aac:": MUSIC_SRC_RADIO,
r"^hls-radio:": MUSIC_SRC_RADIO,
r"^https?:": MUSIC_SRC_WEB_FILE,
r"^x-rincon-stream:": MUSIC_SRC_LINE_IN,
r"^x-sonos-htastream:": MUSIC_SRC_TV,
r"^x-sonos-vli:.*,airplay:": MUSIC_SRC_AIRPLAY,
}
# Soundbar product names
SOUNDBARS = ("playbase", "playbar", "beam", "sonos amp", "arc", "arc sl")
if config.SOCO_CLASS is None:
config.SOCO_CLASS = SoCo
|
'''Wheels support.'''
from distutils.util import get_platform
import email
import itertools
import os
import re
import zipfile
from pkg_resources import Distribution, PathMetadata, parse_version
from pkg_resources.extern.six import PY3
from setuptools import Distribution as SetuptoolsDistribution
from setuptools import pep425tags
from setuptools.command.egg_info import write_requirements
WHEEL_NAME = re.compile(
r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
)\.whl$""",
re.VERBOSE).match
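# Illustrative parse (not part of the original module): for the file name
# "example_pkg-1.0-py2.py3-none-any.whl" the groups come out as
# project_name="example_pkg", version="1.0", build=None,
# py_version="py2.py3", abi="none", platform="any".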
class Wheel(object):
def __init__(self, filename):
match = WHEEL_NAME(os.path.basename(filename))
if match is None:
raise ValueError('invalid wheel name: %r' % filename)
self.filename = filename
for k, v in match.groupdict().items():
setattr(self, k, v)
def tags(self):
'''List tags (py_version, abi, platform) supported by this wheel.'''
return itertools.product(self.py_version.split('.'),
self.abi.split('.'),
self.platform.split('.'))
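    # e.g. a "py2.py3-none-any" wheel yields the tags
    # ('py2', 'none', 'any') and ('py3', 'none', 'any').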
def is_compatible(self):
        '''Is the wheel compatible with the current platform?'''
supported_tags = pep425tags.get_supported()
return next((True for t in self.tags() if t in supported_tags), False)
def egg_name(self):
return Distribution(
project_name=self.project_name, version=self.version,
platform=(None if self.platform == 'any' else get_platform()),
).egg_name() + '.egg'
def install_as_egg(self, destination_eggdir):
'''Install wheel as an egg directory.'''
with zipfile.ZipFile(self.filename) as zf:
dist_basename = '%s-%s' % (self.project_name, self.version)
dist_info = '%s.dist-info' % dist_basename
dist_data = '%s.data' % dist_basename
def get_metadata(name):
with zf.open('%s/%s' % (dist_info, name)) as fp:
value = fp.read().decode('utf-8') if PY3 else fp.read()
return email.parser.Parser().parsestr(value)
wheel_metadata = get_metadata('WHEEL')
dist_metadata = get_metadata('METADATA')
# Check wheel format version is supported.
wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
if not parse_version('1.0') <= wheel_version < parse_version('2.0dev0'):
raise ValueError('unsupported wheel format version: %s' % wheel_version)
# Extract to target directory.
os.mkdir(destination_eggdir)
zf.extractall(destination_eggdir)
# Convert metadata.
dist_info = os.path.join(destination_eggdir, dist_info)
dist = Distribution.from_location(
destination_eggdir, dist_info,
metadata=PathMetadata(destination_eggdir, dist_info)
)
# Note: we need to evaluate and strip markers now,
# as we can't easily convert back from the syntax:
# foobar; "linux" in sys_platform and extra == 'test'
def raw_req(req):
req.marker = None
return str(req)
install_requires = list(sorted(map(raw_req, dist.requires())))
extras_require = {
extra: list(sorted(
req
for req in map(raw_req, dist.requires((extra,)))
if req not in install_requires
))
for extra in dist.extras
}
egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
os.rename(dist_info, egg_info)
os.rename(os.path.join(egg_info, 'METADATA'),
os.path.join(egg_info, 'PKG-INFO'))
setup_dist = SetuptoolsDistribution(attrs=dict(
install_requires=install_requires,
extras_require=extras_require,
))
write_requirements(setup_dist.get_command_obj('egg_info'),
None, os.path.join(egg_info, 'requires.txt'))
# Move data entries to their correct location.
dist_data = os.path.join(destination_eggdir, dist_data)
dist_data_scripts = os.path.join(dist_data, 'scripts')
if os.path.exists(dist_data_scripts):
egg_info_scripts = os.path.join(destination_eggdir,
'EGG-INFO', 'scripts')
os.mkdir(egg_info_scripts)
for entry in os.listdir(dist_data_scripts):
# Remove bytecode, as it's not properly handled
# during easy_install scripts install phase.
if entry.endswith('.pyc'):
os.unlink(os.path.join(dist_data_scripts, entry))
else:
os.rename(os.path.join(dist_data_scripts, entry),
os.path.join(egg_info_scripts, entry))
os.rmdir(dist_data_scripts)
for subdir in filter(os.path.exists, (
os.path.join(dist_data, d)
for d in ('data', 'headers', 'purelib', 'platlib')
)):
for entry in os.listdir(subdir):
os.rename(os.path.join(subdir, entry),
os.path.join(destination_eggdir, entry))
os.rmdir(subdir)
if os.path.exists(dist_data):
os.rmdir(dist_data)
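# Minimal usage sketch (hypothetical paths; not part of the original module):
#   wheel = Wheel('dist/example_pkg-1.0-py2.py3-none-any.whl')
#   if wheel.is_compatible():
#       wheel.install_as_egg(os.path.join('build', wheel.egg_name()))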
|
#
# Generated with MetoceanFatigueAnalysisBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.condition import ConditionBlueprint
class MetoceanFatigueAnalysisBlueprint(ConditionBlueprint):
""""""
def __init__(self, name="MetoceanFatigueAnalysis", package_path="sima/riflex", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(Attribute("changeNumber","integer","",default=0))
self.attributes.append(BlueprintAttribute("resultContainer","sima/sima/ResultContainer","",True))
self.attributes.append(BlueprintAttribute("metoceanCondition","sima/sima/Condition","",False))
self.attributes.append(BlueprintAttribute("analysisCondition","sima/sima/Condition","",False))
|
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from feedbackproj.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
|
#!/usr/bin/env python
import boto3
import sys
import argparse
import ast
import urllib2
from subprocess import call
import time
from datetime import datetime
import shlex
def sqs_get_msg(qname):
sqs = boto3.resource('sqs')
queue = sqs.get_queue_by_name(QueueName=qname)
client = boto3.client('sqs')
message = client.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=1, WaitTimeSeconds=20)
if message.get('Messages'):
m = message.get('Messages')[0]
body = ast.literal_eval(m['Body'])
receipt_handle = m['ReceiptHandle']
else:
body = {'LifecycleTransition': False}
receipt_handle = ""
return body, receipt_handle
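# The lifecycle message body is expected (inferred from the keys used below)
# to look roughly like the following, with hypothetical values:
#   {'LifecycleTransition': 'autoscaling:EC2_INSTANCE_TERMINATING',
#    'EC2InstanceId': 'i-0123456789abcdef0',
#    'LifecycleActionToken': '...'}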
def sqs_delete_msg(qname, receipt_handle):
sqs = boto3.resource('sqs')
queue = sqs.get_queue_by_name(QueueName=qname)
client = boto3.client('sqs')
response = client.delete_message(QueueUrl=queue.url, ReceiptHandle=receipt_handle)
def get_ec2instanceid():
try:
response = urllib2.urlopen('http://169.254.169.254/latest/meta-data/instance-id')
except:
sys.exit("%s I am not running in EC2. Aborting!!" % datetime.now().strftime('%H:%M:%S %D'))
instanceid = response.read()
return instanceid
def main():
parser = argparse.ArgumentParser(description='SQS Lifecycle hook consumer and trigger')
parser.add_argument('-q', '--queue', required=True,
help="Queue resource.")
parser.add_argument('-s', '--state', action='store', choices=['LAUNCHING','TERMINATING'], required=True,
help='Indicates if the consumer is waiting for LAUNCHING or TERMINATING state')
parser.add_argument('-g', '--group', required=True,
help='Auto Scaling Group Name')
parser.add_argument('-H', '--hookName', required=True,
help='Life Cycle Hook Name')
parser.add_argument('-e', '--execute', required=True,
help="The filepath of the triggered script")
parser.add_argument('-w', '--wait', default=60, type=int,
help="Time between query loops in seconds (default: 60)")
arg = parser.parse_args()
if arg.state == "LAUNCHING":
state = "autoscaling:EC2_INSTANCE_LAUNCHING"
elif arg.state == "TERMINATING":
state = "autoscaling:EC2_INSTANCE_TERMINATING"
cmd_args = shlex.split(arg.execute)
print ("%s Getting EC2 instance ID") % datetime.now().strftime('%H:%M:%S %D')
ec2instanceid = get_ec2instanceid()
print ("%s Listening for %s SQS messages using long polling") % (datetime.now().strftime('%H:%M:%S %D'), ec2instanceid)
while 1:
sqs_msg, sqs_receipt_handle = sqs_get_msg(arg.queue)
if sqs_msg['LifecycleTransition'] == "autoscaling:TEST_NOTIFICATION":
print ("%s Tests message consumed") % datetime.now().strftime('%H:%M:%S %D')
elif sqs_msg['LifecycleTransition'] == False:
print ("%s There are no messages in the queue. Sleeping and trying again") % datetime.now().strftime('%H:%M:%S %D')
elif (sqs_msg['LifecycleTransition'] == state) and (sqs_msg['EC2InstanceId'] == ec2instanceid):
sqs_delete_msg(arg.queue, sqs_receipt_handle)
print "%s %s hook message received" % (datetime.now().strftime('%H:%M:%S %D'), arg.state)
print "%s Executing filepath" % datetime.now().strftime('%H:%M:%S %D')
call(cmd_args)
print "%s Completing lifecyle action" % datetime.now().strftime('%H:%M:%S %D')
as_client = boto3.client('autoscaling')
response = as_client.complete_lifecycle_action(
LifecycleHookName=arg.hookName,
AutoScalingGroupName=arg.group,
LifecycleActionToken=sqs_msg['LifecycleActionToken'],
LifecycleActionResult='CONTINUE',
InstanceId=ec2instanceid
)
time.sleep(arg.wait)
if __name__ == '__main__':
sys.exit(main())
|
import argparse
import json
import os
import _jsonnet
import tqdm
from seq2struct import datasets
from seq2struct import models
from seq2struct.utils import registry
from seq2struct.utils import vocab
class Preprocessor:
def __init__(self, config):
self.config = config
self.model_preproc = registry.instantiate(
registry.lookup('model', config['model']).Preproc,
config['model'])
def preprocess(self):
self.model_preproc.clear_items()
for section in self.config['data']:
# if section=="train":
# continue
data = registry.construct('dataset', self.config['data'][section])
for item in tqdm.tqdm(data, desc=section, dynamic_ncols=True):
if True:
to_add, validation_info = self.model_preproc.validate_item(item, section)
if to_add:
self.model_preproc.add_item(item, section, validation_info)
else:
print("======== Error parsing: {}".format(" ".join(item.text)))
self.model_preproc.save()
def add_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True)
parser.add_argument('--config-args')
args = parser.parse_args()
return args
def main(args):
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
preprocessor = Preprocessor(config)
preprocessor.preprocess()
if __name__ == '__main__':
args = add_parser()
main(args)
|
import torch.nn as nn
import torch
import numpy as np
def compute_flops(module, inp, out):
if isinstance(module, nn.Conv2d):
return compute_Conv2d_flops(module, inp, out)
elif isinstance(module, nn.BatchNorm2d):
return compute_BatchNorm2d_flops(module, inp, out)
elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):
return compute_Pool2d_flops(module, inp, out)
elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):
return compute_ReLU_flops(module, inp, out)
elif isinstance(module, nn.Upsample):
return compute_Upsample_flops(module, inp, out)
elif isinstance(module, nn.Linear):
return compute_Linear_flops(module, inp, out)
# elif "loss" in module:
# pass
else:
print(f"[Flops]: {type(module).__name__} is not supported!")
return 0
def compute_Conv2d_flops(module, inp, out):
# Can have multiple inputs, getting the first one
assert isinstance(module, nn.Conv2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
batch_size = inp.size()[0]
in_c = inp.size()[1]
k_h, k_w = module.kernel_size
out_c, out_h, out_w = out.size()[1:]
groups = module.groups
filters_per_channel = out_c // groups
conv_per_position_flops = k_h * k_w * in_c * filters_per_channel
active_elements_count = batch_size * out_h * out_w
total_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if module.bias is not None:
bias_flops = out_c * active_elements_count
total_flops = total_conv_flops + bias_flops
return total_flops
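# Worked example for the count above (assumed shapes, batch size 1): a 3x3
# convolution with 64 input and 128 output channels, groups=1, no bias and a
# 32x32 output map costs 3*3*64*128 * 32*32 = 75,497,472 FLOPs.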
def compute_BatchNorm2d_flops(module, inp, out):
assert isinstance(module, nn.BatchNorm2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
in_c, in_h, in_w = inp.size()[1:]
batch_flops = np.prod(inp.shape)
if module.affine:
batch_flops *= 2
return batch_flops
def compute_ReLU_flops(module, inp, out):
assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU))
batch_size = inp.size()[0]
active_elements_count = batch_size
for s in inp.size()[1:]:
active_elements_count *= s
return active_elements_count
def compute_Pool2d_flops(module, inp, out):
assert isinstance(module, nn.MaxPool2d) or isinstance(module, nn.AvgPool2d)
assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())
return np.prod(inp.shape)
def compute_Linear_flops(module, inp, out):
assert isinstance(module, nn.Linear)
assert len(inp.size()) == 2 and len(out.size()) == 2
batch_size = inp.size()[0]
return batch_size * inp.size()[1] * out.size()[1]
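# e.g. a Linear layer mapping 512 -> 10 features on a batch of 32 costs
# 32 * 512 * 10 = 163,840 FLOPs under this count.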
def compute_Upsample_flops(module, inp, out):
assert isinstance(module, nn.Upsample)
output_size = out[0]
batch_size = inp.size()[0]
output_elements_count = batch_size
for s in output_size.shape[1:]:
output_elements_count *= s
return output_elements_count
|
# module solution.py
#
# Copyright (c) 2018 Rafael Reis
#
"""
solution module - Implements Solution, a class that describes a solution for the problem.
"""
__version__ = "1.0"
import copy
import sys
from random import shuffle
import numpy as np
def random(pctsp, start_size):
s = Solution(pctsp)
length = len(pctsp.prize)
# Modification: start from start_size but increase after maximum number of iterations in case no feasible solution
# is found. When the full length is used, there should always be a feasible solution
for size in range(start_size, length + 1):
if size: s.size = size
i = 0
min_solutions = 30
max_solutions = 1000
while i < min_solutions or (i < max_solutions and not s.is_valid()):
r = Solution(pctsp)
if size: r.size = size
cities = list(range(1, length, 1))
shuffle(cities) # Shuffle in place
r.route = [0] + cities # The city 0 is always the first
if r.quality < s.quality and r.is_valid():
s = r
i += 1
if s.is_valid():
break
assert s.is_valid()
return s
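# Minimal usage sketch (assumes a `pctsp` instance exposing `prize`, `cost`,
# `penal` and `prize_min`, as used throughout this module):
#   s = random(pctsp, start_size=5)
#   s.print_route()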
class Solution(object):
"""
Attributes:
route (:obj:`list` of :obj:`int`): The list of cities in the visiting order
size (:obj:`int`): The quantity of the first cities to be considered in the route list
quality (:obj:`int`): The quality of the solution
"""
def __init__(self, pctsp, size=None):
self._route = []
if size:
self.size = size
else:
self.size = len(
pctsp.prize) # Default size value is the total of cities
self.quality = sys.maxsize
self.pctsp = pctsp
self.prize = 0
"""
Computes the quality of the solution.
"""
def compute(self):
self.prize = 0
self.quality = 0
for i, city in enumerate(self._route):
if i < self.size:
self.prize += self.pctsp.prize[city]
if i > 0:
previousCity = self._route[i - 1]
self.quality += self.pctsp.cost[previousCity][city]
if i + 1 == self.size:
self.quality += self.pctsp.cost[city][0]
else:
self.quality += self.pctsp.penal[city]
def copy(self):
cp = copy.copy(self)
cp._route = list(self._route)
return cp
def swap(self, i, j):
city_i = self._route[i]
city_i_prev = self._route[i - 1]
city_i_next = self._route[(i + 1) % self.size]
city_j = self._route[j]
self.quality = (self.quality - self.pctsp.cost[city_i_prev][city_i] -
self.pctsp.cost[city_i][city_i_next] +
self.pctsp.cost[city_i_prev][city_j] +
self.pctsp.cost[city_j][city_i_next] -
self.pctsp.penal[city_j] + self.pctsp.penal[city_i])
self.prize = self.prize - self.pctsp.prize[city_i] + self.pctsp.prize[
city_j]
self._route[j], self._route[i] = self._route[i], self._route[j]
def is_valid(self):
return self.prize >= self.pctsp.prize_min
def add_city(self):
city_l = self._route[self.size - 1]
city_add = self._route[self.size]
self.quality = (self.quality - self.pctsp.cost[city_l][0] -
self.pctsp.penal[city_add] +
self.pctsp.cost[city_l][city_add] +
self.pctsp.cost[city_add][0])
self.size += 1
self.prize += self.pctsp.prize[city_add]
def remove_city(self, index):
city_rem = self._route[index]
city_rem_prev = self._route[index - 1]
city_rem_next = self._route[(index + 1) % self.size]
self.quality = (self.quality -
self.pctsp.cost[city_rem_prev][city_rem] -
self.pctsp.cost[city_rem][city_rem_next] +
self.pctsp.penal[city_rem] +
self.pctsp.cost[city_rem_prev][city_rem_next])
self.prize -= self.pctsp.prize[city_rem]
del self._route[index]
self._route.append(city_rem)
self.size -= 1
def remove_cities(self, quant):
for i in range(self.size - quant, self.size):
city_rem = self._route[i]
city_rem_prev = self._route[i - 1]
self.quality = (self.quality -
self.pctsp.cost[city_rem_prev][city_rem] +
self.pctsp.penal[city_rem])
self.prize -= self.pctsp.prize[city_rem]
city_rem = self._route[self.size - 1]
city_l = self._route[self.size - quant - 1]
self.quality = (self.quality - self.pctsp.cost[city_rem][0] +
self.pctsp.cost[city_l][0])
self.size -= quant
def print_route(self):
print(self._route)
@property
def route(self):
return self._route
@route.setter
def route(self, r):
self._route = r
self.compute()
|
import numpy as np
from ._CFunctions import _CWithinTimeRange
from ._CTConv import _CTConv
def WithinTimeRange(Timet,Time0,Time1,BoolOut=False):
'''
	Performs a simple check on a test time (Timet) to see if it falls
	between Time0 and Time1.
Inputs
======
Timet : tuple | float
Test time - either a single floating point (array or
scalar) to denote hours of the day, or a tuple containing
(Date,Time).
Time0 : tuple | float
Start time, same format as above.
Time1 : tuple | float
End time, same format as above.
	BoolOut : boolean
		When True, returns a boolean array with the same size as
		Timet, where each element in the range Time0 to Time1 is True.
		When False (the default), returns an array of indices within the time range.
Output
======
out : bool | int
		If BoolOut == True, a boolean (array or scalar), True if within
		the time range.
When BoolOut == False, an integer array of indices is returned.
'''
sh = np.shape(Timet)
s0 = np.size(Time0)
s1 = np.size(Time1)
if s0 == 2:
D0 = Time0[0]
T0 = Time0[1]
else:
T0 = Time0
D0 = 20000101
if s1 == 2:
D1 = Time1[0]
T1 = Time1[1]
else:
T1 = Time1
D1 = 20000101
if sh[0] == 2 and np.size(sh) == 2:
#hopefully this is a list of date and time
D = np.array([Timet[0]]).flatten()
T = np.array([Timet[1]]).flatten()
else:
T = np.array(Timet)
D = np.zeros(T.size,dtype='int32') + 20000101
#convert the dtypes for compatibility with the C++ code
_n = _CTConv(np.size(D),'c_int')
_Date = _CTConv(D,'c_int_ptr')
_ut = _CTConv(T,'c_float_ptr')
_Date0 = _CTConv(D0,'c_int')
_ut0 = _CTConv(T0,'c_float')
_Date1 = _CTConv(D1,'c_int')
_ut1 = _CTConv(T1,'c_float')
_ni = np.zeros(1,dtype='int32')
_ind = np.zeros(_n,dtype='int32')
#call the C++ code
_CWithinTimeRange(_n,_Date,_ut,_Date0,_ut0,_Date1,_ut1,_ni,_ind)
	#reduce the size of the index array
_ind = _ind[:_ni[0]]
#either return the indices or the boolean array
if BoolOut:
out = np.zeros(_n,dtype='bool8')
out[_ind] = True
return out
else:
return _ind
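# Minimal usage sketch (hypothetical values; dates are yyyymmdd integers and
# times are hours of the day, per the docstring above):
#   Timet = (np.array([20010101, 20010101]), np.array([10.0, 22.0]))
#   WithinTimeRange(Timet, (20010101, 9.0), (20010101, 12.0), BoolOut=True)
#   # should flag only the first element as being inside the range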
|
from hangul_romanize import Transliter
from hangul_romanize.rule import academic
class Word:
"""
Object representation of a word record that can update the success_rating of that record.
"""
_romanize = Transliter(academic).translit
def __init__(self, _id: int, english: str, korean: str, score: int):
self._id = _id
self._english = english
self._korean = korean
self._romanized = self._romanize(korean)
self._score = score
def __repr__(self) -> str:
return f"<Word(English: '{self._english}', Korean:'{self._korean}')>"
def get_id(self):
return self._id
def get_english(self):
return self._english
def get_korean(self):
return self._korean
def get_romanized(self):
return self._romanized
def get_score(self):
return self._score
def update_eng_to_kor(self, correct: bool) -> None:
self._score += -2 if correct else 1
if self._score < 10:
self._score = 0
def update_kor_to_eng(self, correct: bool) -> None:
if not correct:
self._score += 2
elif 10 < self._score:
self._score -= 1
if self._score < 10:
self._score = 0
def update_record(self):
from database.access import Access
Access.update_word_record(self)
|
import numpy as np
import scipy.special as sp
from termcolor import colored
import sys
if sys.platform == 'linux':
sys.path.append(r'../lib')
else:
sys.path.append(r'../build/x64/Release')
import NumCpp
####################################################################################
NUM_DECIMALS_ROUND = 1
####################################################################################
def doTest():
print(colored('Testing Integration Module', 'magenta'))
print(colored('Testing gauss_legendre', 'cyan'))
numCoefficients = np.random.randint(2, 5, [1, ]).item()
coefficients = np.random.randint(-20, 20, [numCoefficients, ])
coefficientsC = NumCpp.NdArray(1, numCoefficients)
coefficientsC.setArray(coefficients)
poly = np.poly1d(np.flipud(coefficients), False)
polyIntegral = poly.integ()
polyC = NumCpp.Poly1d(coefficientsC, False)
a, b = np.sort(np.random.rand(2) * 100 - 50)
area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)
areaC = np.round(NumCpp.integrate_gauss_legendre(polyC, a, b), NUM_DECIMALS_ROUND)
if area == areaC:
print(colored('\tPASS', 'green'))
else:
print(area)
print(areaC)
print(colored('\tFAIL', 'red'))
print(colored('Testing romberg', 'cyan'))
PERCENT_LEEWAY = 0.1
numCoefficients = np.random.randint(2, 5, [1, ]).item()
coefficients = np.random.randint(-20, 20, [numCoefficients, ])
coefficientsC = NumCpp.NdArray(1, numCoefficients)
coefficientsC.setArray(coefficients)
poly = np.poly1d(np.flipud(coefficients), False)
polyIntegral = poly.integ()
polyC = NumCpp.Poly1d(coefficientsC, False)
a, b = np.sort(np.random.rand(2) * 100 - 50)
area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)
areaC = np.round(NumCpp.integrate_romberg(polyC, a, b), NUM_DECIMALS_ROUND)
    # romberg is much less accurate so let's give it some leeway
areaLow, areaHigh = np.sort([area * (1 - PERCENT_LEEWAY), area * (1 + PERCENT_LEEWAY)])
if areaLow < areaC < areaHigh:
print(colored('\tPASS', 'green'))
else:
print(area)
print(areaC)
print(colored('\tFAIL', 'red'))
print(colored('Testing simpson', 'cyan'))
numCoefficients = np.random.randint(2, 5, [1, ]).item()
coefficients = np.random.randint(-20, 20, [numCoefficients, ])
coefficientsC = NumCpp.NdArray(1, numCoefficients)
coefficientsC.setArray(coefficients)
poly = np.poly1d(np.flipud(coefficients), False)
polyIntegral = poly.integ()
polyC = NumCpp.Poly1d(coefficientsC, False)
a, b = np.sort(np.random.rand(2) * 100 - 50)
area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)
areaC = np.round(NumCpp.integrate_simpson(polyC, a, b), NUM_DECIMALS_ROUND)
if area == areaC:
print(colored('\tPASS', 'green'))
else:
print(area)
print(areaC)
print(colored('\tFAIL', 'red'))
print(colored('Testing trapazoidal', 'cyan'))
numCoefficients = np.random.randint(2, 5, [1, ]).item()
coefficients = np.random.randint(-20, 20, [numCoefficients, ])
coefficientsC = NumCpp.NdArray(1, numCoefficients)
coefficientsC.setArray(coefficients)
poly = np.poly1d(np.flipud(coefficients), False)
polyIntegral = poly.integ()
polyC = NumCpp.Poly1d(coefficientsC, False)
a, b = np.sort(np.random.rand(2) * 100 - 50)
area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)
areaC = np.round(NumCpp.integrate_trapazoidal(polyC, a, b), NUM_DECIMALS_ROUND)
if area == areaC:
print(colored('\tPASS', 'green'))
else:
print(area)
print(areaC)
print(colored('\tFAIL', 'red'))
####################################################################################
if __name__ == '__main__':
doTest()
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy.conf import settings
import random
class RandomUserAgentMiddleware(object):
def process_request(self, request, spider):
ua = random.choice(settings.get('USER_AGENT_LIST'))
if ua:
request.headers.setdefault('User-Agent', ua)
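# This middleware expects a USER_AGENT_LIST setting; a hypothetical
# configuration (the module path is illustrative, not from the original project):
#   USER_AGENT_LIST = ['Mozilla/5.0 (X11; Linux x86_64) ...']
#   DOWNLOADER_MIDDLEWARES = {'myproject.middlewares.RandomUserAgentMiddleware': 400}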
|
'''OpenGL extension AMD.framebuffer_multisample_advanced
This module customises the behaviour of the
OpenGL.raw.GL.AMD.framebuffer_multisample_advanced to provide a more
Python-friendly API
Overview (from the spec)
This extension extends ARB_framebuffer_object by allowing compromises
between image quality and memory footprint of multisample
antialiasing.
ARB_framebuffer_object introduced RenderbufferStorageMultisample
as a method of defining the parameters for a multisample render
buffer. This function takes a <samples> parameter that has strict
requirements on behavior such that no compromises in the final image
quality are allowed. Additionally, ARB_framebuffer_object requires
that all framebuffer attachments have the same number of samples.
This extension extends ARB_framebuffer_object by providing a new
function, RenderbufferStorageMultisampleAdvancedAMD, that
distinguishes between samples and storage samples for color
renderbuffers where the number of storage samples can be less than
the number of samples. This extension also allows non-matching sample
counts between color and depth/stencil renderbuffers.
This extension does not require any specific combination of sample
counts to be supported.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/AMD/framebuffer_multisample_advanced.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.AMD.framebuffer_multisample_advanced import *
from OpenGL.raw.GL.AMD.framebuffer_multisample_advanced import _EXTENSION_NAME
def glInitFramebufferMultisampleAdvancedAMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
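# Illustrative check (assumes a current GL context has already been created):
#   if glInitFramebufferMultisampleAdvancedAMD():
#       # the AMD_framebuffer_multisample_advanced entry points are available
#       pass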
### END AUTOGENERATED SECTION
|