import time
from collections import Counter, defaultdict
import warnings; warnings.filterwarnings('ignore')
import glob
import re
import ast
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from algorithms import ShapeletTransformer
from extractors.extractor import MultiGeneticExtractor
from data.load_all_datasets import load_data_train_test
from sklearn.metrics import accuracy_score, log_loss
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from tslearn.shapelets import ShapeletModel
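# Experiment sketch (inferred from the code below): for every UCR-style dataset listed
# further down, train an LTS ShapeletModel whose number of shapelets per length is matched
# to the "genetic" shapelets previously written to results/lts_vs_genetic/, then transform
# the train/test series to shapelet distances, fit RF / LR / SVM classifiers on top, and
# write predictions, learned shapelets and timings to results/lts_smaller/.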
def parse_shapelets(shapelets):
shapelets = shapelets.replace(']', '],')[:-2]
shapelets = re.sub(r'\s+', ', ', shapelets)
shapelets = re.sub(r',+', ',', shapelets)
shapelets = shapelets.replace('],[', '], [')
shapelets = shapelets.replace('[,', '[')
shapelets = '[' + shapelets + ']'
shapelets = re.sub(r',\s+]', ']', shapelets)
return ast.literal_eval(shapelets)
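# Illustrative example (assuming one numpy-printed shapelet per line, as written by fit_lts below):
#   parse_shapelets('[ 1.  2.  3.]\n[ 4.  5.]\n')  ->  [[1.0, 2.0, 3.0], [4.0, 5.0]]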
def fit_rf(X_distances_train, y_train, X_distances_test, y_test, out_path):
rf = GridSearchCV(RandomForestClassifier(), {'n_estimators': [10, 25, 50, 100, 500], 'max_depth': [None, 3, 7, 15]})
rf.fit(X_distances_train, y_train)
hard_preds = rf.predict(X_distances_test)
proba_preds = rf.predict_proba(X_distances_test)
print("[RF] Accuracy = {}".format(accuracy_score(y_test, hard_preds)))
print("[RF] Logloss = {}".format(log_loss(y_test, proba_preds)))
hard_preds = pd.DataFrame(hard_preds, columns=['prediction'])
    proba_preds = pd.DataFrame(proba_preds, columns=['proba_{}'.format(x) for x in sorted(set(list(y_train) + list(y_test)))])  # sorted so columns match predict_proba's class order
hard_preds.to_csv(out_path.split('.')[0]+'_rf_hard.csv')
proba_preds.to_csv(out_path.split('.')[0]+'_rf_proba.csv')
def fit_lr(X_distances_train, y_train, X_distances_test, y_test, out_path):
lr = GridSearchCV(LogisticRegression(), {'penalty': ['l1', 'l2'], 'C': [0.001, 0.01, 0.1, 1.0, 10.0]})
lr.fit(X_distances_train, y_train)
hard_preds = lr.predict(X_distances_test)
proba_preds = lr.predict_proba(X_distances_test)
print("[LR] Accuracy = {}".format(accuracy_score(y_test, hard_preds)))
print("[LR] Logloss = {}".format(log_loss(y_test, proba_preds)))
hard_preds = pd.DataFrame(hard_preds, columns=['prediction'])
    proba_preds = pd.DataFrame(proba_preds, columns=['proba_{}'.format(x) for x in sorted(set(list(y_train) + list(y_test)))])
hard_preds.to_csv(out_path.split('.')[0]+'_lr_hard.csv')
proba_preds.to_csv(out_path.split('.')[0]+'_lr_proba.csv')
def fit_svm(X_distances_train, y_train, X_distances_test, y_test, out_path):
svc = GridSearchCV(SVC(kernel='linear', probability=True), {'C': [0.001, 0.01, 0.1, 1.0, 10.0]})
svc.fit(X_distances_train, y_train)
hard_preds = svc.predict(X_distances_test)
proba_preds = svc.predict_proba(X_distances_test)
print("[SVM] Accuracy = {}".format(accuracy_score(y_test, hard_preds)))
print("[SVM] Logloss = {}".format(log_loss(y_test, proba_preds)))
hard_preds = pd.DataFrame(hard_preds, columns=['prediction'])
    proba_preds = pd.DataFrame(proba_preds, columns=['proba_{}'.format(x) for x in sorted(set(list(y_train) + list(y_test)))])
hard_preds.to_csv(out_path.split('.')[0]+'_svm_hard.csv')
proba_preds.to_csv(out_path.split('.')[0]+'_svm_proba.csv')
def fit_lts(X_train, y_train, X_test, y_test, shap_dict, reg, max_it, shap_out_path, pred_out_path, timing_out_path):
    # Fit the LTS model, print metrics on the test set, and write out predictions and shapelets
clf = ShapeletModel(n_shapelets_per_size=shap_dict,
max_iter=max_it, verbose_level=0, batch_size=1,
optimizer='sgd', weight_regularizer=reg)
start = time.time()
clf.fit(
np.reshape(
X_train,
(X_train.shape[0], X_train.shape[1], 1)
),
y_train
)
learning_time = time.time() - start
print('Learning shapelets took {}s'.format(learning_time))
with open(shap_out_path, 'w+') as ofp:
for shap in clf.shapelets_:
ofp.write(str(np.reshape(shap, (-1))) + '\n')
with open(timing_out_path, 'w+') as ofp:
ofp.write(str(learning_time))
X_distances_train = clf.transform(X_train)
X_distances_test = clf.transform(X_test)
print('Max distance value = {}'.format(np.max(X_distances_train)))
fit_rf(X_distances_train, y_train, X_distances_test, y_test, pred_out_path)
fit_lr(X_distances_train, y_train, X_distances_test, y_test, pred_out_path)
fit_svm(X_distances_train, y_train, X_distances_test, y_test, pred_out_path)
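# Per-dataset LTS hyper-parameters, unpacked in the main loop below as
# [nr_shap, l, r, reg, max_it]; only reg (weight regularizer) and max_it (SGD iterations)
# are actually passed to fit_lts here, since the shapelet-size dictionary is derived from
# the genetic extraction results instead.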
hyper_parameters_lts = {
'Adiac': [0.3, 0.2, 3, 0.01, 10000],
'Beef': [0.15, 0.125, 3, 0.01, 10000],
'BeetleFly': [0.15, 0.125, 1, 0.01, 5000],
'BirdChicken': [0.3, 0.075, 1, 0.1, 10000],
'ChlorineConcentration': [0.3, 0.2, 3, 0.01, 10000],
'Coffee': [0.05, 0.075, 2, 0.01, 5000],
'DiatomSizeReduction': [0.3, 0.175, 2, 0.01, 10000],
'ECGFiveDays': [0.05, 0.125, 2, 0.01, 10000],
'FaceFour': [0.3, 0.175, 3, 1.0, 5000],
'GunPoint': [0.15, 0.2, 3, 0.1, 10000],
'ItalyPowerDemand': [0.3, 0.2, 3, 0.01, 5000],
'Lightning7': [0.05, 0.075, 3, 1, 5000],
'MedicalImages': [0.3, 0.2, 2, 1, 10000],
'MoteStrain': [0.3, 0.2, 3, 1, 10000],
#NOT AVAILABLE#'Otoliths': [0.15, 0.125, 3, 0.01, 2000],
'SonyAIBORobotSurface1': [0.3, 0.125, 2, 0.01, 10000],
'SonyAIBORobotSurface2': [0.3, 0.125, 2, 0.01, 10000],
'Symbols': [0.05, 0.175, 1, 0.1, 5000],
'SyntheticControl': [0.15, 0.125, 3, 0.01, 5000],
'Trace': [0.15, 0.125, 2, 0.1, 10000],
'TwoLeadECG': [0.3, 0.075, 1, 0.1, 10000]
}
datasets = [
'Adiac',
'Beef',
'BeetleFly',
'BirdChicken',
'ChlorineConcentration',
'Coffee',
'ECGFiveDays',
'FaceFour',
'GunPoint',
'ItalyPowerDemand',
'Lightning7',
'MedicalImages',
'MoteStrain',
'SonyAIBORobotSurface1',
'SonyAIBORobotSurface2',
'Symbols',
'SyntheticControl',
'Trace',
'TwoLeadECG',
'DiatomSizeReduction'
]
learning_sizes = defaultdict(list)
genetic_sizes = defaultdict(list)
metadata = sorted(load_data_train_test(), key=lambda x: x['train']['n_samples']**2*x['train']['n_features']**3)
for dataset in metadata:
train_df = pd.read_csv(dataset['train']['data_path'])
test_df = pd.read_csv(dataset['test']['data_path'])
X_train = train_df.drop('target', axis=1).values
y_train = train_df['target']
X_test = test_df.drop('target', axis=1).values
y_test = test_df['target']
map_dict = {}
for j, c in enumerate(np.unique(y_train)):
map_dict[c] = j
y_train = y_train.map(map_dict)
y_test = y_test.map(map_dict)
y_train = y_train.values
y_test = y_test.values
nr_shap, l, r, reg, max_it = hyper_parameters_lts[dataset['train']['name']]
files = glob.glob('results/lts_vs_genetic/{}_genetic_shapelets*.txt'.format(dataset['train']['name']))
if len(files):
sizes = []
for f in files:
shaps = parse_shapelets(open(f, 'r').read())
genetic_sizes[dataset['train']['name']].append(len(shaps))
for s in shaps:
sizes.append(len(s))
shap_dict_cntr = Counter(np.random.choice(sizes, size=int(np.mean(genetic_sizes[dataset['train']['name']]))))
shap_dict = {}
for c in shap_dict_cntr:
shap_dict[int(c)] = int(shap_dict_cntr[c])
fit_lts(X_train, y_train, X_test, y_test, dict(shap_dict), reg, max_it,
'results/lts_smaller/{}_learned_shapelets_{}.txt'.format(dataset['train']['name'], int(time.time())),
'results/lts_smaller/{}_learned_shapelets_predictions_{}.csv'.format(dataset['train']['name'], int(time.time())),
'results/lts_smaller/{}_learned_runtime_{}.csv'.format(dataset['train']['name'], int(time.time()))
)
|
from collections import deque
from typing import List

class SolutionTLE:
def shortestDistance(self, grid: List[List[int]]) -> int:
buildings = []
rows, cols = len(grid), len(grid[0])
for row in range(rows):
for col in range(cols):
if grid[row][col] == 1:
buildings.append((row, col))
def bfs(start):
row, col = start
visited = set()
queue = deque([(row, col, 0)])
distance = {}
while queue:
curr_row, curr_col, steps = queue.popleft()
for offset_row, offset_col in [(0, 1), (1, 0), (0, -1), (-1, 0)]:
next_row, next_col = curr_row + offset_row, curr_col + offset_col
if next_row < 0 or next_row >= rows \
or next_col < 0 or next_col >= cols:
continue
if grid[next_row][next_col] == 0:
if (next_row, next_col) not in visited:
visited.add((next_row, next_col))
distance[(next_row, next_col)] = steps + 1
queue.append((next_row, next_col, steps + 1))
return distance
total_distance = {}
for start in buildings:
distances = bfs(start)
for land, min_distance in distances.items():
if land not in total_distance:
total_distance[land] = (0, 0)
curr_count, curr_distance = total_distance[land]
total_distance[land] = (curr_count + 1, curr_distance + min_distance)
total_buildings = len(buildings)
min_distance_sum = float('inf')
for count, min_distance in total_distance.values():
if count == total_buildings:
min_distance_sum = min(min_distance_sum, min_distance)
return min_distance_sum if min_distance_sum != float('inf') else -1
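# Same per-building BFS over empty land as SolutionTLE, but the visited structure is a
# boolean matrix instead of a set of tuples, which avoids hashing overhead on large grids.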
class SolutionArray:
def shortestDistance(self, grid: List[List[int]]) -> int:
buildings = []
rows, cols = len(grid), len(grid[0])
for row in range(rows):
for col in range(cols):
if grid[row][col] == 1:
buildings.append((row, col))
def bfs(start):
row, col = start
visited = [[False]*cols for _ in range(rows)]
queue = deque([(row, col, 0)])
distance = {}
while queue:
curr_row, curr_col, steps = queue.popleft()
for offset_row, offset_col in [(0, 1), (1, 0), (0, -1), (-1, 0)]:
next_row, next_col = curr_row + offset_row, curr_col + offset_col
if next_row < 0 or next_row >= rows \
or next_col < 0 or next_col >= cols:
continue
if grid[next_row][next_col] == 0:
if not visited[next_row][next_col]:
visited[next_row][next_col] = True
distance[(next_row, next_col)] = steps + 1
queue.append((next_row, next_col, steps + 1))
return distance
total_distance = {}
for start in buildings:
distances = bfs(start)
for land, min_distance in distances.items():
if land not in total_distance:
total_distance[land] = (0, 0)
curr_count, curr_distance = total_distance[land]
total_distance[land] = (curr_count + 1, curr_distance + min_distance)
total_buildings = len(buildings)
min_distance_sum = float('inf')
for count, min_distance in total_distance.values():
if count == total_buildings:
min_distance_sum = min(min_distance_sum, min_distance)
return min_distance_sum if min_distance_sum != float('inf') else -1
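# Illustrative usage (assumes the usual encoding: 0 = empty land, 1 = building, 2 = obstacle):
#   grid = [[1, 0, 2, 0, 1],
#           [0, 0, 0, 0, 0],
#           [0, 0, 1, 0, 0]]
#   SolutionArray().shortestDistance(grid)  # -> 7 (best meeting cell is row 1, column 2)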
|
from torch.distributed.rpc import RRef
from hearthstone.simulator.agent import AnnotatingAgent, Annotation, DiscoverChoiceAction, StandardAction, \
RearrangeCardsAction, HeroChoiceAction
# Thin proxy: each agent callback is forwarded synchronously (rpc_sync) to an agent object
# that lives on another RPC worker and is addressed through the given RRef.
class RemoteAgent(AnnotatingAgent):
def __init__(self, remote_agent: RRef):
self.remote_agent = remote_agent
async def hero_choice_action(self, player: 'Player') -> HeroChoiceAction:
return self.remote_agent.rpc_sync().hero_choice_action(player)
async def annotated_rearrange_cards(self, player: 'Player') -> (RearrangeCardsAction, Annotation):
return self.remote_agent.rpc_sync().annotated_rearrange_cards(player)
async def annotated_buy_phase_action(self, player: 'Player') -> (StandardAction, Annotation):
return self.remote_agent.rpc_sync().annotated_buy_phase_action(player)
async def annotated_discover_choice_action(self, player: 'Player') -> (DiscoverChoiceAction, Annotation):
return self.remote_agent.rpc_sync().annotated_discover_choice_action(player)
async def game_over(self, player: 'Player', ranking: int) -> Annotation:
return self.remote_agent.rpc_sync().game_over(player, ranking)
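# Illustrative wiring (hypothetical worker/agent names, not part of this module):
#   import torch.distributed.rpc as rpc
#   agent_rref = rpc.remote("worker1", SomeLocalAgent)  # RRef to an agent built on another worker
#   agent = RemoteAgent(agent_rref)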
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=1024, FREQ='D', seed=0, trendtype="MovingMedian", cycle_length=30,
                    transform="RelativeDifference", sigma=0.0, exog_count=100, ar_order=0)
|
"""
Base class for modular abelian varieties
AUTHORS:
- William Stein (2007-03)
TESTS::
sage: A = J0(33)
sage: D = A.decomposition(); D
[
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33),
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33),
Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
]
sage: loads(dumps(D)) == D
True
sage: loads(dumps(A)) == A
True
"""
###########################################################################
# Copyright (C) 2007 William Stein <wstein@gmail.com> #
# Distributed under the terms of the GNU General Public License (GPL) #
# http://www.gnu.org/licenses/ #
###########################################################################
from sage.categories.all import ModularAbelianVarieties
from sage.structure.sequence import Sequence, Sequence_generic
from sage.structure.parent_base import ParentWithBase
from morphism import HeckeOperator, Morphism, DegeneracyMap
from torsion_subgroup import RationalTorsionSubgroup, QQbarTorsionSubgroup
from finite_subgroup import (FiniteSubgroup_lattice, FiniteSubgroup, TorsionPoint)
from cuspidal_subgroup import CuspidalSubgroup, RationalCuspidalSubgroup, RationalCuspSubgroup
from sage.rings.all import (ZZ, QQ, QQbar, LCM,
divisors, Integer, prime_range)
from sage.rings.ring import is_Ring
from sage.modules.free_module import is_FreeModule
from sage.modular.arithgroup.all import is_CongruenceSubgroup, is_Gamma0, is_Gamma1, is_GammaH
from sage.modular.modsym.all import ModularSymbols
from sage.modular.modsym.space import ModularSymbolsSpace
from sage.matrix.all import matrix, block_diagonal_matrix, identity_matrix
from sage.modules.all import vector
from sage.groups.all import AbelianGroup
from sage.databases.cremona import cremona_letter_code
from sage.misc.all import prod
from copy import copy
import homology
import homspace
import lseries
def is_ModularAbelianVariety(x):
"""
Return True if x is a modular abelian variety.
INPUT:
- ``x`` - object
EXAMPLES::
sage: from sage.modular.abvar.abvar import is_ModularAbelianVariety
sage: is_ModularAbelianVariety(5)
False
sage: is_ModularAbelianVariety(J0(37))
True
    Returning True is a statement about the data type, not whether or
    not some abelian variety is modular::
sage: is_ModularAbelianVariety(EllipticCurve('37a'))
False
"""
return isinstance(x, ModularAbelianVariety_abstract)
class ModularAbelianVariety_abstract(ParentWithBase):
def __init__(self, groups, base_field, is_simple=None, newform_level=None,
isogeny_number=None, number=None, check=True):
"""
Abstract base class for modular abelian varieties.
INPUT:
- ``groups`` - a tuple of congruence subgroups
- ``base_field`` - a field
- ``is_simple`` - bool; whether or not self is
simple
- ``newform_level`` - if self is isogenous to a
newform abelian variety, returns the level of that abelian variety
- ``isogeny_number`` - which isogeny class the
corresponding newform is in; this corresponds to the Cremona letter
code
        - ``number`` - the parameter `t` of the degeneracy map under
          which this abelian variety is the image
- ``check`` - whether to do some type checking on the
defining data
        EXAMPLES: One should not create an instance of this class, but we
        do so anyway here as an example::
sage: A = sage.modular.abvar.abvar.ModularAbelianVariety_abstract((Gamma0(37),), QQ)
sage: type(A)
<class 'sage.modular.abvar.abvar.ModularAbelianVariety_abstract_with_category'>
All hell breaks loose if you try to do anything with `A`::
sage: A
<repr(<sage.modular.abvar.abvar.ModularAbelianVariety_abstract_with_category at 0x...>) failed: NotImplementedError: BUG -- lattice method must be defined in derived class>
All instances of this class are in the category of modular
abelian varieties::
sage: A.category()
Category of modular abelian varieties over Rational Field
sage: J0(23).category()
Category of modular abelian varieties over Rational Field
"""
if check:
if not isinstance(groups, tuple):
raise TypeError("groups must be a tuple")
for G in groups:
if not is_CongruenceSubgroup(G):
raise TypeError("each element of groups must be a congruence subgroup")
self.__groups = groups
if is_simple is not None:
self.__is_simple = is_simple
if newform_level is not None:
self.__newform_level = newform_level
if number is not None:
self.__degen_t = number
if isogeny_number is not None:
self.__isogeny_number = isogeny_number
        if check and not (is_Ring(base_field) and base_field.is_field()):
raise TypeError("base_field must be a field")
ParentWithBase.__init__(self, base_field, category = ModularAbelianVarieties(base_field))
def groups(self):
r"""
Return an ordered tuple of the congruence subgroups that the
ambient product Jacobian is attached to.
Every modular abelian variety is a finite quotient of an abelian
subvariety of a product of modular Jacobians `J_\Gamma`.
This function returns a tuple containing the groups
`\Gamma`.
EXAMPLES::
sage: A = (J0(37) * J1(13))[0]; A
Simple abelian subvariety 13aG1(1,13) of dimension 2 of J0(37) x J1(13)
sage: A.groups()
(Congruence Subgroup Gamma0(37), Congruence Subgroup Gamma1(13))
"""
return self.__groups
#############################################################################
# lattice() *must* be defined by every derived class!!!!
def lattice(self):
"""
        Return the lattice in the ambient cuspidal modular symbols product
        that defines this modular abelian variety.
This must be defined in each derived class.
OUTPUT: a free module over `\ZZ`
EXAMPLES::
sage: A = sage.modular.abvar.abvar.ModularAbelianVariety_abstract((Gamma0(37),), QQ)
sage: A
<repr(<sage.modular.abvar.abvar.ModularAbelianVariety_abstract_with_category at 0x...>) failed: NotImplementedError: BUG -- lattice method must be defined in derived class>
"""
raise NotImplementedError("BUG -- lattice method must be defined in derived class")
#############################################################################
def free_module(self):
r"""
Synonym for ``self.lattice()``.
OUTPUT: a free module over `\ZZ`
EXAMPLES::
sage: J0(37).free_module()
Ambient free module of rank 4 over the principal ideal domain Integer Ring
sage: J0(37)[0].free_module()
Free module of degree 4 and rank 2 over Integer Ring
Echelon basis matrix:
[ 1 -1 1 0]
[ 0 0 2 -1]
"""
return self.lattice()
def vector_space(self):
r"""
        Return the vector space corresponding to this modular abelian variety.
This is the lattice tensored with `\QQ`.
EXAMPLES::
sage: J0(37).vector_space()
Vector space of dimension 4 over Rational Field
sage: J0(37)[0].vector_space()
Vector space of degree 4 and dimension 2 over Rational Field
Basis matrix:
[ 1 -1 0 1/2]
[ 0 0 1 -1/2]
"""
try:
return self.__vector_space
except AttributeError:
self.__vector_space = self.lattice().change_ring(QQ)
return self.__vector_space
def base_field(self):
r"""
Synonym for ``self.base_ring()``.
EXAMPLES::
sage: J0(11).base_field()
Rational Field
"""
return self.base_ring()
def base_extend(self, K):
"""
EXAMPLES::
sage: A = J0(37); A
Abelian variety J0(37) of dimension 2
sage: A.base_extend(QQbar)
Abelian variety J0(37) over Algebraic Field of dimension 2
sage: A.base_extend(GF(7))
Abelian variety J0(37) over Finite Field of size 7 of dimension 2
"""
        try:
            N = self.__newform_level
        except AttributeError:
            N = None
return ModularAbelianVariety(self.groups(), self.lattice(), K, newform_level=N)
def __contains__(self, x):
"""
Determine whether or not self contains x.
EXAMPLES::
sage: J = J0(67); G = (J[0] + J[1]).intersection(J[1] + J[2])
sage: G[0]
Finite subgroup with invariants [5, 10] over QQbar of Abelian subvariety of dimension 3 of J0(67)
sage: a = G[0].0; a
[(1/10, 1/10, 3/10, 1/2, 1, -2, -3, 33/10, 0, -1/2)]
sage: a in J[0]
False
sage: a in (J[0]+J[1])
True
sage: a in (J[1]+J[2])
True
sage: C = G[1] # abelian variety in kernel
sage: G[0].0
[(1/10, 1/10, 3/10, 1/2, 1, -2, -3, 33/10, 0, -1/2)]
sage: 5*G[0].0
[(1/2, 1/2, 3/2, 5/2, 5, -10, -15, 33/2, 0, -5/2)]
sage: 5*G[0].0 in C
True
"""
if not isinstance(x, TorsionPoint):
return False
if x.parent().abelian_variety().groups() != self.groups():
return False
v = x.element()
n = v.denominator()
nLambda = self.ambient_variety().lattice().scale(n)
return n*v in self.lattice() + nLambda
def __cmp__(self, other):
"""
Compare two modular abelian varieties.
If other is not a modular abelian variety, compares the types of
self and other. If other is a modular abelian variety, compares the
groups, then if those are the same, compares the newform level and
isogeny class number and degeneracy map numbers. If those are not
defined or matched up, compare the underlying lattices.
EXAMPLES::
sage: cmp(J0(37)[0], J0(37)[1])
-1
sage: cmp(J0(33)[0], J0(33)[1])
-1
sage: cmp(J0(37), 5) #random
1
"""
if not isinstance(other, ModularAbelianVariety_abstract):
return cmp(type(self), type(other))
if self is other:
return 0
c = cmp(self.groups(), other.groups())
if c: return c
try:
c = cmp(self.__newform_level, other.__newform_level)
if c: return c
except AttributeError:
pass
try:
c = cmp(self.__isogeny_number, other.__isogeny_number)
if c: return c
except AttributeError:
pass
try:
c = cmp(self.__degen_t, other.__degen_t)
if c: return c
except AttributeError:
pass
# NOTE!! having the same newform level, isogeny class number,
# and degen_t does not imply two abelian varieties are equal.
# See the docstring for self.label.
return cmp(self.lattice(), other.lattice())
def __radd__(self,other):
"""
Return other + self when other is 0. Otherwise raise a TypeError.
EXAMPLES::
sage: int(0) + J0(37)
Abelian variety J0(37) of dimension 2
"""
if other == 0:
return self
raise TypeError
def _repr_(self):
"""
Return string representation of this modular abelian variety.
This is just the generic base class, so it's unlikely to be called
in practice.
EXAMPLES::
sage: A = J0(23)
sage: import sage.modular.abvar.abvar as abvar
sage: abvar.ModularAbelianVariety_abstract._repr_(A)
'Abelian variety J0(23) of dimension 2'
::
sage: (J0(11) * J0(33))._repr_()
'Abelian variety J0(11) x J0(33) of dimension 4'
"""
field = '' if self.base_field() == QQ else ' over %s'%self.base_field()
#if self.newform_level(none_if_not_known=True) is None:
simple = self.is_simple(none_if_not_known=True)
if simple and self.dimension() > 0:
label = self.label() + ' '
else:
label = ''
simple = 'Simple a' if simple else 'A'
if self.is_ambient():
return '%sbelian variety %s%s of dimension %s'%(simple, self._ambient_repr(), field, self.dimension())
if self.is_subvariety_of_ambient_jacobian():
sub = 'subvariety'
else:
sub = 'variety factor'
return "%sbelian %s %sof dimension %s of %s%s"%(
simple, sub, label, self.dimension(), self._ambient_repr(), field)
def label(self):
r"""
Return the label associated to this modular abelian variety.
The format of the label is [level][isogeny class][group](t, ambient
level)
If this abelian variety `B` has the above label, this
implies only that `B` is isogenous to the newform abelian
variety `A_f` associated to the newform with label
[level][isogeny class][group]. The [group] is empty for
`\Gamma_0(N)`, is G1 for `\Gamma_1(N)` and is
GH[...] for `\Gamma_H(N)`.
.. warning::
The sum of `\delta_s(A_f)` for all `s\mid t`
contains `A`, but no sum for a proper divisor of
`t` contains `A`. It need *not* be the case
that `B` is equal to `\delta_t(A_f)`!!!
OUTPUT: string
EXAMPLES::
sage: J0(11).label()
'11a(1,11)'
sage: J0(11)[0].label()
'11a(1,11)'
sage: J0(33)[2].label()
'33a(1,33)'
sage: J0(22).label()
Traceback (most recent call last):
...
ValueError: self must be simple
We illustrate that self need not equal `\delta_t(A_f)`::
sage: J = J0(11); phi = J.degeneracy_map(33, 1) + J.degeneracy_map(33,3)
sage: B = phi.image(); B
Abelian subvariety of dimension 1 of J0(33)
sage: B.decomposition()
[
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33)
]
sage: C = J.degeneracy_map(33,3).image(); C
Abelian subvariety of dimension 1 of J0(33)
sage: C == B
False
"""
degen = str(self.degen_t()).replace(' ','')
return '%s%s'%(self.newform_label(), degen)
def newform_label(self):
"""
Return the label [level][isogeny class][group] of the newform
`f` such that this abelian variety is isogenous to the
newform abelian variety `A_f`. If this abelian variety is
not simple, raise a ValueError.
OUTPUT: string
EXAMPLES::
sage: J0(11).newform_label()
'11a'
sage: J0(33)[2].newform_label()
'33a'
The following fails since `J_0(33)` is not simple::
sage: J0(33).newform_label()
Traceback (most recent call last):
...
ValueError: self must be simple
"""
N, G = self.newform_level()
if is_Gamma0(G):
group = ''
elif is_Gamma1(G):
group = 'G1'
elif is_GammaH(G):
group = 'GH%s'%(str(G._generators_for_H()).replace(' ',''))
return '%s%s%s'%(N, cremona_letter_code(self.isogeny_number()), group)
def _isogeny_to_newform_abelian_variety(self):
r"""
Return an isogeny from self to an abelian variety `A_f`
attached to a newform. If self is not simple (so that no such
isogeny exists), raise a ValueError.
EXAMPLES::
sage: J0(22)[0]._isogeny_to_newform_abelian_variety()
Abelian variety morphism:
From: Simple abelian subvariety 11a(1,22) of dimension 1 of J0(22)
To: Newform abelian subvariety 11a of dimension 1 of J0(11)
sage: J = J0(11); phi = J.degeneracy_map(33, 1) + J.degeneracy_map(33,3)
sage: A = phi.image()
sage: A._isogeny_to_newform_abelian_variety().matrix()
[-3 3]
[ 0 -3]
"""
try:
return self._newform_isogeny
except AttributeError:
pass
if not self.is_simple():
raise ValueError("self is not simple")
ls = []
t, N = self.decomposition()[0].degen_t()
A = self.ambient_variety()
for i in range(len(self.groups())):
g = self.groups()[i]
if N == g.level():
J = g.modular_abelian_variety()
d = J.degeneracy_map(self.newform_level()[0], t)
p = A.project_to_factor(i)
mat = p.matrix() * d.matrix()
if not (self.lattice().matrix() * mat).is_zero():
break
from constructor import AbelianVariety
Af = AbelianVariety(self.newform_label())
H = A.Hom(Af.ambient_variety())
m = H(Morphism(H, mat))
self._newform_isogeny = m.restrict_domain(self).restrict_codomain(Af)
return self._newform_isogeny
def _simple_isogeny(self, other):
"""
Given self and other, if both are simple, and correspond to the
same newform with the same congruence subgroup, return an isogeny.
Otherwise, raise a ValueError.
INPUT:
- ``self, other`` - modular abelian varieties
OUTPUT: an isogeny
EXAMPLES::
sage: J = J0(33); J
Abelian variety J0(33) of dimension 3
sage: J[0]._simple_isogeny(J[1])
Abelian variety morphism:
From: Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)
To: Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33)
The following illustrates how simple isogeny is only implemented
when the ambients are the same::
sage: J[0]._simple_isogeny(J1(11))
Traceback (most recent call last):
...
        NotImplementedError: _simple_isogeny only implemented when both abelian varieties have the same ambient product Jacobian
"""
if not is_ModularAbelianVariety(other):
raise TypeError("other must be a modular abelian variety")
if not self.is_simple():
raise ValueError("self is not simple")
if not other.is_simple():
raise ValueError("other is not simple")
if self.groups() != other.groups():
# The issue here is that the stuff below probably won't make any sense at all if we don't know
# that the two newform abelian varieties $A_f$ are identical.
            raise NotImplementedError("_simple_isogeny only implemented when both abelian varieties have the same ambient product Jacobian")
if (self.newform_level() != other.newform_level()) or \
(self.isogeny_number() != other.isogeny_number()):
raise ValueError("self and other do not correspond to the same newform")
return other._isogeny_to_newform_abelian_variety().complementary_isogeny() * \
self._isogeny_to_newform_abelian_variety()
def _Hom_(self, B, cat=None):
"""
INPUT:
- ``B`` - modular abelian varieties
- ``cat`` - category
EXAMPLES::
sage: J0(37)._Hom_(J1(37))
Space of homomorphisms from Abelian variety J0(37) of dimension 2 to Abelian variety J1(37) of dimension 40
sage: J0(37)._Hom_(J1(37)).homset_category()
Category of modular abelian varieties over Rational Field
"""
if cat is None:
K = self.base_field(); L = B.base_field()
if K == L:
F = K
elif K == QQbar or L == QQbar:
F = QQbar
else:
# TODO -- improve this
raise ValueError("please specify a category")
cat = ModularAbelianVarieties(F)
if self is B:
return self.endomorphism_ring(cat)
else:
return homspace.Homspace(self, B, cat)
def in_same_ambient_variety(self, other):
"""
Return True if self and other are abelian subvarieties of the same
ambient product Jacobian.
EXAMPLES::
sage: A,B,C = J0(33)
sage: A.in_same_ambient_variety(B)
True
sage: A.in_same_ambient_variety(J0(11))
False
"""
if not is_ModularAbelianVariety(other):
return False
if self.groups() != other.groups():
return False
if not self.is_subvariety_of_ambient_jacobian() or not other.is_subvariety_of_ambient_jacobian():
return False
return True
def modular_kernel(self):
"""
Return the modular kernel of this abelian variety, which is the
kernel of the canonical polarization of self.
EXAMPLES::
sage: A = AbelianVariety('33a'); A
Newform abelian subvariety 33a of dimension 1 of J0(33)
sage: A.modular_kernel()
Finite subgroup with invariants [3, 3] over QQ of Newform abelian subvariety 33a of dimension 1 of J0(33)
"""
try:
return self.__modular_kernel
except AttributeError:
_, f, _ = self.dual()
G = f.kernel()[0]
self.__modular_kernel = G
return G
def modular_degree(self):
"""
Return the modular degree of this abelian variety, which is the
square root of the degree of the modular kernel.
EXAMPLES::
sage: A = AbelianVariety('37a')
sage: A.modular_degree()
2
"""
n = self.modular_kernel().order()
return ZZ(n.sqrt())
def intersection(self, other):
"""
Returns the intersection of self and other inside a common ambient
Jacobian product.
INPUT:
- ``other`` - a modular abelian variety or a finite
group
OUTPUT: If other is a modular abelian variety:
- ``G`` - finite subgroup of self
- ``A`` - abelian variety (identity component of
intersection) If other is a finite group:
- ``G`` - a finite group
EXAMPLES: We intersect some abelian varieties with finite
intersection.
::
sage: J = J0(37)
sage: J[0].intersection(J[1])
(Finite subgroup with invariants [2, 2] over QQ of Simple abelian subvariety 37a(1,37) of dimension 1 of J0(37), Simple abelian subvariety of dimension 0 of J0(37))
::
sage: D = list(J0(65)); D
[Simple abelian subvariety 65a(1,65) of dimension 1 of J0(65), Simple abelian subvariety 65b(1,65) of dimension 2 of J0(65), Simple abelian subvariety 65c(1,65) of dimension 2 of J0(65)]
sage: D[0].intersection(D[1])
(Finite subgroup with invariants [2] over QQ of Simple abelian subvariety 65a(1,65) of dimension 1 of J0(65), Simple abelian subvariety of dimension 0 of J0(65))
sage: (D[0]+D[1]).intersection(D[1]+D[2])
(Finite subgroup with invariants [2] over QQbar of Abelian subvariety of dimension 3 of J0(65), Abelian subvariety of dimension 2 of J0(65))
::
sage: J = J0(33)
sage: J[0].intersection(J[1])
(Finite subgroup with invariants [5] over QQ of Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33), Simple abelian subvariety of dimension 0 of J0(33))
Next we intersect two abelian varieties with non-finite
intersection::
sage: J = J0(67); D = J.decomposition(); D
[
Simple abelian subvariety 67a(1,67) of dimension 1 of J0(67),
Simple abelian subvariety 67b(1,67) of dimension 2 of J0(67),
Simple abelian subvariety 67c(1,67) of dimension 2 of J0(67)
]
sage: (D[0] + D[1]).intersection(D[1] + D[2])
(Finite subgroup with invariants [5, 10] over QQbar of Abelian subvariety of dimension 3 of J0(67), Abelian subvariety of dimension 2 of J0(67))
"""
# First check whether we are intersecting an abelian variety
# with a finite subgroup. If so, call the intersection method
# for the finite group, which does know how to intersect with
# an abelian variety.
if isinstance(other, FiniteSubgroup):
return other.intersection(self)
# Now both self and other are abelian varieties. We require
# at least that the ambient Jacobian product is the same for
# them.
if not self.in_same_ambient_variety(other):
raise TypeError("other must be an abelian variety in the same ambient space")
# 1. Compute the abelian variety (connected) part of the intersection
V = self.vector_space().intersection(other.vector_space())
if V.dimension() > 0:
# If there is a nonzero abelian variety, get the actual
# lattice that defines it. We intersect (=saturate) in
# the sum of the lattices, to ensure that the intersection
# is an abelian subvariety of both self and other (even if
# they aren't subvarieties of the ambient Jacobian).
lattice = V.intersection(self.lattice() + other.lattice())
A = ModularAbelianVariety(self.groups(), lattice, self.base_field(), check=False)
else:
A = self.zero_subvariety()
# 2. Compute the finite intersection group when the
# intersection is finite, or a group that maps surjectively
# onto the component group in general.
# First we get basis matrices for the lattices that define
# both abelian varieties.
L = self.lattice().basis_matrix()
M = other.lattice().basis_matrix()
# Then we stack matrices and find a subset that forms a
# basis.
LM = L.stack(M)
P = LM.pivot_rows()
V = (ZZ**L.ncols()).span_of_basis([LM.row(p) for p in P])
S = (self.lattice() + other.lattice()).saturation()
n = self.lattice().rank()
# Finally we project onto the L factor.
gens = [L.linear_combination_of_rows(v.list()[:n])
for v in V.coordinate_module(S).basis()]
if A.dimension() > 0:
finitegroup_base_field = QQbar
else:
finitegroup_base_field = self.base_field()
G = self.finite_subgroup(gens, field_of_definition=finitegroup_base_field)
return G, A
def __add__(self, other):
r"""
Returns the sum of the *images* of self and other inside the
ambient Jacobian product. self and other must be abelian
subvarieties of the ambient Jacobian product.
        .. warning::
The sum of course only makes sense in some ambient variety,
and by definition this function takes the sum of the images
of both self and other in the ambient product Jacobian.
EXAMPLES: We compute the sum of two abelian varieties of
`J_0(33)`::
sage: J = J0(33)
sage: J[0] + J[1]
Abelian subvariety of dimension 2 of J0(33)
We sum all three and get the full `J_0(33)`::
sage: (J[0] + J[1]) + (J[1] + J[2])
Abelian variety J0(33) of dimension 3
Adding to zero works::
sage: J[0] + 0
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)
Hence the sum command works::
sage: sum([J[0], J[2]])
Abelian subvariety of dimension 2 of J0(33)
We try to add something in `J_0(33)` to something in
`J_0(11)`; this shouldn't and doesn't work.
::
sage: J[0] + J0(11)
Traceback (most recent call last):
...
TypeError: sum not defined since ambient spaces different
We compute the diagonal image of `J_0(11)` in
`J_0(33)`, then add the result to the new elliptic curve
of level `33`.
::
sage: A = J0(11)
sage: B = (A.degeneracy_map(33,1) + A.degeneracy_map(33,3)).image()
sage: B + J0(33)[2]
Abelian subvariety of dimension 2 of J0(33)
TESTS: This exposed a bug in HNF (see trac #4527)::
sage: A = J0(206).new_subvariety().decomposition()[3] ; A # long time
Simple abelian subvariety 206d(1,206) of dimension 4 of J0(206)
sage: B = J0(206).old_subvariety(2) ; B # long time
Abelian subvariety of dimension 16 of J0(206)
sage: A+B # long time
Abelian subvariety of dimension 20 of J0(206)
"""
if not is_ModularAbelianVariety(other):
if other == 0:
return self
raise TypeError("other must be a modular abelian variety")
if self.groups() != other.groups():
raise ValueError("incompatible ambient Jacobians")
L = self.vector_space() + other.vector_space()
M = L.intersection(self._ambient_lattice())
return ModularAbelianVariety(self.groups(), M, self.base_field(), check=False)
def direct_product(self, other):
"""
Compute the direct product of self and other.
INPUT:
- ``self, other`` - modular abelian varieties
OUTPUT: abelian variety
EXAMPLES::
sage: J0(11).direct_product(J1(13))
Abelian variety J0(11) x J1(13) of dimension 3
sage: A = J0(33)[0].direct_product(J0(33)[1]); A
Abelian subvariety of dimension 2 of J0(33) x J0(33)
sage: A.lattice()
Free module of degree 12 and rank 4 over Integer Ring
Echelon basis matrix:
[ 1 1 -2 0 2 -1 0 0 0 0 0 0]
[ 0 3 -2 -1 2 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 1 0 0 0 -1 2]
[ 0 0 0 0 0 0 0 1 -1 1 0 -2]
"""
return self * other
def __pow__(self, n):
"""
Return `n^{th}` power of self.
INPUT:
- ``n`` - a nonnegative integer
OUTPUT: an abelian variety
EXAMPLES::
sage: J = J0(37)
sage: J^0
Simple abelian subvariety of dimension 0 of J0(37)
sage: J^1
Abelian variety J0(37) of dimension 2
sage: J^1 is J
True
"""
n = ZZ(n)
if n < 0:
raise ValueError("n must be nonnegative")
if n == 0:
return self.zero_subvariety()
if n == 1:
return self
groups = self.groups() * n
L = self.lattice().basis_matrix()
lattice = block_diagonal_matrix([L]*n).row_module(ZZ)
return ModularAbelianVariety(groups, lattice, self.base_field(), check=False)
def __mul__(self, other):
"""
Compute the direct product of self and other.
EXAMPLES: Some modular Jacobians::
sage: J0(11) * J0(33)
Abelian variety J0(11) x J0(33) of dimension 4
sage: J0(11) * J0(33) * J0(11)
Abelian variety J0(11) x J0(33) x J0(11) of dimension 5
We multiply some factors of `J_0(65)`::
sage: d = J0(65).decomposition()
sage: d[0] * d[1] * J0(11)
Abelian subvariety of dimension 4 of J0(65) x J0(65) x J0(11)
"""
if not is_ModularAbelianVariety(other):
raise TypeError("other must be a modular abelian variety")
if other.base_ring() != self.base_ring():
raise TypeError("self and other must have the same base ring")
groups = tuple(list(self.groups()) + list(other.groups()))
lattice = self.lattice().direct_sum(other.lattice())
base_field = self.base_ring()
return ModularAbelianVariety(groups, lattice, base_field, check=False)
def quotient(self, other):
"""
Compute the quotient of self and other, where other is either an
abelian subvariety of self or a finite subgroup of self.
INPUT:
- ``other`` - a finite subgroup or subvariety
OUTPUT: a pair (A, phi) with phi the quotient map from self to A
EXAMPLES: We quotient `J_0(33)` out by an abelian
subvariety::
sage: Q, f = J0(33).quotient(J0(33)[0])
sage: Q
Abelian variety factor of dimension 2 of J0(33)
sage: f
Abelian variety morphism:
From: Abelian variety J0(33) of dimension 3
To: Abelian variety factor of dimension 2 of J0(33)
We quotient `J_0(33)` by the cuspidal subgroup::
sage: C = J0(33).cuspidal_subgroup()
sage: Q, f = J0(33).quotient(C)
sage: Q
Abelian variety factor of dimension 3 of J0(33)
sage: f.kernel()[0]
Finite subgroup with invariants [10, 10] over QQ of Abelian variety J0(33) of dimension 3
sage: C
Finite subgroup with invariants [10, 10] over QQ of Abelian variety J0(33) of dimension 3
sage: J0(11).direct_product(J1(13))
Abelian variety J0(11) x J1(13) of dimension 3
"""
return self.__div__(other)
def __div__(self, other):
"""
Compute the quotient of self and other, where other is either an
abelian subvariety of self or a finite subgroup of self.
INPUT:
- ``other`` - a finite subgroup or subvariety
EXAMPLES: Quotient out by a finite group::
sage: J = J0(67); G = (J[0] + J[1]).intersection(J[1] + J[2])
sage: Q, _ = J/G[0]; Q
Abelian variety factor of dimension 5 of J0(67) over Algebraic Field
sage: Q.base_field()
Algebraic Field
sage: Q.lattice()
Free module of degree 10 and rank 10 over Integer Ring
Echelon basis matrix:
[1/10 1/10 3/10 1/2 0 0 0 3/10 0 1/2]
[ 0 1/5 4/5 4/5 0 0 0 0 0 3/5]
...
Quotient out by an abelian subvariety::
sage: A, B, C = J0(33)
sage: Q, phi = J0(33)/A
sage: Q
Abelian variety factor of dimension 2 of J0(33)
sage: phi.domain()
Abelian variety J0(33) of dimension 3
sage: phi.codomain()
Abelian variety factor of dimension 2 of J0(33)
sage: phi.kernel()
(Finite subgroup with invariants [2] over QQbar of Abelian variety J0(33) of dimension 3,
Abelian subvariety of dimension 1 of J0(33))
sage: phi.kernel()[1] == A
True
The abelian variety we quotient out by must be an abelian
subvariety.
::
sage: Q = (A + B)/C; Q
Traceback (most recent call last):
...
TypeError: other must be a subgroup or abelian subvariety
"""
if isinstance(other, FiniteSubgroup):
if other.abelian_variety() != self:
other = self.finite_subgroup(other)
return self._quotient_by_finite_subgroup(other)
elif isinstance(other, ModularAbelianVariety_abstract) and other.is_subvariety(self):
return self._quotient_by_abelian_subvariety(other)
else:
raise TypeError("other must be a subgroup or abelian subvariety")
def degeneracy_map(self, M_ls, t_ls):
"""
Return the degeneracy map with domain self and given
level/parameter. If self.ambient_variety() is a product of
Jacobians (as opposed to a single Jacobian), then one can provide a
list of new levels and parameters, corresponding to the ambient
Jacobians in order. (See the examples below.)
INPUT:
- ``M, t`` - integers level and `t`, or
- ``Mlist, tlist`` - if self is in a nontrivial
product ambient Jacobian, input consists of a list of levels and
corresponding list of `t`'s.
OUTPUT: a degeneracy map
EXAMPLES: We make several degeneracy maps related to
`J_0(11)` and `J_0(33)` and compute their
matrices.
::
sage: d1 = J0(11).degeneracy_map(33, 1); d1
Degeneracy map from Abelian variety J0(11) of dimension 1 to Abelian variety J0(33) of dimension 3 defined by [1]
sage: d1.matrix()
[ 0 -3 2 1 -2 0]
[ 1 -2 0 1 0 -1]
sage: d2 = J0(11).degeneracy_map(33, 3); d2
Degeneracy map from Abelian variety J0(11) of dimension 1 to Abelian variety J0(33) of dimension 3 defined by [3]
sage: d2.matrix()
[-1 0 0 0 1 -2]
[-1 -1 1 -1 1 0]
sage: d3 = J0(33).degeneracy_map(11, 1); d3
Degeneracy map from Abelian variety J0(33) of dimension 3 to Abelian variety J0(11) of dimension 1 defined by [1]
        Here we verify that mapping from level `11` to level
        `33` and then back is multiplication by `4`::
sage: d1.matrix() * d3.matrix()
[4 0]
[0 4]
We compute a more complicated degeneracy map involving nontrivial
product ambient Jacobians; note that this is just the block direct
sum of the two matrices at the beginning of this example::
sage: d = (J0(11)*J0(11)).degeneracy_map([33,33], [1,3]); d
Degeneracy map from Abelian variety J0(11) x J0(11) of dimension 2 to Abelian variety J0(33) x J0(33) of dimension 6 defined by [1, 3]
sage: d.matrix()
[ 0 -3 2 1 -2 0 0 0 0 0 0 0]
[ 1 -2 0 1 0 -1 0 0 0 0 0 0]
[ 0 0 0 0 0 0 -1 0 0 0 1 -2]
[ 0 0 0 0 0 0 -1 -1 1 -1 1 0]
"""
if not isinstance(M_ls, list):
M_ls = [M_ls]
if not isinstance(t_ls, list):
t_ls = [t_ls]
groups = self.groups()
length = len(M_ls)
if length != len(t_ls):
raise ValueError("must have same number of Ms and ts")
if length != len(groups):
raise ValueError("must have same number of Ms and groups in ambient variety")
for i in range(length):
N = groups[i].level()
if (M_ls[i]%N) and (N%M_ls[i]):
raise ValueError("one level must divide the other in %s-th component"%i)
if (( max(M_ls[i],N) // min(M_ls[i],N) ) % t_ls[i]):
raise ValueError("each t must divide the quotient of the levels")
ls = [ self.groups()[i].modular_abelian_variety().degeneracy_map(M_ls[i], t_ls[i]).matrix() for i in range(length) ]
new_codomain = prod([ self.groups()[i]._new_group_from_level(M_ls[i]).modular_abelian_variety()
for i in range(length) ])
M = block_diagonal_matrix(ls, subdivide=False)
H = self.Hom(new_codomain)
return H(DegeneracyMap(H, M.restrict_domain(self.lattice()), t_ls))
def _quotient_by_finite_subgroup(self, G):
"""
Return the quotient of self by the finite subgroup `G`.
This is used internally by the quotient and __div__ commands.
INPUT:
- ``G`` - a finite subgroup of self
OUTPUT: abelian variety - the quotient `Q` of self by
`G`
- ``morphism`` - from self to the quotient
`Q`
EXAMPLES: We quotient the elliptic curve `J_0(11)` out by
its cuspidal subgroup.
::
sage: A = J0(11)
sage: G = A.cuspidal_subgroup(); G
Finite subgroup with invariants [5] over QQ of Abelian variety J0(11) of dimension 1
sage: Q, f = A._quotient_by_finite_subgroup(G)
sage: Q
Abelian variety factor of dimension 1 of J0(11)
sage: f
Abelian variety morphism:
From: Abelian variety J0(11) of dimension 1
To: Abelian variety factor of dimension 1 of J0(11)
We compute the finite kernel of `f` (hence the [0]) and
note that it equals the subgroup `G` that we quotiented out
by::
sage: f.kernel()[0] == G
True
"""
if G.order() == 1:
return self
L = self.lattice() + G.lattice()
A = ModularAbelianVariety(self.groups(), L, G.field_of_definition())
M = L.coordinate_module(self.lattice()).basis_matrix()
phi = self.Hom(A)(M)
return A, phi
def _quotient_by_abelian_subvariety(self, B):
"""
Return the quotient of self by the abelian variety `B`.
This is used internally by the quotient and __div__ commands.
INPUT:
- ``B`` - an abelian subvariety of self
OUTPUT:
- ``abelian variety`` - quotient `Q` of self
by B
- ``morphism`` - from self to the quotient
`Q`
EXAMPLES: We compute the new quotient of `J_0(33)`.
::
sage: A = J0(33); B = A.old_subvariety()
sage: Q, f = A._quotient_by_abelian_subvariety(B)
Note that the quotient happens to also be an abelian subvariety::
sage: Q
Abelian subvariety of dimension 1 of J0(33)
sage: Q.lattice()
Free module of degree 6 and rank 2 over Integer Ring
Echelon basis matrix:
[ 1 0 0 -1 0 0]
[ 0 0 1 0 1 -1]
sage: f
Abelian variety morphism:
From: Abelian variety J0(33) of dimension 3
To: Abelian subvariety of dimension 1 of J0(33)
We verify that `B` is equal to the kernel of the quotient
map.
::
sage: f.kernel()[1] == B
True
Next we quotient `J_0(33)` out by `Q` itself::
sage: C, g = A._quotient_by_abelian_subvariety(Q)
The result is not a subvariety::
sage: C
Abelian variety factor of dimension 2 of J0(33)
sage: C.lattice()
Free module of degree 6 and rank 4 over Integer Ring
Echelon basis matrix:
[ 1/3 0 0 2/3 -1 0]
[ 0 1 0 0 -1 1]
[ 0 0 1/3 0 -2/3 2/3]
[ 0 0 0 1 -1 -1]
"""
# We first compute the complement of B in self to get
# an abelian variety C also in self such that self/B
# is isogenous to C. This is the case because the
# projection map pi:self --> C is surjective and has
# kernel a finite extension of the abelian variety B.
C = B.complement(self)
# Now that we have C we need to find some abelian variety Q
# isogenous to C and a map self --> Q whose kernel is exactly
# B. We do this by computing the kernel of the map pi below,
# which is an extension of the abelian variety B by a finite
# group Phi of complements. Our strategy is to enlarge the
# lattice that defines C so that the map pi below suddenly
# has connected kernel.
pi = self.projection(C)
psi = pi.factor_out_component_group()
Q = psi.codomain()
return Q, psi
def projection(self, A, check=True):
"""
Given an abelian subvariety A of self, return a projection morphism
from self to A. Note that this morphism need not be unique.
INPUT:
- ``A`` - an abelian variety
OUTPUT: a morphism
EXAMPLES::
sage: a,b,c = J0(33)
sage: pi = J0(33).projection(a); pi.matrix()
[ 3 -2]
[-5 5]
[-4 1]
[ 3 -2]
[ 5 0]
[ 1 1]
sage: pi = (a+b).projection(a); pi.matrix()
[ 0 0]
[-3 2]
[-4 1]
[-1 -1]
sage: pi = a.projection(a); pi.matrix()
[1 0]
[0 1]
We project onto a factor in a product of two Jacobians::
sage: A = J0(11)*J0(11); A
Abelian variety J0(11) x J0(11) of dimension 2
sage: A[0]
Simple abelian subvariety 11a(1,11) of dimension 1 of J0(11) x J0(11)
sage: A.projection(A[0])
Abelian variety morphism:
From: Abelian variety J0(11) x J0(11) of dimension 2
To: Simple abelian subvariety 11a(1,11) of dimension 1 of J0(11) x J0(11)
sage: A.projection(A[0]).matrix()
[0 0]
[0 0]
[1 0]
[0 1]
sage: A.projection(A[1]).matrix()
[1 0]
[0 1]
[0 0]
[0 0]
"""
if check and not A.is_subvariety(self):
raise ValueError("A must be an abelian subvariety of self")
W = A.complement(self)
mat = A.lattice().basis_matrix().stack(W.lattice().basis_matrix())
# solve X * mat = self, i.e. write each row of self in terms of the
# rows of mat.
X = mat.solve_left(self.lattice().basis_matrix())
        # The projection map is obtained from the first 2*dim(A) columns of X.
X = X.matrix_from_columns(range(2*A.dimension()))
X, _ = X._clear_denom()
return Morphism(self.Hom(A), X)
def project_to_factor(self, n):
"""
If self is an ambient product of Jacobians, return a projection
from self to the nth such Jacobian.
EXAMPLES::
sage: J = J0(33)
sage: J.project_to_factor(0)
Abelian variety endomorphism of Abelian variety J0(33) of dimension 3
::
sage: J = J0(33) * J0(37) * J0(11)
sage: J.project_to_factor(2)
Abelian variety morphism:
From: Abelian variety J0(33) x J0(37) x J0(11) of dimension 6
To: Abelian variety J0(11) of dimension 1
sage: J.project_to_factor(2).matrix()
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[0 0]
[1 0]
[0 1]
"""
if not self.is_ambient():
raise ValueError("self is not ambient")
if n >= len(self.groups()):
raise IndexError("index (=%s) too large (max = %s)"%(n, len(self.groups())))
G = self.groups()[n]
A = G.modular_abelian_variety()
index = sum([ gp.modular_symbols().cuspidal_subspace().dimension()
for gp in self.groups()[0:n] ])
H = self.Hom(A)
mat = H.matrix_space()(0)
mat.set_block(index, 0, identity_matrix(2*A.dimension()))
return H(Morphism(H, mat))
def is_subvariety_of_ambient_jacobian(self):
"""
Return True if self is (presented as) a subvariety of the ambient
product Jacobian.
Every abelian variety in Sage is a quotient of a subvariety of an
ambient Jacobian product by a finite subgroup.
EXAMPLES::
sage: J0(33).is_subvariety_of_ambient_jacobian()
True
sage: A = J0(33)[0]; A
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)
sage: A.is_subvariety_of_ambient_jacobian()
True
sage: B, phi = A / A.torsion_subgroup(2)
sage: B
Abelian variety factor of dimension 1 of J0(33)
sage: phi.matrix()
[2 0]
[0 2]
sage: B.is_subvariety_of_ambient_jacobian()
False
"""
try:
return self.__is_sub_ambient
except AttributeError:
self.__is_sub_ambient = (self.lattice().denominator() == 1)
return self.__is_sub_ambient
def ambient_variety(self):
"""
Return the ambient modular abelian variety that contains this
abelian variety. The ambient variety is always a product of
Jacobians of modular curves.
OUTPUT: abelian variety
EXAMPLES::
sage: A = J0(33)[0]; A
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)
sage: A.ambient_variety()
Abelian variety J0(33) of dimension 3
"""
try:
return self.__ambient_variety
except AttributeError:
A = ModularAbelianVariety(self.groups(), ZZ**(2*self._ambient_dimension()),
self.base_field(), check=False)
self.__ambient_variety = A
return A
def ambient_morphism(self):
"""
Return the morphism from self to the ambient variety. This is
        injective if self is naturally a subvariety of the ambient product
Jacobian.
OUTPUT: morphism
The output is cached.
EXAMPLES: We compute the ambient structure morphism for an abelian
subvariety of `J_0(33)`::
sage: A,B,C = J0(33)
sage: phi = A.ambient_morphism()
sage: phi.domain()
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)
sage: phi.codomain()
Abelian variety J0(33) of dimension 3
sage: phi.matrix()
[ 1 1 -2 0 2 -1]
[ 0 3 -2 -1 2 0]
phi is of course injective
::
sage: phi.kernel()
(Finite subgroup with invariants [] over QQ of Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33),
Abelian subvariety of dimension 0 of J0(33))
This is the same as the basis matrix for the lattice corresponding
to self::
sage: A.lattice()
Free module of degree 6 and rank 2 over Integer Ring
Echelon basis matrix:
[ 1 1 -2 0 2 -1]
[ 0 3 -2 -1 2 0]
We compute a non-injective map to an ambient space::
sage: Q,pi = J0(33)/A
sage: phi = Q.ambient_morphism()
sage: phi.matrix()
[ 1 4 1 9 -1 -1]
[ 0 15 0 0 30 -75]
[ 0 0 5 10 -5 15]
[ 0 0 0 15 -15 30]
sage: phi.kernel()[0]
Finite subgroup with invariants [5, 15, 15] over QQ of Abelian variety factor of dimension 2 of J0(33)
"""
try:
return self.__ambient_morphism
except AttributeError:
matrix,_ = self.lattice().basis_matrix()._clear_denom()
phi = Morphism(self.Hom(self.ambient_variety()), matrix)
self.__ambient_morphism = phi
return phi
def is_ambient(self):
"""
Return True if self equals the ambient product Jacobian.
OUTPUT: bool
EXAMPLES::
sage: A,B,C = J0(33)
sage: A.is_ambient()
False
sage: J0(33).is_ambient()
True
sage: (A+B).is_ambient()
False
sage: (A+B+C).is_ambient()
True
"""
try:
return self.__is_ambient
except AttributeError:
pass
L = self.lattice()
self.__is_ambient = (self.lattice() == ZZ**L.degree())
return self.__is_ambient
def dimension(self):
"""
Return the dimension of this abelian variety.
EXAMPLES::
sage: A = J0(23)
sage: A.dimension()
2
"""
return self.lattice().rank() // 2
def rank(self):
"""
Return the rank of the underlying lattice of self.
EXAMPLES::
sage: J = J0(33)
sage: J.rank()
6
sage: J[1]
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33)
sage: (J[1] * J[1]).rank()
4
"""
return self.lattice().rank()
def degree(self):
"""
Return the degree of this abelian variety, which is the dimension
of the ambient Jacobian product.
EXAMPLES::
sage: A = J0(23)
        sage: A.degree()
2
"""
return self._ambient_dimension()
def endomorphism_ring(self, category=None):
"""
Return the endomorphism ring of self.
        OUTPUT: the endomorphism ring of self
EXAMPLES: We compute a few endomorphism rings::
sage: J0(11).endomorphism_ring()
Endomorphism ring of Abelian variety J0(11) of dimension 1
sage: J0(37).endomorphism_ring()
Endomorphism ring of Abelian variety J0(37) of dimension 2
sage: J0(33)[2].endomorphism_ring()
Endomorphism ring of Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
No real computation is done::
sage: J1(123456).endomorphism_ring()
Endomorphism ring of Abelian variety J1(123456) of dimension 423185857
"""
try:
return self.__endomorphism_ring
except AttributeError:
pass
self.__endomorphism_ring = homspace.EndomorphismSubring(self, category=category)
return self.__endomorphism_ring
def sturm_bound(self):
r"""
Return a bound `B` such that all Hecke operators
`T_n` for `n\leq B` generate the Hecke algebra.
OUTPUT: integer
EXAMPLES::
sage: J0(11).sturm_bound()
2
sage: J0(33).sturm_bound()
8
sage: J1(17).sturm_bound()
48
sage: J1(123456).sturm_bound()
1693483008
sage: JH(37,[2,3]).sturm_bound()
7
sage: J1(37).sturm_bound()
228
"""
try:
return self.__sturm_bound
except AttributeError:
B = max([G.sturm_bound(2) for G in self.groups()])
self.__sturm_bound = B
return B
def is_hecke_stable(self):
"""
Return True if self is stable under the Hecke operators of its
ambient Jacobian.
OUTPUT: bool
EXAMPLES::
sage: J0(11).is_hecke_stable()
True
sage: J0(33)[2].is_hecke_stable()
True
sage: J0(33)[0].is_hecke_stable()
False
sage: (J0(33)[0] + J0(33)[1]).is_hecke_stable()
True
"""
try:
return self._is_hecke_stable
except AttributeError:
pass
#b = self.modular_symbols().sturm_bound()
b = max([ m.sturm_bound() for m in self._ambient_modular_symbols_spaces() ])
J = self.ambient_variety()
L = self.lattice()
B = self.lattice().basis()
for n in prime_range(1,b+1):
Tn_matrix = J.hecke_operator(n).matrix()
for v in B:
if not (v*Tn_matrix in L):
self._is_hecke_stable = False
return False
self._is_hecke_stable = True
return True
def is_subvariety(self, other):
"""
Return True if self is a subvariety of other as they sit in a
common ambient modular Jacobian. In particular, this function will
only return True if self and other have exactly the same ambient
Jacobians.
EXAMPLES::
sage: J = J0(37); J
Abelian variety J0(37) of dimension 2
sage: A = J[0]; A
Simple abelian subvariety 37a(1,37) of dimension 1 of J0(37)
sage: A.is_subvariety(A)
True
sage: A.is_subvariety(J)
True
"""
if not is_ModularAbelianVariety(other):
return False
if self is other:
return True
if self.groups() != other.groups():
return False
L = self.lattice()
M = other.lattice()
# self is an abelian subvariety of other if and only if
# 1. L is a subset of M (so the abelian subvarieties of
# the ambient J are equal), and
# 2. L is relatively saturated in M, i.e., M/L is
# torsion free.
if not L.is_submodule(M):
return False
# To determine if L is relatively saturated we compute the
# intersection of M with (L tensor Q) and see if that equals
# L.
return L.change_ring(QQ).intersection(M) == L
def change_ring(self, R):
"""
Change the base ring of this modular abelian variety.
EXAMPLES::
sage: A = J0(23)
sage: A.change_ring(QQ)
Abelian variety J0(23) of dimension 2
"""
return ModularAbelianVariety(self.groups(), self.lattice(), R, check=False)
def level(self):
"""
Return the level of this modular abelian variety, which is an
integer N (usually minimal) such that this modular abelian variety
is a quotient of `J_1(N)`. In the case that the ambient
variety of self is a product of Jacobians, return the LCM of their
levels.
EXAMPLES::
sage: J1(5077).level()
5077
sage: JH(389,[4]).level()
389
sage: (J0(11)*J0(17)).level()
187
"""
try:
return self.__level
except AttributeError:
self.__level = LCM([G.level() for G in self.groups()])
return self.__level
def newform_level(self, none_if_not_known=False):
"""
Write self as a product (up to isogeny) of newform abelian
        varieties `A_f`. Then this function returns the least
common multiple of the levels of the newforms `f`, along
with the corresponding group or list of groups (the groups do not
appear with multiplicity).
INPUT:
- ``none_if_not_known`` - (default: False) if True,
return None instead of attempting to compute the newform level, if
it isn't already known. This None result is not cached.
        OUTPUT: a pair consisting of an integer and a group (or a list of distinct groups)
EXAMPLES::
sage: J0(33)[0].newform_level()
(11, Congruence Subgroup Gamma0(33))
sage: J0(33)[0].newform_level(none_if_not_known=True)
(11, Congruence Subgroup Gamma0(33))
Here there are multiple groups since there are in fact multiple
newforms::
sage: (J0(11) * J1(13)).newform_level()
(143, [Congruence Subgroup Gamma0(11), Congruence Subgroup Gamma1(13)])
"""
try:
return self.__newform_level
except AttributeError:
if none_if_not_known:
return None
N = [A.newform_level() for A in self.decomposition()]
level = LCM([z[0] for z in N])
groups = sorted(set([z[1] for z in N]))
if len(groups) == 1:
groups = groups[0]
self.__newform_level = level, groups
return self.__newform_level
def zero_subvariety(self):
"""
Return the zero subvariety of self.
EXAMPLES::
sage: J = J0(37)
sage: J.zero_subvariety()
Simple abelian subvariety of dimension 0 of J0(37)
sage: J.zero_subvariety().level()
37
sage: J.zero_subvariety().newform_level()
(1, [])
"""
try:
return self.__zero_subvariety
except AttributeError:
lattice = (ZZ**(2*self.degree())).zero_submodule()
A = ModularAbelianVariety(self.groups(), lattice, self.base_field(),
is_simple=True, check=False)
self.__zero_subvariety = A
return A
###############################################################################
# Properties of the ambient product of Jacobians
###############################################################################
def _ambient_repr(self):
"""
OUTPUT: string
EXAMPLES::
sage: (J0(33)*J1(11))._ambient_repr()
'J0(33) x J1(11)'
"""
v = []
for G in self.groups():
if is_Gamma0(G):
v.append('J0(%s)'%G.level())
elif is_Gamma1(G):
v.append('J1(%s)'%G.level())
elif is_GammaH(G):
v.append('JH(%s,%s)'%(G.level(), G._generators_for_H()))
return ' x '.join(v)
def _ambient_latex_repr(self):
"""
Return Latex representation of the ambient product.
OUTPUT: string
EXAMPLES::
sage: (J0(11) * J0(33))._ambient_latex_repr()
'J_0(11) \\times J_0(33)'
"""
v = []
for G in self.groups():
if is_Gamma0(G):
v.append('J_0(%s)'%G.level())
elif is_Gamma1(G):
v.append('J_1(%s)'%G.level())
elif is_GammaH(G):
v.append('J_H(%s,%s)'%(G.level(), G._generators_for_H()))
return ' \\times '.join(v)
def _ambient_lattice(self):
"""
        Return the free lattice of rank twice the degree of self. This is the
lattice corresponding to the ambient product Jacobian.
OUTPUT: lattice
EXAMPLES: We compute the ambient lattice of a product::
sage: (J0(33)*J1(11))._ambient_lattice()
Ambient free module of rank 8 over the principal ideal domain Integer Ring
We compute the ambient lattice of an abelian subvariety
`J_0(33)`, which is the same as the lattice for the
`J_0(33)` itself::
sage: A = J0(33)[0]; A._ambient_lattice()
Ambient free module of rank 6 over the principal ideal domain Integer Ring
sage: J0(33)._ambient_lattice()
Ambient free module of rank 6 over the principal ideal domain Integer Ring
"""
try:
return self.__ambient_lattice
except AttributeError:
self.__ambient_lattice = ZZ**(2*self.degree())
return self.__ambient_lattice
def _ambient_modular_symbols_spaces(self):
"""
Return a tuple of the ambient cuspidal modular symbols spaces that
make up the Jacobian product that contains self.
OUTPUT: tuple of cuspidal modular symbols spaces
EXAMPLES::
sage: (J0(11) * J0(33))._ambient_modular_symbols_spaces()
(Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field,
Modular Symbols subspace of dimension 6 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field)
sage: (J0(11) * J0(33)[0])._ambient_modular_symbols_spaces()
(Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field,
Modular Symbols subspace of dimension 6 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field)
"""
if not self.is_ambient():
return self.ambient_variety()._ambient_modular_symbols_spaces()
try:
return self.__ambient_modular_symbols_spaces
except AttributeError:
X = tuple([ModularSymbols(G).cuspidal_subspace() for G in self.groups()])
self.__ambient_modular_symbols_spaces = X
return X
def _ambient_modular_symbols_abvars(self):
"""
Return a tuple of the ambient modular symbols abelian varieties
that make up the Jacobian product that contains self.
OUTPUT: tuple of modular symbols abelian varieties
EXAMPLES::
sage: (J0(11) * J0(33))._ambient_modular_symbols_abvars()
(Abelian variety J0(11) of dimension 1, Abelian variety J0(33) of dimension 3)
"""
if not self.is_ambient():
return self.ambient_variety()._ambient_modular_symbols_abvars()
try:
return self.__ambient_modular_symbols_abvars
except AttributeError:
X = tuple([ModularAbelianVariety_modsym(M) for M in self._ambient_modular_symbols_spaces()])
self.__ambient_modular_symbols_abvars = X
return X
def _ambient_dimension(self):
"""
Return the dimension of the ambient Jacobian product.
EXAMPLES::
sage: A = J0(37) * J1(13); A
Abelian variety J0(37) x J1(13) of dimension 4
sage: A._ambient_dimension()
4
sage: B = A[0]; B
Simple abelian subvariety 13aG1(1,13) of dimension 2 of J0(37) x J1(13)
sage: B._ambient_dimension()
4
This example is fast because it implicitly calls
_ambient_dimension.
::
sage: J0(902834082394)
Abelian variety J0(902834082394) of dimension 113064825881
"""
try:
return self.__ambient_dimension
except AttributeError:
d = sum([G.dimension_cusp_forms(2) for G in self.groups()], Integer(0))
self.__ambient_dimension = d
return d
def _ambient_hecke_matrix_on_modular_symbols(self, n):
r"""
Return block direct sum of the matrix of the Hecke operator
`T_n` acting on each of the ambient modular symbols
spaces.
INPUT:
- ``n`` - an integer `\geq 1`.
OUTPUT: a matrix
EXAMPLES::
sage: (J0(11) * J1(13))._ambient_hecke_matrix_on_modular_symbols(2)
[-2 0 0 0 0 0]
[ 0 -2 0 0 0 0]
[ 0 0 -2 0 -1 1]
[ 0 0 1 -1 0 -1]
[ 0 0 1 1 -2 0]
[ 0 0 0 1 -1 -1]
"""
if not self.is_ambient():
return self.ambient_variety()._ambient_hecke_matrix_on_modular_symbols(n)
try:
return self.__ambient_hecke_matrix_on_modular_symbols[n]
except AttributeError:
self.__ambient_hecke_matrix_on_modular_symbols = {}
except KeyError:
pass
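        # The Hecke operator on the ambient product acts as the
        # block-diagonal direct sum of its matrices on the individual
        # modular symbols factors.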
M = self._ambient_modular_symbols_spaces()
if len(M) == 0:
return matrix(QQ,0)
T = M[0].hecke_matrix(n)
for i in range(1,len(M)):
T = T.block_sum(M[i].hecke_matrix(n))
self.__ambient_hecke_matrix_on_modular_symbols[n] = T
return T
###############################################################################
# Rational and Integral Homology
###############################################################################
def _rational_homology_space(self):
"""
Return the rational homology of this modular abelian variety.
EXAMPLES::
sage: J = J0(11)
sage: J._rational_homology_space()
Vector space of dimension 2 over Rational Field
The result is cached::
sage: J._rational_homology_space() is J._rational_homology_space()
True
"""
try:
return self.__rational_homology_space
except AttributeError:
HQ = self.rational_homology().free_module()
self.__rational_homology_space = HQ
return HQ
def homology(self, base_ring=ZZ):
"""
Return the homology of this modular abelian variety.
.. warning::
For efficiency reasons the basis of the integral homology
need not be the same as the basis for the rational
homology.
EXAMPLES::
sage: J0(389).homology(GF(7))
Homology with coefficients in Finite Field of size 7 of Abelian variety J0(389) of dimension 32
sage: J0(389).homology(QQ)
Rational Homology of Abelian variety J0(389) of dimension 32
sage: J0(389).homology(ZZ)
Integral Homology of Abelian variety J0(389) of dimension 32
"""
try:
return self._homology[base_ring]
except AttributeError:
self._homology = {}
except KeyError:
pass
if base_ring == ZZ:
H = homology.IntegralHomology(self)
elif base_ring == QQ:
H = homology.RationalHomology(self)
else:
H = homology.Homology_over_base(self, base_ring)
self._homology[base_ring] = H
return H
def integral_homology(self):
"""
Return the integral homology of this modular abelian variety.
EXAMPLES::
sage: H = J0(43).integral_homology(); H
Integral Homology of Abelian variety J0(43) of dimension 3
sage: H.rank()
6
sage: H = J1(17).integral_homology(); H
Integral Homology of Abelian variety J1(17) of dimension 5
sage: H.rank()
10
If you just ask for the rank of the homology, no serious
calculations are done, so the following is fast::
sage: H = J0(50000).integral_homology(); H
Integral Homology of Abelian variety J0(50000) of dimension 7351
sage: H.rank()
14702
A product::
sage: H = (J0(11) * J1(13)).integral_homology()
sage: H.hecke_operator(2)
Hecke operator T_2 on Integral Homology of Abelian variety J0(11) x J1(13) of dimension 3
sage: H.hecke_operator(2).matrix()
[-2 0 0 0 0 0]
[ 0 -2 0 0 0 0]
[ 0 0 -2 0 -1 1]
[ 0 0 1 -1 0 -1]
[ 0 0 1 1 -2 0]
[ 0 0 0 1 -1 -1]
"""
return self.homology(ZZ)
def rational_homology(self):
"""
Return the rational homology of this modular abelian variety.
EXAMPLES::
sage: H = J0(37).rational_homology(); H
Rational Homology of Abelian variety J0(37) of dimension 2
sage: H.rank()
4
sage: H.base_ring()
Rational Field
sage: H = J1(17).rational_homology(); H
Rational Homology of Abelian variety J1(17) of dimension 5
sage: H.rank()
10
sage: H.base_ring()
Rational Field
"""
return self.homology(QQ)
###############################################################################
# L-series
###############################################################################
def lseries(self):
"""
Return the complex `L`-series of this modular abelian
variety.
EXAMPLES::
sage: A = J0(37)
sage: A.lseries()
Complex L-series attached to Abelian variety J0(37) of dimension 2
"""
try:
return self.__lseries
except AttributeError:
pass
self.__lseries = lseries.Lseries_complex(self)
return self.__lseries
def padic_lseries(self, p):
"""
Return the `p`-adic `L`-series of this modular
abelian variety.
EXAMPLES::
sage: A = J0(37)
sage: A.padic_lseries(7)
7-adic L-series attached to Abelian variety J0(37) of dimension 2
"""
p = int(p)
try:
return self.__lseries_padic[p]
except AttributeError:
self.__lseries_padic = {}
except KeyError:
pass
self.__lseries_padic[p] = lseries.Lseries_padic(self, p)
return self.__lseries_padic[p]
###############################################################################
# Hecke Operators
###############################################################################
def hecke_operator(self, n):
"""
Return the `n^{th}` Hecke operator on the modular abelian
        variety, if this makes sense, i.e., if self is stable under the
        Hecke operators of its ambient Jacobian. Otherwise raise a
        ValueError.
EXAMPLES: We compute `T_2` on `J_0(37)`.
::
sage: t2 = J0(37).hecke_operator(2); t2
Hecke operator T_2 on Abelian variety J0(37) of dimension 2
sage: t2.charpoly().factor()
x * (x + 2)
sage: t2.index()
2
Note that there is no matrix associated to Hecke operators on
modular abelian varieties. For a matrix, instead consider, e.g.,
the Hecke operator on integral or rational homology.
::
sage: t2.action_on_homology().matrix()
[-1 1 1 -1]
[ 1 -1 1 0]
[ 0 0 -2 1]
[ 0 0 0 0]
"""
try:
return self._hecke_operator[n]
except AttributeError:
self._hecke_operator = {}
except KeyError:
pass
Tn = HeckeOperator(self, n)
self._hecke_operator[n] = Tn
return Tn
def hecke_polynomial(self, n, var='x'):
r"""
Return the characteristic polynomial of the `n^{th}` Hecke
        operator `T_n` acting on self. Raises an ArithmeticError
        if self is not stable under `T_n`.
INPUT:
- ``n`` - integer `\geq 1`
- ``var`` - string (default: 'x'); valid variable
name
EXAMPLES::
sage: J0(33).hecke_polynomial(2)
x^3 + 3*x^2 - 4
sage: f = J0(33).hecke_polynomial(2, 'y'); f
y^3 + 3*y^2 - 4
sage: f.parent()
Univariate Polynomial Ring in y over Rational Field
sage: J0(33)[2].hecke_polynomial(3)
x + 1
sage: J0(33)[0].hecke_polynomial(5)
x - 1
sage: J0(33)[0].hecke_polynomial(11)
x - 1
sage: J0(33)[0].hecke_polynomial(3)
Traceback (most recent call last):
...
ArithmeticError: subspace is not invariant under matrix
"""
n = Integer(n)
if n <= 0:
raise ValueError("n must be a positive integer")
key = (n,var)
try:
return self.__hecke_polynomial[key]
except AttributeError:
self.__hecke_polynomial = {}
except KeyError:
pass
f = self._compute_hecke_polynomial(n, var=var)
self.__hecke_polynomial[key] = f
return f
def _compute_hecke_polynomial(self, n, var='x'):
"""
Return the Hecke polynomial of index `n` in terms of the
given variable.
INPUT:
- ``n`` - positive integer
- ``var`` - string (default: 'x')
EXAMPLES::
sage: A = J0(33)*J0(11)
sage: A._compute_hecke_polynomial(2)
x^4 + 5*x^3 + 6*x^2 - 4*x - 8
"""
return self.hecke_operator(n).charpoly(var=var)
def _integral_hecke_matrix(self, n):
"""
Return the matrix of the Hecke operator `T_n` acting on
the integral homology of this modular abelian variety, if the
modular abelian variety is stable under `T_n`. Otherwise,
raise an ArithmeticError.
EXAMPLES::
sage: A = J0(23)
sage: t = A._integral_hecke_matrix(2); t
[ 0 1 -1 0]
[ 0 1 -1 1]
[-1 2 -2 1]
[-1 1 0 -1]
sage: t.parent()
Full MatrixSpace of 4 by 4 dense matrices over Integer Ring
"""
A = self._ambient_hecke_matrix_on_modular_symbols(n)
return A.restrict(self.lattice())
def _rational_hecke_matrix(self, n):
r"""
Return the matrix of the Hecke operator `T_n` acting on
the rational homology `H_1(A,\QQ)` of this modular
abelian variety, if this action is defined. Otherwise, raise an
ArithmeticError.
EXAMPLES::
sage: A = J0(23)
sage: t = A._rational_hecke_matrix(2); t
[ 0 1 -1 0]
[ 0 1 -1 1]
[-1 2 -2 1]
[-1 1 0 -1]
sage: t.parent()
Full MatrixSpace of 4 by 4 dense matrices over Rational Field
"""
return self._integral_hecke_matrix(n)
###############################################################################
# Subgroups
###############################################################################
def qbar_torsion_subgroup(self):
r"""
Return the group of all points of finite order in the algebraic
closure of this abelian variety.
EXAMPLES::
sage: T = J0(33).qbar_torsion_subgroup(); T
Group of all torsion points in QQbar on Abelian variety J0(33) of dimension 3
The field of definition is the same as the base field of the
abelian variety.
::
sage: T.field_of_definition()
Rational Field
On the other hand, T is a module over `\ZZ`.
::
sage: T.base_ring()
Integer Ring
"""
try:
return self.__qbar_torsion_subgroup
except AttributeError:
G = QQbarTorsionSubgroup(self)
self.__qbar_torsion_subgroup = G
return G
def rational_torsion_subgroup(self):
"""
Return the maximal torsion subgroup of self defined over QQ.
EXAMPLES::
sage: J = J0(33)
sage: A = J.new_subvariety()
sage: A
Abelian subvariety of dimension 1 of J0(33)
sage: t = A.rational_torsion_subgroup()
sage: t.multiple_of_order()
4
sage: t.divisor_of_order()
4
sage: t.order()
4
sage: t.gens()
[[(1/2, 0, 0, -1/2, 0, 0)], [(0, 0, 1/2, 0, 1/2, -1/2)]]
sage: t
Torsion subgroup of Abelian subvariety of dimension 1 of J0(33)
"""
try:
return self.__rational_torsion_subgroup
except AttributeError:
T = RationalTorsionSubgroup(self)
self.__rational_torsion_subgroup = T
return T
def cuspidal_subgroup(self):
"""
        Return the cuspidal subgroup of this modular abelian variety. This
        is the subgroup generated by the cusps.
EXAMPLES::
sage: J = J0(54)
sage: C = J.cuspidal_subgroup()
sage: C.gens()
[[(1/3, 0, 0, 0, 0, 1/3, 0, 2/3)], [(0, 1/3, 0, 0, 0, 2/3, 0, 1/3)], [(0, 0, 1/9, 1/9, 1/9, 1/9, 1/9, 2/9)], [(0, 0, 0, 1/3, 0, 1/3, 0, 0)], [(0, 0, 0, 0, 1/3, 1/3, 0, 1/3)], [(0, 0, 0, 0, 0, 0, 1/3, 2/3)]]
sage: C.invariants()
[3, 3, 3, 3, 3, 9]
sage: J1(13).cuspidal_subgroup()
Finite subgroup with invariants [19, 19] over QQ of Abelian variety J1(13) of dimension 2
sage: A = J0(33)[0]
sage: A.cuspidal_subgroup()
Finite subgroup with invariants [5] over QQ of Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)
"""
try:
return self._cuspidal_subgroup
except AttributeError:
if not self.is_subvariety_of_ambient_jacobian():
raise ValueError("self must be a subvariety of the ambient variety")
if self.is_ambient():
T = self._ambient_cuspidal_subgroup(rational_only=False)
else:
T = self.ambient_variety().cuspidal_subgroup().intersection(self)
self._cuspidal_subgroup = T
return T
def _ambient_cuspidal_subgroup(self, rational_only=False, rational_subgroup=False):
"""
EXAMPLES::
sage: (J1(13)*J0(11))._ambient_cuspidal_subgroup()
Finite subgroup with invariants [19, 95] over QQ of Abelian variety J1(13) x J0(11) of dimension 3
sage: (J0(33))._ambient_cuspidal_subgroup()
Finite subgroup with invariants [10, 10] over QQ of Abelian variety J0(33) of dimension 3
sage: (J0(33)*J0(33))._ambient_cuspidal_subgroup()
Finite subgroup with invariants [10, 10, 10, 10] over QQ of Abelian variety J0(33) x J0(33) of dimension 6
"""
n = 2 * self.degree()
i = 0
lattice = (ZZ**n).zero_submodule()
if rational_subgroup:
CS = RationalCuspidalSubgroup
elif rational_only:
CS = RationalCuspSubgroup
else:
CS = CuspidalSubgroup
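        # Sketch of the construction below: for each ambient Jacobian
        # factor J, embed the lattice of its cuspidal subgroup into the
        # rank-n ambient lattice by padding with zero blocks on the left
        # (for the factors already processed) and on the right (for the
        # remaining factors), and sum the resulting lattices.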
for J in self._ambient_modular_symbols_abvars():
L = CS(J).lattice().basis_matrix()
Z_left = matrix(QQ,L.nrows(),i)
Z_right = matrix(QQ,L.nrows(),n-i-L.ncols())
lattice += (Z_left.augment(L).augment(Z_right)).row_module(ZZ)
i += L.ncols()
return FiniteSubgroup_lattice(self, lattice, field_of_definition=self.base_field())
def shimura_subgroup(self):
r"""
        Return the Shimura subgroup of this modular abelian variety. This is
        the kernel of the natural map `J_0(N) \rightarrow J_1(N)`.
        Here we compute the Shimura subgroup as the kernel of
        `J_0(N) \rightarrow J_0(Np)`, where the map is the difference between
        the two degeneracy maps.
EXAMPLES::
sage: J=J0(11)
sage: J.shimura_subgroup()
Finite subgroup with invariants [5] over QQ of Abelian variety J0(11) of dimension 1
sage: J=J0(17)
sage: G=J.cuspidal_subgroup(); G
Finite subgroup with invariants [4] over QQ of Abelian variety J0(17) of dimension 1
sage: S=J.shimura_subgroup(); S
Finite subgroup with invariants [4] over QQ of Abelian variety J0(17) of dimension 1
sage: G.intersection(S)
Finite subgroup with invariants [2] over QQ of Abelian variety J0(17) of dimension 1
sage: J=J0(33)
sage: A=J.decomposition()[0]
sage: A.shimura_subgroup()
Finite subgroup with invariants [5] over QQ of Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)
sage: J.shimura_subgroup()
Finite subgroup with invariants [10] over QQ of Abelian variety J0(33) of dimension 3
"""
        N = self.level()
        J = self.ambient_variety()
        for p in prime_range(100):
            if N % p != 0:
                break
        phi = J.degeneracy_map(N*p, 1)
        phip = J.degeneracy_map(N*p, p)
        SIG = (phi - phip).kernel()
        assert SIG[1].dimension() == 0, "The intersection should have dimension 0"
return self.intersection(SIG[0])
def rational_cusp_subgroup(self):
r"""
Return the subgroup of this modular abelian variety generated by
rational cusps.
This is a subgroup of the group of rational points in the cuspidal
subgroup.
.. warning::
This is only currently implemented for
`\Gamma_0(N)`.
EXAMPLES::
sage: J = J0(54)
sage: CQ = J.rational_cusp_subgroup(); CQ
Finite subgroup with invariants [3, 3, 9] over QQ of Abelian variety J0(54) of dimension 4
sage: CQ.gens()
[[(1/3, 0, 0, 1/3, 2/3, 1/3, 0, 1/3)], [(0, 0, 1/9, 1/9, 7/9, 7/9, 1/9, 8/9)], [(0, 0, 0, 0, 0, 0, 1/3, 2/3)]]
sage: factor(CQ.order())
3^4
sage: CQ.invariants()
[3, 3, 9]
In this example the rational cuspidal subgroup and the cuspidal
subgroup differ by a lot.
::
sage: J = J0(49)
sage: J.cuspidal_subgroup()
Finite subgroup with invariants [2, 14] over QQ of Abelian variety J0(49) of dimension 1
sage: J.rational_cusp_subgroup()
Finite subgroup with invariants [2] over QQ of Abelian variety J0(49) of dimension 1
Note that computation of the rational cusp subgroup isn't
implemented for `\Gamma_1`.
::
sage: J = J1(13)
sage: J.cuspidal_subgroup()
Finite subgroup with invariants [19, 19] over QQ of Abelian variety J1(13) of dimension 2
sage: J.rational_cusp_subgroup()
Traceback (most recent call last):
...
NotImplementedError: computation of rational cusps only implemented in Gamma0 case.
"""
try:
return self._rational_cusp_subgroup
except AttributeError:
if not self.is_subvariety_of_ambient_jacobian():
raise ValueError("self must be a subvariety of the ambient variety")
if self.is_ambient():
T = self._ambient_cuspidal_subgroup(rational_only=True)
else:
T = self.ambient_variety().rational_cusp_subgroup().intersection(self)
self._rational_cusp_subgroup = T
return T
def rational_cuspidal_subgroup(self):
r"""
Return the rational subgroup of the cuspidal subgroup of this
modular abelian variety.
This is a subgroup of the group of rational points in the
cuspidal subgroup.
.. warning::
This is only currently implemented for
`\Gamma_0(N)`.
EXAMPLES::
sage: J = J0(54)
sage: CQ = J.rational_cuspidal_subgroup(); CQ
Finite subgroup with invariants [3, 3, 9] over QQ of Abelian variety J0(54) of dimension 4
sage: CQ.gens()
[[(1/3, 0, 0, 1/3, 2/3, 1/3, 0, 1/3)], [(0, 0, 1/9, 1/9, 7/9, 7/9, 1/9, 8/9)], [(0, 0, 0, 0, 0, 0, 1/3, 2/3)]]
sage: factor(CQ.order())
3^4
sage: CQ.invariants()
[3, 3, 9]
In this example the rational cuspidal subgroup and the cuspidal
subgroup differ by a lot.
::
sage: J = J0(49)
sage: J.cuspidal_subgroup()
Finite subgroup with invariants [2, 14] over QQ of Abelian variety J0(49) of dimension 1
sage: J.rational_cuspidal_subgroup()
Finite subgroup with invariants [2] over QQ of Abelian variety J0(49) of dimension 1
Note that computation of the rational cusp subgroup isn't
implemented for `\Gamma_1`.
::
sage: J = J1(13)
sage: J.cuspidal_subgroup()
Finite subgroup with invariants [19, 19] over QQ of Abelian variety J1(13) of dimension 2
sage: J.rational_cuspidal_subgroup()
Traceback (most recent call last):
...
NotImplementedError: only implemented when group is Gamma0
"""
try:
return self._rational_cuspidal_subgroup
except AttributeError:
if not self.is_subvariety_of_ambient_jacobian():
raise ValueError("self must be a subvariety of the ambient variety")
if self.is_ambient():
T = self._ambient_cuspidal_subgroup(rational_subgroup=True)
else:
T = self.ambient_variety().rational_cuspidal_subgroup().intersection(self)
self._rational_cuspidal_subgroup = T
return T
def zero_subgroup(self):
"""
Return the zero subgroup of this modular abelian variety, as a
finite group.
EXAMPLES::
        sage: A = J0(54); G = A.zero_subgroup(); G
Finite subgroup with invariants [] over QQ of Abelian variety J0(54) of dimension 4
sage: G.is_subgroup(A)
True
"""
try:
return self.__zero_subgroup
except AttributeError:
G = FiniteSubgroup_lattice(self, self.lattice(), field_of_definition=QQ)
self.__zero_subgroup = G
return G
def finite_subgroup(self, X, field_of_definition=None, check=True):
"""
Return a finite subgroup of this modular abelian variety.
INPUT:
- ``X`` - list of elements of other finite subgroups
of this modular abelian variety or elements that coerce into the
rational homology (viewed as a rational vector space); also X could
be a finite subgroup itself that is contained in this abelian
variety.
- ``field_of_definition`` - (default: None) field
over which this group is defined. If None try to figure out the
best base field.
OUTPUT: a finite subgroup of a modular abelian variety
EXAMPLES::
sage: J = J0(11)
sage: J.finite_subgroup([[1/5,0], [0,1/3]])
Finite subgroup with invariants [15] over QQbar of Abelian variety J0(11) of dimension 1
::
sage: J = J0(33); C = J[0].cuspidal_subgroup(); C
Finite subgroup with invariants [5] over QQ of Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)
sage: J.finite_subgroup([[0,0,0,0,0,1/6]])
Finite subgroup with invariants [6] over QQbar of Abelian variety J0(33) of dimension 3
sage: J.finite_subgroup(C)
Finite subgroup with invariants [5] over QQ of Abelian variety J0(33) of dimension 3
"""
if isinstance(X, (list, tuple)):
X = self._ambient_lattice().span(X)
elif isinstance(X, FiniteSubgroup):
if field_of_definition is None:
field_of_definition = X.field_of_definition()
A = X.abelian_variety()
if A.groups() != self.groups():
raise ValueError("ambient product Jacobians must be equal")
if A == self:
X = X.lattice()
else:
if X.is_subgroup(self):
X = (X.lattice() + self.lattice()).intersection(self.vector_space())
else:
raise ValueError("X must be a subgroup of self.")
        if field_of_definition is None:
            field_of_definition = QQbar
return FiniteSubgroup_lattice(self, X, field_of_definition=field_of_definition, check=check)
def torsion_subgroup(self, n):
"""
        Return the `n`-torsion subgroup of this modular abelian
        variety `A`, i.e., the group `A[n]` of all elements of order
        dividing `n`.
EXAMPLES::
sage: J1(13).torsion_subgroup(19)
Finite subgroup with invariants [19, 19, 19, 19] over QQ of Abelian variety J1(13) of dimension 2
::
sage: A = J0(23)
sage: G = A.torsion_subgroup(5); G
Finite subgroup with invariants [5, 5, 5, 5] over QQ of Abelian variety J0(23) of dimension 2
sage: G.order()
625
sage: G.gens()
[[(1/5, 0, 0, 0)], [(0, 1/5, 0, 0)], [(0, 0, 1/5, 0)], [(0, 0, 0, 1/5)]]
sage: A = J0(23)
sage: A.torsion_subgroup(2).order()
16
"""
try:
return self.__torsion_subgroup[n]
except KeyError:
pass
except AttributeError:
self.__torsion_subgroup = {}
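        # The n-torsion subgroup A[n] is ((1/n)*L)/L, where L is the lattice
        # defining A, so it is given by scaling the lattice by 1/n.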
lattice = self.lattice().scale(1/Integer(n))
H = FiniteSubgroup_lattice(self, lattice, field_of_definition=self.base_field())
self.__torsion_subgroup[n] = H
return H
###############################################################################
# Decomposition
###############################################################################
def degen_t(self, none_if_not_known=False):
"""
If this abelian variety is obtained via decomposition then it gets
labeled with the newform label along with some information about
degeneracy maps. In particular, the label ends in a pair
`(t,N)`, where `N` is the ambient level and
`t` is an integer that divides the quotient of `N`
by the newform level. This function returns the tuple
`(t,N)`, or raises a ValueError if self isn't simple.
.. note::
It need not be the case that self is literally equal to the
image of the newform abelian variety under the `t^{th}`
degeneracy map. See the documentation for the label method
for more details.
INPUT:
- ``none_if_not_known`` - (default: False) - if
True, return None instead of attempting to compute the degen map's
`t`, if it isn't known. This None result is not cached.
OUTPUT: a pair (integer, integer)
EXAMPLES::
sage: D = J0(33).decomposition(); D
[
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33),
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33),
Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
]
sage: D[0].degen_t()
(1, 33)
sage: D[1].degen_t()
(3, 33)
sage: D[2].degen_t()
(1, 33)
sage: J0(33).degen_t()
Traceback (most recent call last):
...
ValueError: self must be simple
"""
try:
return self.__degen_t
except AttributeError:
if none_if_not_known:
return None
elif self.dimension() > 0 and self.is_simple():
self.__degen_t = self.decomposition()[0].degen_t()
return self.__degen_t
raise ValueError("self must be simple")
def isogeny_number(self, none_if_not_known=False):
"""
        Return the number (starting at 0) of the isogeny class of new
        simple abelian varieties that self lies in. If self is not simple,
        raise a ValueError.
        INPUT:
        - ``none_if_not_known`` - bool (default: False); if
        True then this function may return None, instead of attempting to
        compute the isogeny number, if it isn't already known.
EXAMPLES: We test the none_if_not_known flag first::
sage: J0(33).isogeny_number(none_if_not_known=True) is None
True
Of course, `J_0(33)` is not simple, so this function
raises a ValueError::
sage: J0(33).isogeny_number()
Traceback (most recent call last):
...
ValueError: self must be simple
        Each simple factor here has isogeny number 0, since each corresponds
        to the first isogeny class of newforms at its newform level.
::
sage: J0(33)[1].isogeny_number()
0
sage: J0(33)[2].isogeny_number()
0
Next consider `J_0(37)` where there are two distinct
newform factors::
sage: J0(37)[1].isogeny_number()
1
"""
try:
return self.__isogeny_number
except AttributeError:
if none_if_not_known:
return None
elif self.is_simple():
self.__isogeny_number = self.decomposition()[0].isogeny_number()
return self.__isogeny_number
else:
raise ValueError("self must be simple")
def is_simple(self, none_if_not_known=False):
"""
Return whether or not this modular abelian variety is simple, i.e.,
has no proper nonzero abelian subvarieties.
INPUT:
- ``none_if_not_known`` - bool (default: False); if
        True then this function may return None instead of True or False if
we don't already know whether or not self is simple.
EXAMPLES::
sage: J0(5).is_simple(none_if_not_known=True) is None # this may fail if J0(5) comes up elsewhere...
True
sage: J0(33).is_simple()
False
sage: J0(33).is_simple(none_if_not_known=True)
False
sage: J0(33)[1].is_simple()
True
sage: J1(17).is_simple()
False
"""
try:
return self.__is_simple
except AttributeError:
if none_if_not_known:
return None
self.__is_simple = len(self.decomposition()) <= 1
return self.__is_simple
def decomposition(self, simple=True, bound=None):
"""
Return a sequence of abelian subvarieties of self that are all
simple, have finite intersection and sum to self.
        INPUT:
        - ``simple`` - bool (default: True); if True, all factors are
        simple. If False, each factor returned is isogenous to a power of a
        simple abelian variety, and the simples underlying distinct factors
        are distinct.
        - ``bound`` - int (default: None); if given, only use
        Hecke operators up to this bound when decomposing. This can give
        wrong answers, so use with caution!
EXAMPLES::
sage: m = ModularSymbols(11).cuspidal_submodule()
sage: d1 = m.degeneracy_map(33,1).matrix(); d3=m.degeneracy_map(33,3).matrix()
sage: w = ModularSymbols(33).submodule((d1 + d3).image(), check=False)
sage: A = w.abelian_variety(); A
Abelian subvariety of dimension 1 of J0(33)
sage: D = A.decomposition(); D
[
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33)
]
sage: D[0] == A
True
sage: B = A + J0(33)[0]; B
Abelian subvariety of dimension 2 of J0(33)
sage: dd = B.decomposition(simple=False); dd
[
Abelian subvariety of dimension 2 of J0(33)
]
sage: dd[0] == B
True
sage: dd = B.decomposition(); dd
[
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33),
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33)
]
sage: sum(dd) == B
True
We decompose a product of two Jacobians::
sage: (J0(33) * J0(11)).decomposition()
[
Simple abelian subvariety 11a(1,11) of dimension 1 of J0(33) x J0(11),
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33) x J0(11),
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33) x J0(11),
Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33) x J0(11)
]
"""
try:
return self.__decomposition[(simple, bound)]
except KeyError:
pass
except AttributeError:
self.__decomposition = {}
if self.is_ambient():
# Decompose each piece, then lift
if len(self.groups()) == 0:
D = []
elif len(self.groups()) == 1:
D = ModularAbelianVariety_modsym(ModularSymbols(self.groups()[0], sign=0).cuspidal_submodule()).decomposition(simple=simple, bound=bound)
else:
# Decompose each ambient modular symbols factor.
#X = [ModularAbelianVariety_modsym(ModularSymbols(G,sign=0).cuspidal_submodule()) for G in self.groups()]
from abvar_ambient_jacobian import ModAbVar_ambient_jacobian_class
X = [ModAbVar_ambient_jacobian_class(G) for G in self.groups()]
E = [A.decomposition(simple=simple, bound=bound) for A in X]
i = 0
n = 2*self.dimension()
# Now lift each factor of the decomposition to self.
G = self.groups()
D = []
K = self.base_field()
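                # Lift each factor B of an ambient piece to the full
                # product: pad B's lattice with zero columns on the left and
                # right so that it sits in the homology of the whole ambient
                # Jacobian; the column offset i advances by the homology
                # rank of each piece.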
for C in E:
for B in C:
L = B.lattice().basis_matrix()
if simple:
is_simple = True
else:
is_simple = None
lattice = matrix(QQ,L.nrows(),i).augment(L).augment(matrix(QQ,L.nrows(),n-i-L.ncols())).row_module(ZZ)
D.append(ModularAbelianVariety(G, lattice, K, is_simple=is_simple, newform_level=B.newform_level(),
isogeny_number=B.isogeny_number(none_if_not_known=True),
number=B.degen_t(none_if_not_known=True)))
if len(C) > 0:
i += L.ncols()
elif not simple:
# In this case decompose the ambient space into powers of
# simple abelian varieties (i.e. with
            # \code{simple=False}), and then intersect the lattice
# corresponding to self with each of these factors.
D = []
L = self.lattice()
groups = self.groups()
K = self.base_ring()
for X in self.ambient_variety().decomposition(simple=False):
lattice = L.intersection(X.vector_space())
if lattice.rank() > 0:
the_factor = ModularAbelianVariety(groups, lattice, K, is_simple=X.is_simple(none_if_not_known=True), newform_level=X.newform_level(), isogeny_number=X.isogeny_number(none_if_not_known=True), number=X.degen_t(none_if_not_known=True))
D.append(the_factor)
else:
# See the documentation for self._classify_ambient_factors
# in order to understand what we're doing here.
I_F, I_E, X = self._classify_ambient_factors(simple=simple, bound=bound)
Z_E = [X[i] for i in I_E]
Z_F = [X[i] for i in I_F]
F = sum(Z_F, self.zero_subvariety())
            # Now self is isogenous to the sum of the factors in Z_F.
# We use this isogeny to obtain a product decomposition of
# self.
if F == self:
# The easy case -- it is already such a decomposition
D = Z_F
else:
# The hard case -- now we have to pull back the
# factorization
# Suppose $B$ is an abelian variety and there is a
# finite degree map $B\to J$, where $J$ is an ambient
# Jacobian. Suppose further that we find abelian
# subvarieties $E$ and $F$ of $J$ such that $E + F =
# J$, $E$ and $F$ have finite intersection, the
# composition $B \to J \to J/E$ is an isogeny, and we
# know an explicit decomposition of $F$. Then we can
# compute a decomposition of $B$ as follows. Let
# $L_E$ and $L_F$ be the lattices corresponding to $E$
# and $F$ inside of $L_J$. Compute a matrix $\Phi$
# representing the composition $L_B \to L_J \to L_F
# \otimes \QQ$, where the map $L_J$ to $L_F\otimes
# \QQ$ is projection onto the second factor in the
# decomposition of $L_J$ as $L_E + L_F$ (up to finite
# index). Finally, for each factor $A_i$ of $F$ with
# lattice $L_{A_i}$, compute the saturation $S_i$ of
# $\Phi^{-1}(L_{A_i})$. Then the $S_i$ define a
# decomposition of $B$.
E = sum(Z_E, self.zero_subvariety())
L_B = self.lattice()
L_E = E.lattice()
L_F = F.lattice()
decomp_matrix = L_E.basis_matrix().stack(L_F.basis_matrix())
# Now we compute explicitly the ZZ-linear map (over
# QQ) from L_B that is "projection onto L_F". This
# means write each element of a basis for L_B in terms
# of decomp_matrix, then take the bottom coordinates.
X = decomp_matrix.solve_left(L_B.basis_matrix())
                # Each row of X expresses the corresponding basis element
                # of L_B as a linear combination of the rows of
                # decomp_matrix.  We project onto L_F by taking the
                # right-most columns of this matrix.
n = X.ncols()
proj = X.matrix_from_columns(range(n-L_F.rank(), n))
# Now proj is the matrix of projection that goes from
# L_B to L_F, wrt the basis of those spaces.
section = proj**(-1)
# Now section maps L_F to L_B (tensor QQ). Now we
# just take each factor of F, which corresponds to a
# submodule of L_F, and map it over to L_B tensor QQ
# and saturate.
D = []
groups = self.groups()
K = self.base_field()
for A in Z_F:
L_A = A.lattice()
M = L_F.coordinate_module(L_A).basis_matrix() * section
M, _ = M._clear_denom()
M = M.saturation()
M = M * L_B.basis_matrix()
lattice = M.row_module(ZZ)
the_factor = ModularAbelianVariety(groups, lattice, K, is_simple=True, newform_level=A.newform_level(),
isogeny_number=A.isogeny_number(), number=A.degen_t())
D.append(the_factor)
################
if isinstance(D, Sequence_generic):
S = D
else:
D.sort()
S = Sequence(D, immutable=True, cr=True, universe=self.category())
self.__decomposition[(simple, bound)] = S
return S
def _classify_ambient_factors(self, simple=True, bound=None):
r"""
This function implements the following algorithm, which produces
data useful in finding a decomposition or complement of self.
#. Suppose `A_1 + \cdots + A_n` is a simple decomposition
of the ambient space.
#. For each `i`, let
`B_i = A_1 + \cdots + A_i`.
#. For each `i`, compute the intersection `C_i` of
`B_i` and self.
        #. For each `i`, if the dimension of `C_i` is
        bigger than the dimension of `C_{i-1}`, put `i` in the "in" list;
        otherwise put `i` in the "out" list.
        Then one can show that self is isogenous to the sum of the
        `A_i` with `i` in the "in" list. Moreover, the sum
        of the `A_j` with `j` in the "out" list is a
        complement of self in the ambient space.
INPUT:
- ``simple`` - bool (default: True)
- ``bound`` - integer (default: None); if given,
passed onto decomposition function
        OUTPUT: the "in" list, the "out" list, and the list of simple (or power of simple) factors
EXAMPLES::
sage: d1 = J0(11).degeneracy_map(33, 1); d1
Degeneracy map from Abelian variety J0(11) of dimension 1 to Abelian variety J0(33) of dimension 3 defined by [1]
sage: d2 = J0(11).degeneracy_map(33, 3); d2
Degeneracy map from Abelian variety J0(11) of dimension 1 to Abelian variety J0(33) of dimension 3 defined by [3]
sage: A = (d1 + d2).image(); A
Abelian subvariety of dimension 1 of J0(33)
sage: A._classify_ambient_factors()
([1], [0, 2], [
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33),
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33),
Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
])
"""
# Decompose an arbitrary abelian variety
amb = self.ambient_variety()
S = self.vector_space()
X = amb.decomposition(simple=simple, bound=bound)
IN = []; OUT = []
i = 0
V = 0
last_dimension = 0
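        # Greedy pass over the ambient factors: V accumulates the rational
        # homology of the factors considered so far; whenever adding X[j]
        # enlarges the intersection with self's homology S, the factor X[j]
        # contributes to self ("in"); otherwise it belongs to a complement
        # ("out").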
for j in range(len(X)):
V += X[j].vector_space()
d = S.intersection(V).dimension()
if d > last_dimension:
IN.append(j)
last_dimension = d
else:
OUT.append(j)
return IN, OUT, X
def _isogeny_to_product_of_simples(self):
r"""
Given an abelian variety `A`, return an isogeny
`\phi: A \rightarrow B_1 \times \cdots \times B_n`, where
each `B_i` is simple. Note that this isogeny is not
unique.
EXAMPLES::
sage: J = J0(37) ; J.decomposition()
[
Simple abelian subvariety 37a(1,37) of dimension 1 of J0(37),
Simple abelian subvariety 37b(1,37) of dimension 1 of J0(37)
]
sage: phi = J._isogeny_to_product_of_simples() ; phi
Abelian variety morphism:
From: Abelian variety J0(37) of dimension 2
To: Abelian subvariety of dimension 2 of J0(37) x J0(37)
sage: J[0].intersection(J[1]) == phi.kernel()
True
::
sage: J = J0(22) * J0(37)
sage: J._isogeny_to_product_of_simples()
Abelian variety morphism:
From: Abelian variety J0(22) x J0(37) of dimension 4
To: Abelian subvariety of dimension 4 of J0(11) x J0(11) x J0(37) x J0(37)
"""
try:
return self._simple_product_isogeny
except AttributeError:
pass
D = self.decomposition()
dest = prod([d._isogeny_to_newform_abelian_variety().image() for d in D])
A = self.ambient_variety()
dim = sum([d.dimension() for d in D])
proj_ls = [ A.projection(factor) for factor in D ]
mat = matrix(ZZ, 2*self.dimension(), 2*dim)
ind = 0
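        # Assemble the matrix of the isogeny on homology: for each simple
        # factor, place the matrix of the corresponding projection,
        # restricted to self, into the next block of columns.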
for i in range(len(D)):
factor = D[i]
proj = proj_ls[i]
mat.set_block(0, ind, proj.restrict_domain(self).matrix())
ind += 2*factor.dimension()
H = self.Hom(dest)
self._simple_product_isogeny = H(Morphism(H, mat))
return self._simple_product_isogeny
def _isogeny_to_product_of_powers(self):
r"""
Given an abelian variety `A`, return an isogeny
`\phi: A \rightarrow B_1 \times \cdots \times B_n`, where
each `B_i` is a power of a simple abelian variety. These
factors will be exactly those returned by
        self.decomposition(simple=False). Note that this isogeny is not
unique.
EXAMPLES::
sage: J = J0(33) ; D = J.decomposition(simple=False) ; len(D)
2
sage: phi = J._isogeny_to_product_of_powers() ; phi
Abelian variety morphism:
From: Abelian variety J0(33) of dimension 3
To: Abelian subvariety of dimension 3 of J0(33) x J0(33)
::
sage: J = J0(22) * J0(37)
sage: J._isogeny_to_product_of_powers()
Abelian variety morphism:
From: Abelian variety J0(22) x J0(37) of dimension 4
To: Abelian subvariety of dimension 4 of J0(22) x J0(37) x J0(22) x J0(37) x J0(22) x J0(37)
"""
try:
return self._simple_power_product_isogeny
except AttributeError:
pass
D = self.decomposition(simple=False)
A = self.ambient_variety()
proj_ls = [ A.projection(factor) for factor in D ]
dest = prod([phi.image() for phi in proj_ls])
dim = sum([d.dimension() for d in D])
mat = matrix(ZZ, 2*self.dimension(), 2*dim)
ind = 0
for i in range(len(D)):
factor = D[i]
proj = proj_ls[i]
mat.set_block(0, ind, proj.restrict_domain(self).matrix())
ind += 2*factor.dimension()
H = self.Hom(dest)
self._simple_power_product_isogeny = H(Morphism(H, mat))
return self._simple_power_product_isogeny
def complement(self, A=None):
"""
Return a complement of this abelian variety.
INPUT:
- ``A`` - (default: None); if given, A must be an
abelian variety that contains self, in which case the complement of
self is taken inside A. Otherwise the complement is taken in the
ambient product Jacobian.
OUTPUT: abelian variety
EXAMPLES::
sage: a,b,c = J0(33)
sage: (a+b).complement()
Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
sage: (a+b).complement() == c
True
sage: a.complement(a+b)
Abelian subvariety of dimension 1 of J0(33)
"""
try:
C = self.__complement
except AttributeError:
pass
        if self.dimension() == 0:
if A is None:
C = self.ambient_variety()
else:
C = A
elif A is not None and self.dimension() == A.dimension():
if not self.is_subvariety(A):
raise ValueError("self must be a subvariety of A")
C = self.zero_subvariety()
else:
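            # The ambient factors classified as "out" by
            # _classify_ambient_factors sum to a complement of self inside
            # the ambient Jacobian.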
_, factors, X = self._classify_ambient_factors()
D = [X[i] for i in factors]
C = sum(D)
            if C:
                self.__complement = C
                if A is not None:
                    C = C.intersection(A)[1]
            else:
                C = self.zero_subvariety()
return C
def dual(self):
r"""
Return the dual of this abelian variety.
OUTPUT:
- dual abelian variety
- morphism from self to dual
- covering morphism from J to dual
.. warning::
            This is currently only implemented when self is an abelian
            subvariety of the ambient Jacobian product and self and its
            complement in the ambient product Jacobian share no
            common simple factors. A more general implementation will require
implementing computation of the intersection pairing on
integral homology and the resulting Weil pairing on
torsion.
EXAMPLES: We compute the dual of the elliptic curve newform abelian
variety of level `33`, and find the kernel of the modular
map, which has structure `(\ZZ/3)^2`.
::
sage: A,B,C = J0(33)
sage: C
Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
sage: Cd, f, pi = C.dual()
sage: f.matrix()
[3 0]
[0 3]
sage: f.kernel()[0]
Finite subgroup with invariants [3, 3] over QQ of Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
By a theorem the modular degree must thus be `3`::
sage: E = EllipticCurve('33a')
sage: E.modular_degree()
3
Next we compute the dual of a `2`-dimensional new simple
abelian subvariety of `J_0(43)`.
::
sage: A = AbelianVariety('43b'); A
Newform abelian subvariety 43b of dimension 2 of J0(43)
sage: Ad, f, pi = A.dual()
The kernel shows that the modular degree is `2`::
sage: f.kernel()[0]
Finite subgroup with invariants [2, 2] over QQ of Newform abelian subvariety 43b of dimension 2 of J0(43)
Unfortunately, the dual is not implemented in general::
sage: A = J0(22)[0]; A
Simple abelian subvariety 11a(1,22) of dimension 1 of J0(22)
sage: A.dual()
Traceback (most recent call last):
...
NotImplementedError: dual not implemented unless complement shares no simple factors with self.
"""
try:
return self.__dual
except AttributeError:
if not self.is_subvariety_of_ambient_jacobian():
raise NotImplementedError("dual not implemented unless abelian variety is a subvariety of the ambient Jacobian product")
if not self._complement_shares_no_factors_with_same_label():
raise NotImplementedError("dual not implemented unless complement shares no simple factors with self.")
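            # Sketch: let C be the complement of self in the ambient
            # Jacobian J.  The dual is computed as the quotient Q = J/C;
            # phi is the quotient map J -> Q, and composing with the
            # inclusion psi: self -> J gives the map from self to its dual.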
C = self.complement()
Q, phi = self.ambient_variety().quotient(C)
psi = self.ambient_morphism()
self.__dual = Q, phi*psi, phi
return self.__dual
def _factors_with_same_label(self, other):
"""
        Given two modular abelian varieties self and other, this function
        returns a list of the simple abelian subvarieties appearing in the
        decomposition of self whose newform labels also occur among the
        simple factors of other. Each newform label appears at most once
        in the returned list.
INPUT:
- ``other`` - abelian variety
OUTPUT: list of simple abelian varieties
EXAMPLES::
sage: D = J0(33).decomposition(); D
[
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33),
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33),
Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
]
sage: D[0]._factors_with_same_label(D[1])
[Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)]
sage: D[0]._factors_with_same_label(D[2])
[]
sage: (D[0]+D[1])._factors_with_same_label(D[1] + D[2])
[Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)]
This illustrates that the multiplicities in the returned list are
1::
sage: (D[0]+D[1])._factors_with_same_label(J0(33))
[Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)]
This illustrates that the ambient product Jacobians do not have to
be the same::
sage: (D[0]+D[1])._factors_with_same_label(J0(22))
[Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33)]
This illustrates that the actual factor labels are relevant, not
just the isogeny class.
::
sage: (D[0]+D[1])._factors_with_same_label(J1(11))
[]
sage: J1(11)[0].newform_label()
'11aG1'
"""
if not isinstance(other, ModularAbelianVariety_abstract):
raise TypeError("other must be an abelian variety")
D = self.decomposition()
C = set([A.newform_label() for A in other.decomposition()])
Z = []
for X in D:
lbl = X.newform_label()
if lbl in C:
Z.append(X)
C.remove(lbl)
Z.sort()
return Z
def _complement_shares_no_factors_with_same_label(self):
"""
Return True if no simple factor of self has the same newform_label
as any factor in a Poincare complement of self in the ambient
product Jacobian.
EXAMPLES: `J_0(37)` is made up of two non-isogenous
elliptic curves::
sage: J0(37)[0]._complement_shares_no_factors_with_same_label()
True
`J_0(33)` decomposes as a product of two isogenous
elliptic curves with a third nonisogenous curve::
sage: D = J0(33).decomposition(); D
[
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33),
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33),
Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
]
sage: D[0]._complement_shares_no_factors_with_same_label()
False
sage: (D[0]+D[1])._complement_shares_no_factors_with_same_label()
True
sage: D[2]._complement_shares_no_factors_with_same_label()
True
This example illustrates the relevance of the ambient product
Jacobian.
::
sage: D = (J0(11) * J0(11)).decomposition(); D
[
Simple abelian subvariety 11a(1,11) of dimension 1 of J0(11) x J0(11),
Simple abelian subvariety 11a(1,11) of dimension 1 of J0(11) x J0(11)
]
sage: D[0]._complement_shares_no_factors_with_same_label()
False
        This example illustrates that it is the newform label, not the
        isogeny class, that matters::
sage: D = (J0(11)*J1(11)).decomposition(); D
[
Simple abelian subvariety 11aG1(1,11) of dimension 1 of J0(11) x J1(11),
Simple abelian subvariety 11a(1,11) of dimension 1 of J0(11) x J1(11)
]
sage: D[0]._complement_shares_no_factors_with_same_label()
True
sage: D[0].newform_label()
'11aG1'
sage: D[1].newform_label()
'11a'
"""
try:
return self.__complement_shares
except AttributeError:
t = len(self._factors_with_same_label(self.complement())) == 0
self.__complement_shares = t
return t
def __getitem__(self, i):
"""
        Return the `i^{th}` decomposition factor of self,
        or the slice `i` of the decomposition of self.
EXAMPLES::
sage: J = J0(389)
sage: J.decomposition()
[
Simple abelian subvariety 389a(1,389) of dimension 1 of J0(389),
Simple abelian subvariety 389b(1,389) of dimension 2 of J0(389),
Simple abelian subvariety 389c(1,389) of dimension 3 of J0(389),
Simple abelian subvariety 389d(1,389) of dimension 6 of J0(389),
Simple abelian subvariety 389e(1,389) of dimension 20 of J0(389)
]
sage: J[2]
Simple abelian subvariety 389c(1,389) of dimension 3 of J0(389)
sage: J[-1]
Simple abelian subvariety 389e(1,389) of dimension 20 of J0(389)
sage: J = J0(125); J.decomposition()
[
Simple abelian subvariety 125a(1,125) of dimension 2 of J0(125),
Simple abelian subvariety 125b(1,125) of dimension 2 of J0(125),
Simple abelian subvariety 125c(1,125) of dimension 4 of J0(125)
]
sage: J[:2]
[
Simple abelian subvariety 125a(1,125) of dimension 2 of J0(125),
Simple abelian subvariety 125b(1,125) of dimension 2 of J0(125)
]
"""
return self.decomposition()[i]
class ModularAbelianVariety(ModularAbelianVariety_abstract):
def __init__(self, groups, lattice=None, base_field=QQ, is_simple=None, newform_level=None,
isogeny_number=None, number=None, check=True):
r"""
Create a modular abelian variety with given level and base field.
INPUT:
- ``groups`` - a tuple of congruence subgroups
- ``lattice`` - (default: `\ZZ^n`) a
full lattice in `\ZZ^n`, where `n` is the
sum of the dimensions of the spaces of cuspidal modular symbols
corresponding to each `\Gamma \in` groups
- ``base_field`` - a field (default:
`\QQ`)
EXAMPLES::
sage: J0(23)
Abelian variety J0(23) of dimension 2
"""
ModularAbelianVariety_abstract.__init__(self, groups, base_field, is_simple=is_simple, newform_level=newform_level,
isogeny_number=isogeny_number, number=number, check=check)
if lattice is None:
lattice = ZZ**(2*self._ambient_dimension())
if check:
n = self._ambient_dimension()
if not is_FreeModule(lattice):
raise TypeError("lattice must be a free module")
if lattice.base_ring() != ZZ:
raise TypeError("lattice must be over ZZ")
if lattice.degree() != 2*n:
raise ValueError("lattice must have degree 2*n (=%s)"%(2*n))
if not lattice.saturation().is_submodule(lattice): # potentially expensive
raise ValueError("lattice must be full")
self.__lattice = lattice
def lattice(self):
"""
Return the lattice that defines this abelian variety.
OUTPUT:
- ``lattice`` - a lattice embedded in the rational
homology of the ambient product Jacobian
EXAMPLES::
sage: A = (J0(11) * J0(37))[1]; A
Simple abelian subvariety 37a(1,37) of dimension 1 of J0(11) x J0(37)
sage: type(A)
<class 'sage.modular.abvar.abvar.ModularAbelianVariety_with_category'>
sage: A.lattice()
Free module of degree 6 and rank 2 over Integer Ring
Echelon basis matrix:
[ 0 0 1 -1 1 0]
[ 0 0 0 0 2 -1]
"""
return self.__lattice
class ModularAbelianVariety_modsym_abstract(ModularAbelianVariety_abstract):
# Anything that derives from this class must define the
    # _modular_symbols method, which returns a cuspidal modular symbols
# space over QQ. It can have any sign.
def _modular_symbols(self):
"""
Return the space of modular symbols corresponding to this modular
symbols abelian variety.
EXAMPLES: This function is in the abstract base class, so it raises
a NotImplementedError::
sage: M = ModularSymbols(37).cuspidal_submodule()
sage: A = M.abelian_variety(); A
Abelian variety J0(37) of dimension 2
sage: sage.modular.abvar.abvar.ModularAbelianVariety_modsym_abstract._modular_symbols(A)
Traceback (most recent call last):
...
NotImplementedError: bug -- must define this
Of course this function isn't called in practice, so this works::
sage: A._modular_symbols()
Modular Symbols subspace of dimension 4 of Modular Symbols space of dimension 5 for Gamma_0(37) of weight 2 with sign 0 over Rational Field
"""
raise NotImplementedError("bug -- must define this")
def __add__(self, other):
"""
Add two modular abelian variety factors.
EXAMPLES::
sage: A = J0(42); D = A.decomposition(); D
[
Simple abelian subvariety 14a(1,42) of dimension 1 of J0(42),
Simple abelian subvariety 14a(3,42) of dimension 1 of J0(42),
Simple abelian subvariety 21a(1,42) of dimension 1 of J0(42),
Simple abelian subvariety 21a(2,42) of dimension 1 of J0(42),
Simple abelian subvariety 42a(1,42) of dimension 1 of J0(42)
]
sage: D[0] + D[1]
Abelian subvariety of dimension 2 of J0(42)
sage: D[1].is_subvariety(D[0] + D[1])
True
sage: D[0] + D[1] + D[2]
Abelian subvariety of dimension 3 of J0(42)
sage: D[0] + D[0]
Abelian subvariety of dimension 1 of J0(42)
sage: D[0] + D[0] == D[0]
True
sage: sum(D, D[0]) == A
True
"""
if not is_ModularAbelianVariety(other):
if other == 0:
return self
raise TypeError("sum not defined")
if not isinstance(other, ModularAbelianVariety_modsym_abstract):
return ModularAbelianVariety_abstract.__add__(self, other)
if self.groups() != other.groups():
raise TypeError("sum not defined since ambient spaces different")
M = self.modular_symbols() + other.modular_symbols()
return ModularAbelianVariety_modsym(M)
def groups(self):
"""
Return the tuple of groups associated to the modular symbols
abelian variety. This is always a 1-tuple.
OUTPUT: tuple
EXAMPLES::
sage: A = ModularSymbols(33).cuspidal_submodule().abelian_variety(); A
Abelian variety J0(33) of dimension 3
sage: A.groups()
(Congruence Subgroup Gamma0(33),)
sage: type(A)
<class 'sage.modular.abvar.abvar.ModularAbelianVariety_modsym_with_category'>
"""
return (self._modular_symbols().group(), )
def lattice(self):
r"""
Return the lattice defining this modular abelian variety.
OUTPUT:
A free `\ZZ`-module embedded in an ambient `\QQ`-vector space.
EXAMPLES::
sage: A = ModularSymbols(33).cuspidal_submodule()[0].abelian_variety(); A
Abelian subvariety of dimension 1 of J0(33)
sage: A.lattice()
Free module of degree 6 and rank 2 over Integer Ring
User basis matrix:
[ 1 0 0 -1 0 0]
[ 0 0 1 0 1 -1]
sage: type(A)
<class 'sage.modular.abvar.abvar.ModularAbelianVariety_modsym_with_category'>
"""
try:
return self.__lattice
except AttributeError:
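            # If M is the whole cuspidal subspace, the lattice is ZZ^d in
            # the standard basis.  Otherwise, express the integral structure
            # of M in coordinates relative to the integral structure of the
            # cuspidal subspace S; this gives the lattice of self inside the
            # homology of the ambient Jacobian.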
M = self.modular_symbols()
S = M.ambient_module().cuspidal_submodule()
if M.dimension() == S.dimension():
L = ZZ**M.dimension()
else:
K0 = M.integral_structure()
K1 = S.integral_structure()
L = K1.coordinate_module(K0)
self.__lattice = L
return self.__lattice
def _set_lattice(self, lattice):
"""
Set the lattice of this modular symbols abelian variety.
.. warning::
This is only for internal use. Do not use this unless you
really really know what you're doing. That's why there is
an underscore in this method name.
INPUT:
- ``lattice`` - a lattice
EXAMPLES: We do something evil - there's no type checking since
this function is for internal use only::
sage: A = ModularSymbols(33).cuspidal_submodule().abelian_variety()
sage: A._set_lattice(5)
sage: A.lattice()
5
"""
self.__lattice = lattice
def modular_symbols(self, sign=0):
"""
Return space of modular symbols (with given sign) associated to
this modular abelian variety, if it can be found by cutting down
using Hecke operators. Otherwise raise a RuntimeError exception.
EXAMPLES::
sage: A = J0(37)
sage: A.modular_symbols()
Modular Symbols subspace of dimension 4 of Modular Symbols space of dimension 5 for Gamma_0(37) of weight 2 with sign 0 over Rational Field
sage: A.modular_symbols(1)
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 3 for Gamma_0(37) of weight 2 with sign 1 over Rational Field
More examples::
sage: J0(11).modular_symbols()
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field
sage: J0(11).modular_symbols(sign=1)
Modular Symbols subspace of dimension 1 of Modular Symbols space of dimension 2 for Gamma_0(11) of weight 2 with sign 1 over Rational Field
sage: J0(11).modular_symbols(sign=0)
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field
sage: J0(11).modular_symbols(sign=-1)
Modular Symbols space of dimension 1 for Gamma_0(11) of weight 2 with sign -1 over Rational Field
Even more examples::
sage: A = J0(33)[1]; A
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33)
sage: A.modular_symbols()
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field
It is not always possible to determine the sign subspaces::
sage: A.modular_symbols(1)
Traceback (most recent call last):
...
RuntimeError: unable to determine sign (=1) space of modular symbols
::
sage: A.modular_symbols(-1)
Traceback (most recent call last):
...
RuntimeError: unable to determine sign (=-1) space of modular symbols
"""
M = self._modular_symbols().modular_symbols_of_sign(sign)
if (sign != 0 and M.dimension() != self.dimension()) or (sign == 0 and M.dimension() != 2*self.dimension()):
raise RuntimeError("unable to determine sign (=%s) space of modular symbols"%sign)
return M
def _compute_hecke_polynomial(self, n, var='x'):
"""
Return the characteristic polynomial of the `n^{th}` Hecke
operator on self.
.. note::
            If self has dimension d, then this is a polynomial of
            degree d, not 2\*d: it is the square root of the
            characteristic polynomial of the Hecke operator
            on integral or rational homology (which has degree 2\*d).
EXAMPLES::
sage: J0(11).hecke_polynomial(2)
x + 2
sage: J0(23)._compute_hecke_polynomial(2)
x^2 + x - 1
sage: J1(13).hecke_polynomial(2)
x^2 + 3*x + 3
sage: factor(J0(43).hecke_polynomial(2))
(x + 2) * (x^2 - 2)
The Hecke polynomial is the square root of the characteristic
polynomial::
sage: factor(J0(43).hecke_operator(2).charpoly())
(x + 2) * (x^2 - 2)
"""
return sqrt_poly(self.modular_symbols().hecke_polynomial(n, var))
def _integral_hecke_matrix(self, n, sign=0):
"""
Return the action of the Hecke operator `T_n` on the
integral homology of self.
INPUT:
- ``n`` - a positive integer
- ``sign`` - 0, +1, or -1; if 1 or -1 act on the +1 or
-1 quotient of the integral homology.
EXAMPLES::
sage: J1(13)._integral_hecke_matrix(2) # slightly random choice of basis
[-2 0 -1 1]
[ 1 -1 0 -1]
[ 1 1 -2 0]
[ 0 1 -1 -1]
sage: J1(13)._integral_hecke_matrix(2,sign=1) # slightly random choice of basis
[-1 1]
[-1 -2]
sage: J1(13)._integral_hecke_matrix(2,sign=-1) # slightly random choice of basis
[-2 -1]
[ 1 -1]
"""
return self.modular_symbols(sign).integral_hecke_matrix(n)
def _rational_hecke_matrix(self, n, sign=0):
"""
Return the action of the Hecke operator `T_n` on the
rational homology of self.
INPUT:
- ``n`` - a positive integer
- ``sign`` - 0, +1, or -1; if 1 or -1 act on the +1 or
-1 quotient of the rational homology.
EXAMPLES::
sage: J1(13)._rational_hecke_matrix(2) # slightly random choice of basis
[-2 0 -1 1]
[ 1 -1 0 -1]
[ 1 1 -2 0]
[ 0 1 -1 -1]
sage: J0(43)._rational_hecke_matrix(2,sign=1) # slightly random choice of basis
[-2 0 1]
[-1 -2 2]
[-2 0 2]
"""
return self._integral_hecke_matrix(n, sign=sign).change_ring(QQ)
def group(self):
"""
Return the congruence subgroup that this modular abelian variety
is associated to.
EXAMPLES::
sage: J0(13).group()
Congruence Subgroup Gamma0(13)
sage: J1(997).group()
Congruence Subgroup Gamma1(997)
sage: JH(37,[3]).group()
Congruence Subgroup Gamma_H(37) with H generated by [3]
sage: J0(37)[1].groups()
(Congruence Subgroup Gamma0(37),)
"""
return self.modular_symbols().group()
def is_subvariety(self, other):
"""
Return True if self is a subvariety of other.
EXAMPLES::
sage: J = J0(37); J
Abelian variety J0(37) of dimension 2
sage: A = J[0]; A
Simple abelian subvariety 37a(1,37) of dimension 1 of J0(37)
sage: A.is_subvariety(J)
True
sage: A.is_subvariety(J0(11))
False
There may be a way to map `A` into `J_0(74)`, but
`A` is not equipped with any special structure of an
embedding.
::
sage: A.is_subvariety(J0(74))
False
Some ambient examples::
sage: J = J0(37)
sage: J.is_subvariety(J)
True
sage: J.is_subvariety(25)
False
More examples::
sage: A = J0(42); D = A.decomposition(); D
[
Simple abelian subvariety 14a(1,42) of dimension 1 of J0(42),
Simple abelian subvariety 14a(3,42) of dimension 1 of J0(42),
Simple abelian subvariety 21a(1,42) of dimension 1 of J0(42),
Simple abelian subvariety 21a(2,42) of dimension 1 of J0(42),
Simple abelian subvariety 42a(1,42) of dimension 1 of J0(42)
]
sage: D[0].is_subvariety(A)
True
sage: D[1].is_subvariety(D[0] + D[1])
True
sage: D[2].is_subvariety(D[0] + D[1])
False
"""
if not is_ModularAbelianVariety(other):
return False
if not isinstance(other, ModularAbelianVariety_modsym_abstract):
return ModularAbelianVariety_abstract.is_subvariety(self, other)
return self.modular_symbols().is_submodule(other.modular_symbols())
def is_ambient(self):
"""
Return True if this abelian variety, which is attached to a modular
symbols space, is attached to the cuspidal subspace of the ambient
modular symbols space.
OUTPUT: bool
EXAMPLES::
sage: A = ModularSymbols(43).cuspidal_subspace().abelian_variety(); A
Abelian variety J0(43) of dimension 3
sage: A.is_ambient()
True
sage: type(A)
<class 'sage.modular.abvar.abvar.ModularAbelianVariety_modsym_with_category'>
sage: A = ModularSymbols(43).cuspidal_subspace()[1].abelian_variety(); A
Abelian subvariety of dimension 2 of J0(43)
sage: A.is_ambient()
False
"""
return self.degree() == self.dimension()
def dimension(self):
"""
Return the dimension of this modular abelian variety.
EXAMPLES::
sage: J0(37)[0].dimension()
1
sage: J0(43)[1].dimension()
2
sage: J1(17)[1].dimension()
4
"""
try:
return self._dimension
except AttributeError:
M = self._modular_symbols()
if M.sign() == 0:
d = M.dimension() // 2
else:
d = M.dimension()
self._dimension = d
return d
def new_subvariety(self, p=None):
"""
Return the new or `p`-new subvariety of self.
INPUT:
- ``self`` - a modular abelian variety
- ``p`` - prime number or None (default); if p is a
prime, return the p-new subvariety. Otherwise return the full new
subvariety.
EXAMPLES::
sage: J0(33).new_subvariety()
Abelian subvariety of dimension 1 of J0(33)
sage: J0(100).new_subvariety()
Abelian subvariety of dimension 1 of J0(100)
sage: J1(13).new_subvariety()
Abelian variety J1(13) of dimension 2
"""
try:
return self.__new_subvariety[p]
except AttributeError:
self.__new_subvariety = {}
except KeyError:
pass
A = self.modular_symbols()
N = A.new_submodule(p=p)
B = ModularAbelianVariety_modsym(N)
self.__new_subvariety[p] = B
return B
def old_subvariety(self, p=None):
"""
Return the old or `p`-old abelian variety of self.
INPUT:
- ``self`` - a modular abelian variety
- ``p`` - prime number or None (default); if p is a
prime, return the p-old subvariety. Otherwise return the full old
subvariety.
EXAMPLES::
sage: J0(33).old_subvariety()
Abelian subvariety of dimension 2 of J0(33)
sage: J0(100).old_subvariety()
Abelian subvariety of dimension 6 of J0(100)
sage: J1(13).old_subvariety()
Abelian subvariety of dimension 0 of J1(13)
"""
try:
return self.__old_subvariety[p]
except AttributeError:
self.__old_subvariety = {}
except KeyError:
pass
A = self.modular_symbols()
N = A.old_submodule(p=p)
B = ModularAbelianVariety_modsym(N)
self.__old_subvariety[p] = B
return B
def decomposition(self, simple=True, bound=None):
r"""
Decompose this modular abelian variety as a product of abelian
subvarieties, up to isogeny.
INPUT:
- ``simple`` - bool (default: True); if True, all factors are
simple. If False, each factor returned is isogenous to a power of a
simple and the simples in each factor are distinct.
- ``bound`` - int (default: None) if given, only use
Hecke operators up to this bound when decomposing. This can give
wrong answers, so use with caution!
EXAMPLES::
sage: J = J0(33)
sage: J.decomposition()
[
Simple abelian subvariety 11a(1,33) of dimension 1 of J0(33),
Simple abelian subvariety 11a(3,33) of dimension 1 of J0(33),
Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)
]
sage: J1(17).decomposition()
[
Simple abelian subvariety 17aG1(1,17) of dimension 1 of J1(17),
Simple abelian subvariety 17bG1(1,17) of dimension 4 of J1(17)
]
"""
try:
return self.__decomposition[(simple, bound)]
except KeyError:
pass
except AttributeError:
self.__decomposition = {}
if not self.is_ambient():
S = ModularAbelianVariety_abstract.decomposition(self, simple=simple, bound=bound)
else:
A = self.modular_symbols()
amb = A.ambient_module()
G = amb.group()
S = amb.cuspidal_submodule().integral_structure()
if simple:
M = A.level()
D = []
for N in reversed(divisors(M)):
if N > 1:
isogeny_number = 0
A = amb.modular_symbols_of_level(N).cuspidal_subspace().new_subspace()
if bound is None:
X = factor_new_space(A)
else:
X = A.decomposition(bound = bound)
for B in X:
for t in divisors(M//N):
D.append(ModularAbelianVariety_modsym(B.degeneracy_map(M, t).image(),
is_simple=True, newform_level=(N, G),
isogeny_number=isogeny_number,
number=(t,M)))
isogeny_number += 1
elif A == amb.cuspidal_submodule():
D = [ModularAbelianVariety_modsym(B) for B in A.decomposition(bound = bound)]
else:
D = ModularAbelianVariety_abstract.decomposition(self, simple=simple, bound=bound)
D.sort()
S = Sequence(D, immutable=True, cr=True, universe=self.category())
self.__decomposition[(simple, bound)] = S
return S
class ModularAbelianVariety_modsym(ModularAbelianVariety_modsym_abstract):
def __init__(self, modsym, lattice=None, newform_level=None,
is_simple=None, isogeny_number=None, number=None, check=True):
"""
Modular abelian variety that corresponds to a Hecke stable space of
cuspidal modular symbols.
EXAMPLES: We create a modular abelian variety attached to a space
of modular symbols.
::
sage: M = ModularSymbols(23).cuspidal_submodule()
sage: A = M.abelian_variety(); A
Abelian variety J0(23) of dimension 2
"""
if check:
if not isinstance(modsym, ModularSymbolsSpace):
raise TypeError("modsym must be a modular symbols space")
if modsym.sign() != 0:
raise TypeError("modular symbols space must have sign 0")
if not modsym.is_cuspidal():
raise ValueError("modsym must be cuspidal")
ModularAbelianVariety_abstract.__init__(self, (modsym.group(), ), modsym.base_ring(),
newform_level=newform_level, is_simple=is_simple,
isogeny_number=isogeny_number, number=number, check=check)
if lattice is not None:
self._set_lattice(lattice)
self.__modsym = modsym
def _modular_symbols(self):
"""
Return the modular symbols space that defines this modular abelian
variety.
OUTPUT: space of modular symbols
EXAMPLES::
sage: M = ModularSymbols(37).cuspidal_submodule()
sage: A = M.abelian_variety(); A
Abelian variety J0(37) of dimension 2
sage: A._modular_symbols()
Modular Symbols subspace of dimension 4 of Modular Symbols space of dimension 5 for Gamma_0(37) of weight 2 with sign 0 over Rational Field
"""
return self.__modsym
def component_group_order(self, p):
"""
Return the order of the component group of the special fiber
at p of the Neron model of self.
NOTE: For bad primes, this is only implemented when the group
is Gamma0 and p exactly divides the level.
NOTE: the input abelian variety must be simple
ALGORITHM: See "Component Groups of Quotients of J0(N)" by Kohel and Stein. That
paper is about optimal quotients; however, section 4.1 of Conrad-Stein "Component
Groups of Purely Toric Quotients", one sees that the component group of an optimal
quotient is the same as the component group of its dual (which is the subvariety).
INPUT:
- p -- a prime number
OUTPUT:
- Integer
EXAMPLES::
sage: A = J0(37)[1]
sage: A.component_group_order(37)
3
sage: A = J0(43)[1]
sage: A.component_group_order(37)
1
sage: A.component_group_order(43)
7
sage: A = J0(23)[0]
sage: A.component_group_order(23)
11
"""
if not self.is_simple():
raise ValueError("self must be simple")
p = Integer(p)
if not p.is_prime(): raise ValueError("p must be a prime integer")
try: return self.__component_group[p][0]
except AttributeError:
self.__component_group = {}
except KeyError: pass
# Easy special case -- a prime of good reduction
if self.level() % p != 0:
one = Integer(1)
self.__component_group[p] = (one,one,one)
return one
# Cases that we don't know how to handle yet.
if not is_Gamma0(self.group()):
raise NotImplementedError("computation of component group not implemented when group isn't Gamma0")
if self.level() % (p*p) == 0:
raise NotImplementedError("computation of component group not implemented when p^2 divides the level")
# Now we're on Gamma0(p*M) with gcd(p,M) = 1.
# 1. Compute factor of Brandt module space, and put integral structure on it.
# TODO -- in case self.level() is prime, should use
# supersingular module instead for massive speedup... Of
# course, then one can just use Emerton's theorem that the
# component group order equals the torsion order, and avoid
# all of this!
XI = self.brandt_module(p)
Y = XI.ambient_module()
n = Y.dimension()
# X_ZZ is the submodule of degree 0 divisors
M = ZZ**n
deg_zero = []
for k in range(1,n):
v = vector(ZZ, n)
v[0] = 1
v[k] = -1
deg_zero.append(v)
X_ZZ = M.span(deg_zero, ZZ)
XI_ZZ = XI.free_module().intersection(M)
# 2. Compute the map alpha: X --> Hom(X[I],Z) over ZZ
# todo -- this could be done more quickly with a clever matrix multiply
B = [XI(v) for v in XI_ZZ.basis()]
mat = []
for v in M.basis():
w = Y(v)
mat.append([w.monodromy_pairing(b) for b in B])
monodromy = matrix(ZZ, mat)
alpha = X_ZZ.basis_matrix().change_ring(ZZ) * monodromy
# 3. Compute invariants:
# * Phi_X = #coker(alpha)
# * m_X = #(alpha(X)/alpha(X[I]))
alphaX = alpha.row_module()
Phi_X_invariants = alphaX.basis_matrix().change_ring(ZZ).elementary_divisors()
Phi_X = prod(Phi_X_invariants + [Integer(1)])
W = alphaX.span([b*monodromy for b in XI_ZZ.basis()], ZZ)
m_X = Integer(W.index_in(alphaX))
# 4. Compute the modular degree
moddeg = self.modular_degree()
# 5. Obtain the component group order using Theorem 1 of [Kohel-Stein]
Phi = Phi_X * moddeg / m_X
# 6. Record the answer
self.__component_group[p] = (Phi, Phi_X_invariants, m_X)
return Phi
def _invariants_of_image_of_component_group_of_J0(self, p):
"""
Return the elementary invariants of the image of the component
group of J0(N). The API of this function is subject to
change, which is why it starts with an underscore.
INPUT:
- p -- integer
OUTPUT:
- list -- of elementary invariants
EXAMPLES::
sage: A = J0(62).new_subvariety()[1]; A
Simple abelian subvariety 62b(1,62) of dimension 2 of J0(62)
sage: A._invariants_of_image_of_component_group_of_J0(2)
[1, 6]
sage: A.component_group_order(2)
66
"""
self.component_group_order(p)
return list(self.__component_group[p][1]) # make a copy
def tamagawa_number(self, p):
"""
Return the Tamagawa number of this abelian variety at p.
NOTE: For bad primes, this is only implemented when the group
is Gamma0, p exactly divides the level, and Atkin-Lehner
acts diagonally on this abelian variety (e.g., if this variety
is new and simple). See the self.component_group command for
more information.
NOTE: the input abelian variety must be simple
In cases where this function doesn't work, consider using the
self.tamagawa_number_bounds function instead.
INPUT:
- p -- a prime number
OUTPUT:
- Integer
EXAMPLES::
sage: A = J0(37)[1]
sage: A.tamagawa_number(37)
3
sage: A = J0(43)[1]
sage: A.tamagawa_number(37)
1
sage: A.tamagawa_number(43)
7
sage: A = J0(23)[0]
sage: A.tamagawa_number(23)
11
"""
try: return self.__tamagawa_number[p]
except AttributeError: self.__tamagawa_number = {}
except KeyError: pass
if not self.is_simple():
raise ValueError("self must be simple")
try:
g = self.component_group_order(p)
except NotImplementedError:
raise NotImplementedError("Tamagawa number can't be determined using known algorithms, so consider using the tamagawa_number_bounds function instead")
div, mul, mul_primes = self.tamagawa_number_bounds(p)
if div == mul:
cp = div
else:
raise NotImplementedError("the Tamagawa number at %s is a power of 2, but the exact power can't be determined using known algorithms. Consider using the tamagawa_number_bounds function instead."%p)
self.__tamagawa_number[p] = cp
return cp
def tamagawa_number_bounds(self, p):
"""
Return a divisor and multiple of the Tamagawa number of self at p.
NOTE: the input abelian variety must be simple
INPUT:
- p -- a prime number
OUTPUT:
- div -- integer; divisor of Tamagawa number at p
- mul -- integer; multiple of Tamagawa number at p
- mul_primes -- tuple; in case mul==0, a list of all
primes that can possibly divide the Tamagawa number at p.
EXAMPLES::
sage: A = J0(63).new_subvariety()[1]; A
Simple abelian subvariety 63b(1,63) of dimension 2 of J0(63)
sage: A.tamagawa_number_bounds(7)
(3, 3, ())
sage: A.tamagawa_number_bounds(3)
(1, 0, (2, 3, 5))
"""
try: return self.__tamagawa_number_bounds[p]
except AttributeError: self.__tamagawa_number_bounds = {}
except KeyError: pass
if not self.is_simple():
raise ValueError("self must be simple")
N = self.level()
div = 1; mul = 0; mul_primes = []
if N % p != 0:
div = 1; mul = 1
elif N.valuation(p) == 1:
M = self.modular_symbols(sign=1)
if is_Gamma0(M.group()):
g = self.component_group_order(p)
W = M.atkin_lehner_operator(p).matrix()
cp = None
if W == -1:
# Frob acts trivially
div = g; mul = g
elif W == 1:
# Frob acts by -1
n = g.valuation(2)
if n <= 1:
div = 2**n
else:
phi_X_invs = self._invariants_of_image_of_component_group_of_J0(p)
m = max(1, len([z for z in phi_X_invs if z%2==0]))
div = 2**m
mul = 2**n
else:
raise NotImplementedError("Atkin-Lehner at p must act as a scalar")
else:
mul_primes = list(sorted(set([p] + [q for q in prime_range(2,2*self.dimension()+2)])))
div = Integer(div)
mul = Integer(mul)
mul_primes = tuple(mul_primes)
self.__tamagawa_number_bounds[p] = (div, mul, mul_primes)
return (div, mul, mul_primes)
def brandt_module(self, p):
"""
Return the Brandt module at p that corresponds to self. This
is the factor of the vector space on the ideal class set in an
order of level N in the quaternion algebra ramified at p and
infinity.
INPUT:
- p -- prime that exactly divides the level
OUTPUT:
- Brandt module space that corresponds to self.
EXAMPLES::
sage: J0(43)[1].brandt_module(43)
Subspace of dimension 2 of Brandt module of dimension 4 of level 43 of weight 2 over Rational Field
sage: J0(43)[1].brandt_module(43).basis()
((1, 0, -1/2, -1/2), (0, 1, -1/2, -1/2))
sage: J0(43)[0].brandt_module(43).basis()
((0, 0, 1, -1),)
sage: J0(35)[0].brandt_module(5).basis()
((1, 0, -1, 0),)
sage: J0(35)[0].brandt_module(7).basis()
((1, -1, 1, -1),)
"""
try: return self.__brandt_module[p]
except AttributeError: self.__brandt_module = {}
except KeyError: pass
p = Integer(p)
if not is_Gamma0(self.group()):
raise NotImplementedError("Brandt module only defined on Gamma0")
if not p.is_prime(): raise ValueError("p must be a prime integer")
if self.level().valuation(p) != 1:
raise ValueError("p must exactly divide the level")
M = self.level() / p
from sage.modular.all import BrandtModule
V = BrandtModule(p, M)
# now cut out version of self in B
S = self.modular_symbols(sign=1)
B = S.hecke_bound()
if self.dimension() <= 3:
q = 2
while V.dimension() > self.dimension() and q <= B:
f = S.hecke_polynomial(q)
V = f(V.hecke_operator(q)).kernel()
q = next_prime(q)
if V.dimension() > self.dimension():
raise RuntimeError("unable to cut out Brandt module (got dimension %s instead of %s)"%(V.dimension(), self.dimension()))
else:
D = V.decomposition()
D = [A for A in D if A.dimension() == self.dimension()]
# now figure out which element of D is isomorphic to self.
q = 2
while len(D) > 1 and q <= B:
f = S.hecke_polynomial(q)
D = [A for A in D if A.hecke_polynomial(q) == f]
q = next_prime(q)
if len(D) != 1:
raise RuntimeError("unable to locate Brandt module (got %s candidates instead of 1)"%(len(D)))
V = D[0]
self.__brandt_module[p] = V
return V
def sqrt_poly(f):
"""
Return the square root of the polynomial `f`.
.. note::
At some point something like this should be a member of the
polynomial class. For now this is just used internally by some
charpoly functions above.
EXAMPLES::
sage: R.<x> = QQ[]
sage: f = (x-1)*(x+2)*(x^2 + 1/3*x + 5)
sage: f
x^4 + 4/3*x^3 + 10/3*x^2 + 13/3*x - 10
sage: sage.modular.abvar.abvar.sqrt_poly(f^2)
x^4 + 4/3*x^3 + 10/3*x^2 + 13/3*x - 10
sage: sage.modular.abvar.abvar.sqrt_poly(f)
Traceback (most recent call last):
...
ValueError: f must be a perfect square
sage: sage.modular.abvar.abvar.sqrt_poly(2*f^2)
Traceback (most recent call last):
...
ValueError: f must be monic
"""
if not f.is_monic():
raise ValueError("f must be monic")
try:
return prod([g**Integer(e/Integer(2)) for g,e in f.factor()])
except TypeError:
raise ValueError("f must be a perfect square")
####################################################################################################
# Useful for decomposing exactly the sort of modular symbols spaces that come up here.
from random import randrange
from sage.rings.arith import next_prime
def random_hecke_operator(M, t=None, p=2):
"""
Return a random Hecke operator acting on `M`, obtained by adding
to `t` a random multiple of `T_p`
INPUT:
- ``M`` - modular symbols space
- ``t`` - None or a Hecke operator
- ``p`` - a prime
OUTPUT: a Hecke operator, and the next prime after `p`
EXAMPLES::
sage: M = ModularSymbols(11).cuspidal_subspace()
sage: t, p = sage.modular.abvar.abvar.random_hecke_operator(M)
sage: p
3
sage: t, p = sage.modular.abvar.abvar.random_hecke_operator(M, t, p)
sage: p
5
"""
r = 0
while r == 0:
r = randrange(1,p//2+1) * ZZ.random_element()
t = (0 if t is None else t) + r*M.hecke_operator(p)
return t, next_prime(p)
def factor_new_space(M):
"""
Given a new space `M` of modular symbols, return the
decomposition of `M` into simple subspaces under the Hecke
operators.
INPUT:
- ``M`` - modular symbols space
OUTPUT: list of factors
EXAMPLES::
sage: M = ModularSymbols(37).cuspidal_subspace()
sage: sage.modular.abvar.abvar.factor_new_space(M)
[
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 5 for Gamma_0(37) of weight 2 with sign 0 over Rational Field,
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 5 for Gamma_0(37) of weight 2 with sign 0 over Rational Field
]
"""
t = None; p = 2
for i in range(200):
t, p = random_hecke_operator(M, t, p)
f = t.charpoly()
cube_free = True
for _, e in f.factor():
if e > 2:
cube_free = False
break
if cube_free:
return t.decomposition()
t, p = random_hecke_operator(M, t, p)
raise RuntimeError("unable to factor new space -- this should not happen") # should never happen
def factor_modsym_space_new_factors(M):
"""
Given an ambient modular symbols space, return a complete
factorization of it.
INPUT:
- ``M`` - modular symbols space
OUTPUT: list of decompositions corresponding to each new space.
EXAMPLES::
sage: M = ModularSymbols(33)
sage: sage.modular.abvar.abvar.factor_modsym_space_new_factors(M)
[[
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field
],
[
Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field
]]
"""
eps = M.character()
K = eps.conductor() if eps is not None else 1
N = [M.modular_symbols_of_level(d).cuspidal_subspace().new_subspace() \
for d in M.level().divisors() if d%K == 0 and (d == 11 or d >= 13)]
return [factor_new_space(A) for A in N]
def simple_factorization_of_modsym_space(M, simple=True):
"""
Return factorization of `M`. If simple is False, return
powers of simples.
INPUT:
- ``M`` - modular symbols space
- ``simple`` - bool (default: True)
OUTPUT: sequence
EXAMPLES::
sage: M = ModularSymbols(33)
sage: sage.modular.abvar.abvar.simple_factorization_of_modsym_space(M)
[
(11, 0, 1, Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field),
(11, 0, 3, Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field),
(33, 0, 1, Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field)
]
sage: sage.modular.abvar.abvar.simple_factorization_of_modsym_space(M, simple=False)
[
(11, 0, None, Modular Symbols subspace of dimension 4 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field),
(33, 0, None, Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field)
]
"""
D = []
N = M.level()
for G in factor_modsym_space_new_factors(M):
if len(G) > 0:
# Compute the matrices of the degeneracy maps up.
T = divisors(N//G[0].level())
degen = [G[0].ambient_module().degeneracy_map(N, t).matrix() for t in T]
# Construct a matrix with rows the basis for all the factors
# stacked on top of each other. We just multiply this by each
# degeneracy matrix to get the basis for the images of the
# factors at higher level. By doing matrix multiplies, we
# save time over taking images of individual factors.
matrix = G[0].basis_matrix()
for A in G[1:]:
matrix = matrix.stack(A.basis_matrix())
# Compute the actual images
ims = [matrix * z for z in degen]
# Construct the corresponding subspaces at higher level.
j = 0
for (isog,A) in enumerate(G):
d = A.dimension()
if simple:
for i in range(len(T)):
V = ims[i].matrix_from_rows(range(j, j+d)).row_module()
W = M.submodule(V, check=False)
D.append( (A.level(), isog, T[i], W) )
else:
V = sum(ims[i].matrix_from_rows(range(j, j+d)).row_module() for i in range(len(T)))
W = M.submodule(V, check=False)
D.append( (A.level(), isog, None, W))
j += d
return Sequence(D, cr=True)
def modsym_lattices(M, factors):
"""
Append lattice information to the output of
simple_factorization_of_modsym_space.
INPUT:
- ``M`` - modular symbols spaces
- ``factors`` - Sequence
(simple_factorization_of_modsym_space)
OUTPUT: sequence with more information for each factor (the
lattice)
EXAMPLES::
sage: M = ModularSymbols(33)
sage: factors = sage.modular.abvar.abvar.simple_factorization_of_modsym_space(M, simple=False)
sage: sage.modular.abvar.abvar.modsym_lattices(M, factors)
[
(11, 0, None, Modular Symbols subspace of dimension 4 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field, Free module of degree 6 and rank 4 over Integer Ring
Echelon basis matrix:
[ 1 0 0 0 -1 2]
[ 0 1 0 0 -1 1]
[ 0 0 1 0 -2 2]
[ 0 0 0 1 -1 -1]),
(33, 0, None, Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 9 for Gamma_0(33) of weight 2 with sign 0 over Rational Field, Free module of degree 6 and rank 2 over Integer Ring
Echelon basis matrix:
[ 1 0 0 -1 0 0]
[ 0 0 1 0 1 -1])
]
"""
# 1. Change basis of everything to the ambient integral modular symbols space
# 2. Clear denominator.
# 3. Echelonize/saturate each factor
if len(factors) == 0:
return factors
D = []
I = M.cuspidal_submodule().integral_structure().basis_matrix()
A = factors[0][-1].basis_matrix()
rows = [range(A.nrows())]
for F in factors[1:]:
mat = F[-1].basis_matrix()
i = rows[-1][-1]+1
rows.append(range(i, i + mat.nrows()))
A = A.stack(mat)
X = I.solve_left(A)
X, _ = X._clear_denom()
for i, R in enumerate(rows):
A = X.matrix_from_rows(R)
A = copy(A.saturation())
A.echelonize()
D.append(tuple(list(factors[i]) + [A.row_module()]))
return Sequence(D, cr=True)
|
#!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
# Usage:
# ./run-spec-test.py
# ./run-spec-test.py ./core/i32.json
# ./run-spec-test.py ./core/float_exprs.json --line 2070
# ./run-spec-test.py ./proposals/tail-call/*.json
# ./run-spec-test.py --exec ../build-custom/wasm3
# ./run-spec-test.py --engine "wasmer run" --exec ../build-wasi/wasm3.wasm
# ./run-spec-test.py --engine "wasmer run --backend=llvm" --exec ../build-wasi/wasm3.wasm
#
# TODO
# - Get more tests from: https://github.com/microsoft/ChakraCore/tree/master/test/WasmSpec
# - Fix "Empty Stack" check
# - Check Canonical NaN and Arithmetic NaN separately
# - Fix imports.wast
import argparse
import os, sys, glob, time
import subprocess
import json
import re
import struct
import math
import pathlib
scriptDir = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.join(scriptDir, '..', 'extra'))
from testutils import *
from pprint import pprint
#
# Args handling
#
parser = argparse.ArgumentParser()
parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3")
parser.add_argument("--engine", metavar="<engine>")
parser.add_argument("--timeout", type=int, default=30)
parser.add_argument("--line", metavar="<source line>", type=int)
parser.add_argument("--all", action="store_true")
parser.add_argument("--show-logs", action="store_true")
parser.add_argument("--format", choices=["raw", "hex", "fp"], default="fp")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-s", "--silent", action="store_true")
parser.add_argument("file", nargs='*')
args = parser.parse_args()
if args.line:
args.show_logs = True
#
# Utilities
#
log = open("spec-test.log","w+")
log.write("======================\n")
def warning(msg):
log.write("Warning: " + msg + "\n")
log.flush()
if args.verbose:
print(f"{ansi.WARNING}Warning:{ansi.ENDC} {msg}")
def fatal(msg):
log.write("Fatal: " + msg + "\n")
log.flush()
print(f"{ansi.FAIL}Fatal:{ansi.ENDC} {msg}")
sys.exit(1)
def binaryToFloat(num, t):
if t == "f32":
return struct.unpack('!f', struct.pack('!L', int(num)))[0]
elif t == "f64":
return struct.unpack('!d', struct.pack('!Q', int(num)))[0]
else:
fatal(f"Unknown type '{t}'")
def escape(s):
c = ord(s)
if c < 128 and s.isprintable() and not s in " \n\r\t\\":
return s
if c <= 0xff:
return r'\x{0:02x}'.format(c)
elif c <= 0xffff:
return r'\u{0:04x}'.format(c)
else:
return r'\U{0:08x}'.format(c)
def escape_str(s):
if s == "":
return r'\x00'
return ''.join(escape(c) for c in s)
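# Illustrative behaviour of the helpers above: printable ASCII is kept as-is,
# everything else becomes a Python-style escape, e.g.
#     escape_str("a\nb")  ->  'a\\x0ab'
#     escape_str("")      ->  '\\x00'
# escape_str is applied to the invoked export's name before it is sent to the
# REPL (see the runInvoke setup further below).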
#
# Value format options
#
def formatValueRaw(num, t):
return str(num)
def formatValueHex(num, t):
if t == "f32" or t == "i32":
return "{0:#0{1}x}".format(int(num), 8+2)
elif t == "f64" or t == "i64":
return "{0:#0{1}x}".format(int(num), 16+2)
else:
return str(num)
def formatValueFloat(num, t):
if t == "f32":
s = 6
elif t == "f64":
s = 10
else:
return str(num)
result = "{0:.{1}f}".format(binaryToFloat(num, t), s).rstrip('0')
if result.endswith('.'): result = result + '0'
if len(result) > s*2:
result = "{0:.{1}e}".format(binaryToFloat(num, t), s)
return result
formaters = {
'raw': formatValueRaw,
'hex': formatValueHex,
'fp': formatValueFloat,
}
formatValue = formaters[args.format]
if args.format == "fp":
print("When using fp display format, values are compared loosely (some tests may produce false positives)")
#
# Spec tests preparation
#
if not (os.path.isdir("./core") and os.path.isdir("./proposals")):
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
officialSpec = "https://github.com/wasm3/wasm-core-testsuite/archive/master.zip"
print(f"Downloading {officialSpec}")
resp = urlopen(officialSpec)
with ZipFile(BytesIO(resp.read())) as zipFile:
for zipInfo in zipFile.infolist():
if re.match(r".*-master/.*/.*(\.wasm|\.json)", zipInfo.filename):
parts = pathlib.Path(zipInfo.filename).parts
newpath = str(pathlib.Path(*parts[1:-1]))
newfn = str(pathlib.Path(*parts[-1:]))
ensure_path(newpath)
newpath = newpath + "/" + newfn
zipInfo.filename = newpath
zipFile.extract(zipInfo)
#
# Wasm3 REPL
#
from subprocess import Popen, STDOUT, PIPE
from threading import Thread
from queue import Queue, Empty
import shlex
def get_engine_cmd(engine, exe):
if engine:
cmd = shlex.split(engine)
if "wasirun" in engine or "wasm3" in engine:
return cmd + [exe, "--repl"]
elif "wasmer" in engine:
return cmd + ["--dir=.", exe, "--", "--repl"]
elif "wasmtime" in engine:
return cmd + ["--dir=.", exe, "--", "--repl"]
elif "iwasm" in engine:
return cmd + ["--dir=.", exe, "--repl"]
elif "wavm" in engine:
return cmd + ["--mount-root", ".", exe, "--repl"] # TODO, fix path
else:
fatal(f"Don't know how to run engine {engine}")
else:
if exe.endswith(".wasm"):
fatal(f"Need engine to execute wasm")
return shlex.split(exe) + ["--repl"]
class Wasm3():
def __init__(self, exe, engine=None):
self.exe = exe
self.engine = engine
self.p = None
self.loaded = None
self.timeout = args.timeout
self.autorestart = True
self.run()
def run(self):
if self.p:
self.terminate()
cmd = get_engine_cmd(self.engine, self.exe)
#print(f"wasm3: Starting {' '.join(cmd)}")
self.q = Queue()
self.p = Popen(cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
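# stdout is drained on a separate daemon thread into a Queue so that
# _read_until() can poll with a timeout instead of blocking on a pipe read;
# a final None in the queue marks EOF (i.e. the interpreter process died).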
def _read_output(out, queue):
for data in iter(lambda: out.read(1024), b''):
queue.put(data)
queue.put(None)
self.t = Thread(target=_read_output, args=(self.p.stdout, self.q))
self.t.daemon = True
self.t.start()
try:
self._read_until("wasm3> ")
except Exception as e:
print(f"wasm3: Could not start: {e}")
def restart(self):
print(f"wasm3: Restarting")
for i in range(10):
try:
self.run()
try:
if self.loaded:
self.load(self.loaded)
except Exception as e:
pass
break
except Exception as e:
print(f"wasm3: {e} => retry")
time.sleep(0.1)
def init(self):
return self._run_cmd(f":init\n")
def version(self):
return self._run_cmd(f":version\n")
def load(self, fn):
self.loaded = None
res = self._run_cmd(f":load {fn}\n")
self.loaded = fn
return res
def invoke(self, cmd):
return self._run_cmd(" ".join(map(str, cmd)) + "\n")
def _run_cmd(self, cmd):
if self.autorestart and not self._is_running():
self.restart()
self._flush_input()
#print(f"wasm3: {cmd.strip()}")
self._write(cmd)
return self._read_until("wasm3> ")
def _read_until(self, token):
buff = ""
tout = time.time() + self.timeout
error = None
while time.time() < tout:
try:
data = self.q.get(timeout=0.1)
if data is None:
error = "Crashed"
break
buff = buff + data.decode("utf-8")
idx = buff.rfind(token)
if idx >= 0:
return buff[0:idx].strip()
except Empty:
pass
else:
error = "Timeout"
self.terminate()
raise Exception(error)
def _write(self, data):
self.p.stdin.write(data.encode("utf-8"))
self.p.stdin.flush()
def _is_running(self):
return self.p and (self.p.poll() is None)
def _flush_input(self):
while not self.q.empty():
self.q.get()
def terminate(self):
self.p.stdin.close()
self.p.terminate()
self.p.wait(timeout=1.0)
self.p = None
#
# Actual test
#
wasm3 = Wasm3(args.exec, args.engine)
print("Version: " + wasm3.version())
blacklist = Blacklist([
"float_exprs.wast:* f32.nonarithmetic_nan_bitpattern*",
"imports.wast:*",
"names.wast:630 *", # name that starts with '\0'
])
stats = dotdict(total_run=0, skipped=0, failed=0, crashed=0, timeout=0, success=0, missing=0)
# Convert some trap names from the original spec
trapmap = {
"unreachable": "unreachable executed"
}
def runInvoke(test):
test.cmd = [test.action.field]
displayArgs = []
for arg in test.action.args:
test.cmd.append(arg['value'])
displayArgs.append(formatValue(arg['value'], arg['type']))
test_id = f"{test.source} {test.wasm} {test.cmd[0]}({', '.join(test.cmd[1:])})"
if test_id in blacklist and not args.all:
warning(f"Skipped {test_id} (blacklisted)")
stats.skipped += 1
return
if args.verbose:
print(f"Running {test_id}")
stats.total_run += 1
output = ""
actual = None
actual_val = None
force_fail = False
try:
output = wasm3.invoke(test.cmd)
except Exception as e:
actual = f"<{e}>"
force_fail = True
# Parse the actual output
if not actual:
result = re.findall(r'Result: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "result " + result[-1]
actual_val = result[0]
if not actual:
result = re.findall(r'Error: \[trap\] (.*?) \(', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "trap " + result[-1]
if not actual:
result = re.findall(r'Error: (.*?)$', "\n" + output + "\n", re.MULTILINE)
if len(result) > 0:
actual = "error " + result[-1]
if not actual:
actual = "<No Result>"
force_fail = True
if actual == "error no operation ()":
actual = "<Not Implemented>"
stats.missing += 1
force_fail = True
elif actual == "<Crashed>":
stats.crashed += 1
force_fail = True
elif actual == "<Timeout>":
stats.timeout += 1
force_fail = True
# Prepare the expected result
expect = None
if "expected" in test:
if len(test.expected) == 0:
expect = "result <Empty Stack>"
elif len(test.expected) == 1:
t = test.expected[0]['type']
value = str(test.expected[0]['value'])
expect = "result " + value
if actual_val != None:
if (t == "f32" or t == "f64") and (value == "<Canonical NaN>" or value == "<Arithmetic NaN>"):
val = binaryToFloat(actual_val, t)
#warning(f"{actual_val} => {val}")
if math.isnan(val):
actual = "<Some NaN>"
expect = "<Some NaN>"
else:
expect = "result " + formatValue(value, t)
actual = "result " + formatValue(actual_val, t)
else:
warning(f"Test {test.source} specifies multiple results")
expect = "result <Multiple>"
elif "expected_trap" in test:
if test.expected_trap in trapmap:
test.expected_trap = trapmap[test.expected_trap]
expect = "trap " + str(test.expected_trap)
elif "expected_anything" in test:
expect = "<Anything>"
else:
expect = "<Unknown>"
def showTestResult():
print(" ----------------------")
print(f"Test: {ansi.HEADER}{test_id}{ansi.ENDC}")
print(f"Args: {', '.join(displayArgs)}")
print(f"Expected: {ansi.OKGREEN}{expect}{ansi.ENDC}")
print(f"Actual: {ansi.WARNING}{actual}{ansi.ENDC}")
if args.show_logs and len(output):
print(f"Log:")
print(output)
log.write(f"{test.source}\t|\t{test.wasm} {test.action.field}({', '.join(displayArgs)})\t=>\t\t")
if actual == expect or (expect == "<Anything>" and not force_fail):
stats.success += 1
log.write(f"OK: {actual}\n")
if args.line:
showTestResult()
else:
stats.failed += 1
log.write(f"FAIL: {actual}, should be: {expect}\n")
if args.silent: return
showTestResult()
#sys.exit(1)
if args.file:
jsonFiles = args.file
else:
jsonFiles = glob.glob(os.path.join(".", "core", "*.json"))
jsonFiles = list(map(lambda x: os.path.relpath(x, scriptDir), jsonFiles))
jsonFiles.sort()
for fn in jsonFiles:
with open(fn) as f:
data = json.load(f)
wast_source = filename(data["source_filename"])
wasm_module = ""
print(f"Running {fn}")
wasm3.init()
for cmd in data["commands"]:
test = dotdict()
test.line = int(cmd["line"])
test.source = wast_source + ":" + str(test.line)
test.wasm = wasm_module
test.type = cmd["type"]
if test.type == "module":
wasm_module = cmd["filename"]
if args.verbose:
print(f"Loading {wasm_module}")
try:
wasm_fn = os.path.join(pathname(fn), wasm_module)
wasm3.load(wasm_fn)
except Exception as e:
pass #fatal(str(e))
elif ( test.type == "action" or
test.type == "assert_return" or
test.type == "assert_trap" or
test.type == "assert_exhaustion" or
test.type == "assert_return_canonical_nan" or
test.type == "assert_return_arithmetic_nan"):
if args.line and test.line != args.line:
continue
if test.type == "action":
test.expected_anything = True
elif test.type == "assert_return":
test.expected = cmd["expected"]
elif test.type == "assert_return_canonical_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "<Canonical NaN>"
elif test.type == "assert_return_arithmetic_nan":
test.expected = cmd["expected"]
test.expected[0]["value"] = "<Arithmetic NaN>"
elif test.type == "assert_trap":
test.expected_trap = cmd["text"]
elif test.type == "assert_exhaustion":
test.expected_trap = "stack overflow"
else:
stats.skipped += 1
warning(f"Skipped {test.source} ({test.type} not implemented)")
continue
test.action = dotdict(cmd["action"])
if test.action.type == "invoke":
# TODO: invoking in modules not implemented
if test.action.module:
stats.skipped += 1
warning(f"Skipped {test.source} (invoke in module)")
continue
test.action.field = escape_str(test.action.field)
runInvoke(test)
else:
stats.skipped += 1
warning(f"Skipped {test.source} (unknown action type '{test.action.type}')")
# These are irrelevant
elif (test.type == "assert_invalid" or
test.type == "assert_malformed" or
test.type == "assert_uninstantiable"):
pass
# Others - report as skipped
else:
stats.skipped += 1
warning(f"Skipped {test.source} ('{test.type}' not implemented)")
if (stats.failed + stats.success) != stats.total_run:
warning("Statistics summary invalid")
pprint(stats)
if stats.failed > 0:
failed = (stats.failed*100)/stats.total_run
print(f"{ansi.FAIL}=======================")
print(f" FAILED: {failed:.2f}%")
if stats.crashed > 0:
print(f" Crashed: {stats.crashed}")
print(f"======================={ansi.ENDC}")
sys.exit(1)
elif stats.success > 0:
print(f"{ansi.OKGREEN}=======================")
print(f" {stats.success}/{stats.total_run} tests OK")
if stats.skipped > 0:
print(f"{ansi.WARNING} ({stats.skipped} tests skipped){ansi.OKGREEN}")
print(f"======================={ansi.ENDC}")
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete tests."""
import flask
import unittest
import webtest
from datastore import data_types
from handlers.testcase_detail import delete
from libs import form
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
@test_utils.with_cloud_emulators('datastore')
class HandlerTest(unittest.TestCase):
"""Test HandlerTest."""
def setUp(self):
test_helpers.patch(self, [
'libs.auth.get_current_user',
'libs.auth.is_current_user_admin',
])
self.mock.is_current_user_admin.return_value = True
self.mock.get_current_user().email = 'test@user.com'
flaskapp = flask.Flask('testflask')
flaskapp.add_url_rule('/', view_func=delete.Handler.as_view('/'))
self.app = webtest.TestApp(flaskapp)
def test_assigned_issue(self):
"""The testcase is assigned an issue."""
testcase = data_types.Testcase()
testcase.bug_information = '1234'
testcase.put()
resp = self.app.post_json(
'/', {
'testcaseId': testcase.key.id(),
'csrf_token': form.generate_csrf_token()
},
expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertIsNotNone(testcase.key.get())
def test_succeed(self):
"""Delete."""
testcase = data_types.Testcase()
testcase.bug_information = None
testcase.put()
resp = self.app.post_json('/', {
'testcaseId': testcase.key.id(),
'csrf_token': form.generate_csrf_token()
})
self.assertEqual(200, resp.status_int)
self.assertIsNone(testcase.key.get())
|
from lektor.constants import PRIMARY_ALT
from lektor.i18n import get_i18n_block
from lektor.types.base import Type
class FakeType(Type):
def value_from_raw(self, raw):
return None
def to_json(self, pad, record=None, alt=PRIMARY_ALT):
rv = Type.to_json(self, pad, record, alt)
rv["is_fake_type"] = True
return rv
class LineType(FakeType):
widget = "f-line"
class SpacingType(FakeType):
widget = "f-spacing"
class InfoType(FakeType):
widget = "f-info"
class HeadingType(FakeType):
widget = "f-heading"
def to_json(self, pad, record=None, alt=PRIMARY_ALT):
rv = FakeType.to_json(self, pad, record, alt)
rv["heading_i18n"] = get_i18n_block(self.options, "heading")
return rv
|
"""Module implémentant des classes en relations avec le menu."""
from typing import Callable, Dict, List, Tuple, AnyStr
class Trafficlight:
"""Modélise un feu de circulation présentant un état lumineux donné.
wrarn : la couleur affectée par défaut n'est pas validée par l'init
autrement dit on peut initialiser avec une couleur inexistante dans le choix final """
def __init__(self, name: str = "None", current: str = "rouge") -> None:
"""Initialise une instance de feu.
Args:
name: identifiant du feu courant
current: etat par defaut
"""
self._name: str = name
self._current: str = current
self._triggers: Dict = {}
def add(self, trigger: AnyStr, source: AnyStr, dest: AnyStr) -> "Trafficlight":
"""Ajoute une transition entre 2 etats.
Args:
trigger: action déclenchant la transition,
source: etat de départ,
dest: etat d'arivee
"""
trigger = trigger.lower().strip()
source = source.lower().strip()
self._triggers[trigger] = self._triggers.get(trigger, {})
self._triggers[trigger][source] = dest
return self
def next(self, **args):
"""Recupere le prochain etat et appelle le renderer."""
# vérification du choix de la couleur
if "color" in args and self._triggers["next"][self._current] != args["color"]:
print(
"La couleur {} ne peut suivre la couleur {} ".format(
args["color"], self._current
)
)
return getattr(self, "render"), args
self._current = self._triggers["next"][self._current]
return getattr(self, "render"), args
def quitter(self, **args):
"""Quitte le parcours."""
return None, None
def __str__(self) -> str:
"""Formate le contenu de l'objet en vue de sa présentation à l'utilisateur."""
lines: List[str] = [f"{self._name.title()}\t({self._current})\n"]
for key, value in enumerate(self._triggers.keys()):
lines.append(f"{key} - {value}")
lines.append("Ou entrez la valeur en toutes lettres.")
lines.append("")
lines.append(">>> ")
return "\n".join(lines)
def render(self, **args) -> Tuple[Callable, Dict]:
"""Affiche le choix des triggers à l'utilisateur et attend la réponse de ce dernier.
"""
entries: Dict = {
str(key): value for (key, value) in enumerate(self._triggers.keys())
}
while True:
choice = input(self).lower().strip()
if choice in entries:
return getattr(self, entries[choice]), {}
elif choice in self._triggers["next"]:
return getattr(self, "next"), {"color": choice}
else:
print("Erreur de saisie.")
def get_current(self) -> str:
"""renvoie l'etat courant du feu.
"""
return self._current
def set_current(self, value: AnyStr):
""" affecte l'etat courant du feu.
"""
self.next(color=value)
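# Minimal usage sketch (illustrative only; the state names are assumptions
# chosen to match the module's default state "rouge"):
if __name__ == "__main__":
    feu = Trafficlight("carrefour")        # starts in the default state "rouge"
    feu.add("next", "rouge", "vert")
    feu.add("next", "vert", "orange")
    feu.add("next", "orange", "rouge")
    feu.next()                             # rouge -> vert
    print(feu.get_current())               # prints "vert"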
|
''' clip.py:
Implements the clip ONNX node as a flexnode (for use with any accelerator)
'''
import uuid
import numpy as np
from operators.flexnode import FlexNode
from core.defines import Operator
from core.messaging import Message
class Clip(FlexNode):
def __init__(self, onnx_node, inputs, outputs):
FlexNode.__init__(self, onnx_node, inputs, outputs)
self._min = -3.402823466e+38
self._max = 3.402823466e+38
if len(inputs) != 1 and len(inputs) != 3:
raise ValueError("Clip can only have 1 or 3 inputs.")
self._input = inputs[0]
if len(inputs) == 3:
self._min = inputs[1]
self._max = inputs[2]
def map(self, memory_mapper):
pass
def unmap(self, memory_mapper):
pass
def _inputs2mem(self, memory_xfer_engine):
pass
def _mem2output(self, memory_xfer_engine):
pass
def compile(self, source, destinations):
tile_commands = list()
# Here, we are NOT generating tile_commands, (although, this is not difficult.)
np.copyto(self._outputs[0], np.clip(self._input, self._min, self._max))
return tile_commands
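# For reference, the clamp applied above follows numpy semantics:
#     np.clip([-5.0, 0.5, 7.0], -1.0, 2.0)  ->  array([-1., 0.5, 2.])
# With only one input, the default bounds (+/- 3.402823466e+38, i.e. the
# float32 maximum) effectively leave the values unchanged.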
|
"""
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import warnings
import re
from django.db import utils
from django.db.backends import *
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.db.models import fields
from django.db.models.sql import aggregates
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes
from django.utils import six
from django.utils import timezone
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
try:
import pytz
except ImportError:
pytz = None
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("SQLite received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return value.isoformat(str(" "))
def decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("decimal"), decoder(util.typecast_decimal))
Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support)
Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
if Database.version_info >= (2, 4, 1):
# Starting in 2.4.1, the str type is not accepted anymore, therefore,
# we convert all str objects to Unicode.
# As registering an adapter for a primitive type causes a small
# slow-down, this adapter is only registered for sqlite3 versions
# needing it (Python 2.6 and up).
Database.register_adapter(str, lambda s: s.decode('utf-8'))
Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
autocommits_when_autocommit_is_off = True
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_aggregate_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
@cached_property
def has_zoneinfo_database(self):
return pytz is not None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just a single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
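# Worked example: with 3 fields per row the batch size is 999 // 3 == 333
# rows; with a single field the 500-row compound-SELECT limit applies
# instead, giving 500 // 1 == 500 rows per batch.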
def check_aggregate_support(self, aggregate):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg,
aggregates.Variance, aggregates.StdDev)
if (isinstance(aggregate.source, bad_fields) and
isinstance(aggregate, bad_aggregates)):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev and Variance aggregations '
'on date/time fields in sqlite3 '
'since date/time is saved as text.')
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
# It would be more straightforward if we could use the sqlite strftime
# function, but it does not allow for keeping six digits of fractional
# second information, nor does it allow for formatting date and datetime
# values differently. So instead we register our own function that
# formats the datetime combined with the delta in a manner suitable
# for comparisons.
return 'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def value_to_db_datetime(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return util.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return parse_date(value)
elif internal_type == 'DateTimeField':
return parse_datetime_with_timezone_support(value)
elif internal_type == 'TimeField':
return parse_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
def bulk_insert_sql(self, fields, num_values):
res = []
res.append("SELECT %s" % ", ".join(
"%%s AS %s" % self.quote_name(f.column) for f in fields
))
res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
return " ".join(res)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
return conn
def init_connection_state(self):
pass
def create_cursor(self):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside atomic
# blocks. To work around that bug, on SQLite, atomic starts a
# transaction explicitly rather than simply disable autocommit.
return self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
self.connection.isolation_level = level
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0], table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=()):
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
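# Example of the conversion performed above (illustrative): the query
#   "SELECT * FROM t WHERE name = %s AND pct LIKE '100%%'"
# becomes
#   "SELECT * FROM t WHERE name = ? AND pct LIKE '100%'"
# i.e. unescaped %s placeholders turn into ?, while %% collapses to a literal %.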
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_datetime_extract(lookup_type, dt, tzname):
if dt is None:
return None
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
try:
dt = util.typecast_timestamp(dt)
delta = datetime.timedelta(int(days), int(secs), int(usecs))
if conn.strip() == '+':
dt = dt + delta
else:
dt = dt - delta
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(dt)
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, re_string))
|
titulo = 'Cadastro de Pessoas'
print(titulo.center(50, '='))
print('')
idade = total = homens = mulheres = 0
sexo = ''
while True:
idade = int(input('Idade: '))
sexo = input('Sexo: [M] ou [F]? ').upper().strip()[0]
while sexo not in 'MF':
sexo = input('Sexo: [M] ou [F]? ').upper().strip()[0]
resposta = input('Continuar? [S] ou [N] ').upper().strip()[0]
while resposta not in 'SN':
resposta = input('Continuar? [S] ou [N] ').upper().strip()[0]
print('-' * 50)
if idade >= 18:
total += 1
if sexo == 'M':
homens += 1
if sexo == 'F' and idade < 20:
mulheres += 1
if resposta == 'N':
break
print(f'Total de maiores de 18 anos: {total}')
print(f'Total de homens: {homens}')
print(f'Total de mulheres menores de 20 anos: {mulheres}')
|
import os
# Finds path of any file in the assets folder #
def findPath(folders, file, extension):
# Check that the folder path was given as a list of folder names #
if(isinstance(folders, list)):
# Default folder path, being nothing #
folderPath = ""
# Loops through all the folders #
for x in folders:
# Joins them together #
folderPath = os.path.join(folderPath, x)
return os.path.join("assets", folderPath, file + "." + extension)
else:
# Raise a clear error here instead of a confusing "file doesn't exist" later on #
raise ValueError('The folder path you inputted is not a list! Your folder path: "' + str(folders) + '"')
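# A minimal usage sketch (the folder names, file name, and extension below are
# illustrative and not part of the original project):
if __name__ == "__main__":
    # e.g. prints "assets/images/player/idle.png" on POSIX systems
    print(findPath(["images", "player"], "idle", "png"))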
|
#!/usr/bin/env python
# Copyright (c) 2017 The sqlalchemy-bigquery Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import io
import itertools
import os
import re
from setuptools import setup
# Package metadata.
name = "sqlalchemy-bigquery"
description = "SQLAlchemy dialect for BigQuery"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 5 - Production/Stable"
package_root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(package_root, "sqlalchemy_bigquery", "version.py")) as f:
version = re.search('__version__ = "([^"]+)"', f.read()).group(1)
def readme():
with io.open("README.rst", "r", encoding="utf8") as f:
return f.read()
extras = dict(
geography=["GeoAlchemy2", "shapely"],
alembic=["alembic"],
tests=["packaging", "pytz"],
)
extras["all"] = set(itertools.chain.from_iterable(extras.values()))
setup(
name=name,
version=version,
description=description,
long_description=readme(),
long_description_content_type="text/x-rst",
author="The Sqlalchemy-Bigquery Authors",
author_email="googleapis-packages@google.com",
packages=["sqlalchemy_bigquery"],
url="https://github.com/googleapis/python-bigquery-sqlalchemy",
keywords=["bigquery", "sqlalchemy"],
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
"Topic :: Database :: Front-Ends",
],
platforms="Posix; MacOS X; Windows",
install_requires=[
"google-api-core>=1.30.0", # Work-around bug in cloud core deps.
# NOTE: Maintainers, please do not require google-auth>=2.x.x
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
"google-auth>=1.25.0,<3.0.0dev", # Work around pip wack.
"google-cloud-bigquery>=2.24.1",
"sqlalchemy>=1.2.0,<1.5.0dev",
"future",
],
extras_require=extras,
python_requires=">=3.6, <3.10",
tests_require=["packaging", "pytz"],
entry_points={
"sqlalchemy.dialects": ["bigquery = sqlalchemy_bigquery:BigQueryDialect"]
},
# Document that this replaces pybigquery, however, this isn't
# enforced by pip, because doing so would allow rogue packages to
# obsolete legitimate ones.
obsoletes=["pybigquery"],
)
|
#!/usr/bin/env python
"""APOGEE cold shutter control and status
History:
2011-08-30 ROwen
2011-09-01 ROwen Added support for cancelling commands.
2012-11-14 ROwen Stop using Checkbutton indicatoron=False; it is no longer supported on MacOS X.
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import Tkinter
import RO.Constants
import RO.Wdg
import RO.TkUtil
import RO.StringUtil
import TUI.Models
import TUI.Base.Wdg
from . import BaseDeviceWdg
class ShutterWdgSet(object):
"""Widgets to control APOGEE's cold shutter and the associated LEDs
"""
_ShutterCat = "shutter"
_NumLEDs = 4
def __init__(self, gridder, statusBar, colSpan=3, helpURL=None):
"""Create a ShutterWdgSet for the APOGEE cold shutter and calibration LEDs
Inputs:
- gridder: an instance of RO.Wdg.Gridder;
the widgets are gridded starting at the next row and default column
- statusBar: status bar (to send commands)
- colSpan: the number of columns to span
- helpURL: path to an HTML help file or None
Note: you may wish to call master.columnconfigure(n, weight=1)
where n is the last column of this widget set
so that the environment widget panel can fill available space
without resizing the columns of other widgets.
"""
self.statusBar = statusBar
self.helpURL = helpURL
self.gridder = gridder
master = self.gridder._master
self.model = TUI.Models.getModel("apogee")
self.showHideWdg = RO.Wdg.Checkbutton(
master = master,
text = "Shutter",
callFunc = self._doShowHide,
helpText = "Show cold shutter controls?",
helpURL = helpURL,
)
self.summaryWdg = RO.Wdg.StrLabel(
master = master,
anchor = "w",
helpText = "Shutter status",
helpURL = helpURL,
)
gridder.gridWdg(self.showHideWdg, self.summaryWdg, sticky="w", colSpan=colSpan-1)
# hidable frame showing the controls
self.detailWdg = Tkinter.Frame(
master = master,
borderwidth = 1,
relief = "solid",
)
self.gridder.gridWdg(False, self.detailWdg, colSpan=colSpan, sticky="w", cat=self._ShutterCat)
detailGridder = RO.Wdg.Gridder(self.detailWdg, sticky="w")
self.shutterWdg = _ShutterWdg(
master = self.detailWdg,
statusBar = self.statusBar,
helpURL = helpURL,
)
detailGridder.gridWdg("Shutter", self.shutterWdg)
self.ledWdg = _LEDWdg(
master = self.detailWdg,
statusBar = self.statusBar,
numLEDs = self._NumLEDs,
helpURL = helpURL,
)
detailGridder.gridWdg("LEDs", self.ledWdg)
self.model = TUI.Models.getModel("apogee")
self.model.shutterIndexer.addCallback(self._updSummary)
self.model.shutterLimitSwitch.addCallback(self._updSummary)
self.model.shutterLED.addCallback(self._updSummary)
self.showHideWdg.addCallback(self._doShowHide, callNow = True)
def _doShowHide(self, wdg=None):
argDict = {
self._ShutterCat: self.showHideWdg.getBool(),
}
self.gridder.showHideWdg(**argDict)
def _updSummary(self, *dumArgs):
"""Update collimator summary label
"""
severity = RO.Constants.sevNormal
sumStr = "OK"
isCurrent = self.model.shutterIndexer.isCurrent
if self.model.shutterIndexer[0] == False:
sumStr = "Off"
severity = RO.Constants.sevError
else:
shutterStr, shutterSeverity = self.shutterWdg.getSummary()
ledStr, ledSeverity = self.ledWdg.getSummary()
sumStr = "%s; %s" % (shutterStr, ledStr)
severity = max(shutterSeverity, ledSeverity)
self.summaryWdg.set(sumStr, isCurrent=isCurrent, severity=severity)
class _ShutterWdg(BaseDeviceWdg.BaseDeviceWdg):
"""A widget to open or close the cold shutter
"""
actor = "apogee"
def __init__(self, master, statusBar, helpURL=None):
BaseDeviceWdg.BaseDeviceWdg.__init__(self,
master = master,
actor = "apogee",
statusBar = statusBar,
helpURL = helpURL,
)
self.shutterWdg = RO.Wdg.Checkbutton(
master = self,
onvalue = "Open",
offvalue = "Closed",
autoIsCurrent = True,
showValue = True,
callFunc = self.doShutter,
helpText = "Open or close cold shutter",
helpURL = helpURL,
)
self.shutterWdg.pack(side="left")
self.cancelBtn.pack(side="left")
self.model = TUI.Models.getModel(self.actor)
self.model.shutterLimitSwitch.addCallback(self.updateStatus)
def doShutter(self, wdg=None):
"""Send a command to open or close the shutter
"""
doOpen = self.shutterWdg.getBool()
if doOpen:
cmdStr = "shutter open"
else:
cmdStr = "shutter close"
self.doCmd(cmdStr)
def enableButtons(self, dumCmd=None):
"""Enable or disable widgets, as appropriate
"""
isRunning = self.isRunning
self.shutterWdg.setEnable(not isRunning)
self.cancelBtn.setEnable(isRunning)
def getSummary(self):
"""Return a string and severity summarizing the current state
"""
shutterLimits = tuple(self.model.shutterLimitSwitch[0:2])
return {
(False, False): ("?", RO.Constants.sevWarning),
(False, True): ("Closed", RO.Constants.sevNormal),
(True, False): ("Open", RO.Constants.sevNormal),
(True, True): ("Bad", RO.Constants.sevError),
}.get(shutterLimits, ("?", RO.Constants.sevError))
def updateStatus(self, keyVar=None):
"""shutterLimitSwitch keyword callback
"""
keyVar = self.model.shutterLimitSwitch
isCurrent = keyVar.isCurrent
isOpen, isClosed = keyVar[0:2]
with self.updateLock():
if None in (isOpen, isClosed):
self.shutterWdg.setIsCurrent(isCurrent)
return
if isOpen and not isClosed:
self.shutterWdg.setDefault(True)
self.shutterWdg.set(True, isCurrent=isCurrent)
elif isClosed and not isOpen:
self.shutterWdg.setDefault(False)
self.shutterWdg.set(False, isCurrent=isCurrent)
else:
self.shutterWdg.setIsCurrent(False)
class _LEDWdg(BaseDeviceWdg.BaseDeviceWdg):
def __init__(self, master, statusBar, numLEDs, helpURL=None):
BaseDeviceWdg.BaseDeviceWdg.__init__(self,
master = master,
actor = "apogee",
statusBar = statusBar,
helpURL = helpURL,
)
self.numLEDs = int(numLEDs)
self.allOnMask = (1 << self.numLEDs) - 1
self.ledWdgSet = []
for ledInd in range(self.numLEDs):
ledName = str(ledInd + 1)
ledWdg = RO.Wdg.Checkbutton(
master = self,
text = "",
callFunc = self.toggleOne,
autoIsCurrent = True,
helpText = "Turn LED %s on or off" % (ledName,),
helpURL = helpURL,
)
ledWdg.pack(side="left")
self.ledWdgSet.append(ledWdg)
self.allOffBtn = RO.Wdg.Button(
master = self,
text = "All Off",
callFunc = self.turnAllOff,
helpText = "Turn all LEDs off",
helpURL = helpURL,
)
self.allOffBtn.pack(side="left")
self.allOnBtn = RO.Wdg.Button(
master = self,
text = "All On",
callFunc = self.turnAllOn,
helpText = "Turn all LEDs on",
helpURL = helpURL,
)
self.allOnBtn.pack(side="left")
self.cancelBtn.pack(side="left")
self.model = TUI.Models.getModel(self.actor)
self.model.shutterLED.addCallback(self.updateStatus)
def setLEDs(self, ledMask, setWdg):
"""Send a command to turn on and off the specified LEDs
Inputs:
- ledMask: a bit mask of which LEDs should be on (0=all off)
- setWdg: if True then set the widgets;
set True for buttons that control multiple widgets;
set False if the user has toggled the button manually
"""
cmdStr = "shutter ledControl=%d" % (ledMask,)
self.doCmd(cmdStr)
if setWdg and not self.updatingStatus:
self._setLEDWdg(ledMask)
def enableButtons(self, dumCmd=None):
"""Enable or disable widgets, as appropriate
"""
isRunning = self.isRunning
self.cancelBtn.setEnable(isRunning)
for wdg in self.ledWdgSet:
wdg.setEnable(not isRunning)
self.allOffBtn.setEnable(not isRunning)
self.allOnBtn.setEnable(not isRunning)
def turnAllOff(self, wdg=None):
"""Turn all LEDs off
"""
self.setLEDs(0, setWdg=True)
def turnAllOn(self, wdg=None):
"""Turn all LEDs on
"""
self.setLEDs(self.allOnMask, setWdg=True)
def toggleOne(self, wdg=None):
"""Toggle one LED on or off
"""
ledMask = 0
for ind, wdg in enumerate(self.ledWdgSet):
if wdg.getBool():
ledMask += 1 << ind
self.setLEDs(ledMask, setWdg=False)
def getSummary(self):
"""Return a summary string and associated severity
"""
severity = RO.Constants.sevWarning
ledMask = self.model.shutterLED[0]
if ledMask is None:
sumStr = "LEDs ?"
elif ledMask == 0:
sumStr = "LEDs all off"
severity = RO.Constants.sevNormal
elif ledMask == self.allOnMask:
sumStr = "LEDs ALL ON"
else:
onList = [str(ind+1) for ind in range(self.numLEDs) if ledMask & (1 << ind) != 0]
if len(onList) == 1:
pfx = "LED"
else:
pfx = "LEDs"
sumStr = "%s %s ON" % (pfx, " ".join(onList))
return sumStr, severity
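# Example (illustrative): with 4 LEDs and ledMask = 0b0101, bits 0 and 2 are set, so this
# returns ("LEDs 1 3 ON", RO.Constants.sevWarning); ledMask = 0 gives ("LEDs all off",
# sevNormal) and ledMask = 0b1111 gives ("LEDs ALL ON", sevWarning).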
def updateStatus(self, keyVar=None):
"""shutterLED keyword callback
"""
keyVar = self.model.shutterLED
isCurrent = keyVar.isCurrent
ledMask = keyVar[0]
if ledMask is None:
for wdg in self.ledWdgSet:
wdg.setIsCurrent(isCurrent)
return
with self.updateLock():
for ind, wdg in enumerate(self.ledWdgSet):
isOn = ledMask & (1 << ind) != 0
wdg.setDefault(isOn)
wdg.setBool(isOn, isCurrent=isCurrent)
def _setLEDWdg(self, ledMask):
"""Set LED widgets to a particular state
"""
with self.updateLock():
for ind, wdg in enumerate(self.ledWdgSet):
isOn = ledMask & (1 << ind) != 0
wdg.setBool(isOn)
if __name__ == "__main__":
from . import TestData
tuiModel = TestData.tuiModel
root = tuiModel.tkRoot
statusBar = TUI.Base.Wdg.StatusBar(root)
testFrame = Tkinter.Frame(root)
gridder = RO.Wdg.Gridder(testFrame)
shutterWdgSet = ShutterWdgSet(gridder, statusBar)
testFrame.pack(side="top", expand=True)
testFrame.columnconfigure(2, weight=1)
statusBar.pack(side="top", expand=True, fill="x")
TestData.start()
tuiModel.reactor.run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# nnutil2 - Tensorflow utilities for training neural networks
# Copyright (c) 2019, Abdó Roig-Maranges <abdo.roig@gmail.com>
#
# This file is part of 'nnutil2'.
#
# This file may be modified and distributed under the terms of the 3-clause BSD
# license. See the LICENSE file for details.
from typing import List
import tensorflow as tf
from ..util import kwargs_for
from .layer import Layer
class Segment(Layer):
"""A sequential collection of layers"""
def __init__(self, layers: List[Layer] = [], activation=None, **kwargs):
super(Segment, self).__init__(**kwargs)
self._segment_layers = layers
self._segment_activation = tf.keras.activations.get(activation)
self._segment_states = []
def get_config(self):
config = {
'layers': [ly.get_config() for ly in self._segment_layers],
'activation': self._segment_activation
}
base_config = super(Segment, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, **kwargs):
x = inputs
self._segment_states.append(x)
for l in self._segment_layers:
layer_kwargs = kwargs_for(kwargs, l.call)
x = l(x, **layer_kwargs)
self._segment_states.append(x)
if self._segment_activation is not None:
x = self._segment_activation(x)
self._segment_states.append(x)
return x
def compute_output_shape(self, input_shape):
shape = input_shape
for l in self._segment_layers:
shape = l.compute_output_shape(shape)
return shape
@property
def flat_layers(self):
layers = []
def add_layers(ly):
if isinstance(ly, Segment):
for ly2 in ly.layers:
add_layers(ly2)
else:
layers.append(ly)
add_layers(self)
return layers
@property
def layers(self):
return self._segment_layers
@property
def states(self):
return self._segment_states
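# Minimal usage sketch (hypothetical; assumes plain tf.keras layers are acceptable in place
# of nnutil2 Layer instances, which Python's typing does not enforce):
#   seg = Segment(layers=[tf.keras.layers.Dense(32, activation="relu"),
#                         tf.keras.layers.Dense(8)])
#   y = seg(tf.zeros([1, 16]))     # y.shape == (1, 8)
#   intermediates = seg.states     # the input plus the output of each sub-layer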
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup script for twodolib."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
test_requirements = ['tox', ]
setup(
name='twodolib',
version='0.5.4',
description="Functions to manage the 2DoApp from the command line.",
long_description=readme + '\n\n' + history,
author="Karsten Schulz",
author_email='github@karstenschulz.biz',
url='https://github.com/KarstenSchulz/twodolib',
packages=[
'twodolib',
],
package_dir={'twodolib': 'twodolib'},
include_package_data=True,
entry_points={
'console_scripts': [
'task2do = twodolib.cli:main'
]
},
# install_requires=requirements,
license="ISCL",
zip_safe=False,
keywords='twodolib tool task2do task-management',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: MacOS X',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
],
test_suite='tests',
tests_require=test_requirements
)
|
from PySide2.QtWidgets import QPushButton, QMainWindow, QLabel, QLineEdit, QGroupBox
from math import ceil
import source
class MainWindow(QMainWindow):
def __init__(self, screen_width, screen_height):
self.screen_width = screen_width
self.screen_height = screen_height
self.screen_ratio = screen_width / 3840
self.half_screen_ratio = 0.45 + self.screen_ratio / 2
self.production_speed_ratio = 1
self.window = QMainWindow()
self.window.resize(self.screen_width, self.screen_height)
self.window.setWindowTitle('戴森球计划产量计算器 ver.0.1')
self.grid_width = 75 * self.screen_ratio
self.grid_height = 50 * self.screen_ratio
self.init_bias = 50 * self.screen_ratio
self.interval = 0 * self.screen_ratio
self.box_width = self.grid_width * 4 + self.interval + 5 * self.screen_ratio
self.box_height = self.grid_height * 2 + self.init_bias + 5 * self.screen_ratio
# Subtitle: app name - author
self.subtitle_font_size = 50 * self.screen_ratio
if self.screen_ratio > 0.7:
self.subtitle_font_size = 50 * self.screen_ratio / 1.5
subtitle = QLabel(self.window)
subtitle.setText('戴森球计划 材料生产计算器 -- by 魂月')
subtitle.setStyleSheet('QLabel {font: 75 ' + str(int(self.subtitle_font_size)) + 'pt "宋体";}')
subtitle.move(1000 * self.screen_ratio, int(25 * self.screen_ratio))
subtitle.resize(1840 * self.screen_ratio, self.box_height * self.screen_ratio)
# Bottom: 取整机器数量
self.button = QPushButton('取整机器数量', self.window)
self.button.move(2840 * self.screen_ratio, int(25 * self.screen_ratio) + int(self.box_height / 3))
self.button.resize(400 * self.screen_ratio, int(self.box_height / 3))
self.button.setStyleSheet('QPushButton {font: ' + str(int(self.subtitle_font_size / 2)) + 'pt "宋体";}')
self.button.clicked.connect(self.ceil_machine_number)
self.ox = (self.screen_width - 12 * self.box_width) / 2
self.oy = self.box_height + 50 * self.screen_ratio
self.font_size = 14 * self.half_screen_ratio
self.line_edit_font_size = self.font_size * 0.9 * 0.75
self.element = source.element
self.production = source.production
self.supporter = source.support
self.bi_material = source.bi_material
self.sorted_element = source.sorted_element
self.element_box = [[[None, None, None, None] for _ in range(len(self.element[0]))] for _ in range(len(self.element))]
self.element_amount = [[[0, 0, 0, 0] for _ in range(len(self.element[0]))] for _ in range(len(self.element))]
self.table_gen()
for resource in self.sorted_element:
i, j = self.get_idx(resource)
for k in range(4):
self.element_box[i][j][k].editingFinished.connect(self.update_element_amount)
def table_gen(self):
nrows = len(self.element)
ncols = len(self.element[0])
for i in range(nrows):
for j in range(ncols):
foo = self.box_gen(self.ox + j * self.box_width, self.oy + i * self.box_height, self.element[i][j])
if len(foo) == 4:
for k in range(4):
self.element_box[i][j][k] = foo[k]
def box_gen(self, x, y, resource=''):
group_box = QGroupBox(self.window)
group_box.move(x, y)
group_box.resize(self.box_width, self.box_height)
if resource == '':
return []
group_box.setTitle('')
group_box.setStyleSheet('QGroupBox { background-color: \
rgb(255, 255, 255); border: 3px solid rgb(122, 255, 100); } \
QGroupBox::title{font: 75 ' + str(100 * self.screen_ratio) + 'pt "宋体"; color: rgb(255, 0, 0)}')
label_again = QLabel(group_box)
label_again.setStyleSheet('QLabel {font: 75 ' + str(self.font_size) + 'pt "宋体"; color: rgb(255, 0, 0)}')
label_again.setText(resource)
label_again.move(int(self.grid_width * 0.7), 5 * self.screen_ratio)
label_again.resize(int(self.grid_width * 3.3), self.init_bias - 5)
product_label00 = QLabel(group_box)
product_label00.setText('产量')
product_label00.move(3, self.init_bias)
product_label00.resize(self.grid_width, self.grid_height)
product_label00.setStyleSheet('QLabel {font: 75 ' + str(self.font_size) + 'pt "宋体"}')
product00 = QLineEdit(group_box)
product00.setText('0')
product00.move(self.grid_width, self.init_bias)
product00.resize(self.grid_width, self.grid_height)
product00.setEnabled(False)
product00.setStyleSheet('QLineEdit {font: ' + str(self.line_edit_font_size) + 'pt "宋体"}')
product_label10 = QLabel(group_box)
product_label10.setText('额外')
product_label10.move(3, self.grid_height + self.init_bias)
product_label10.resize(self.grid_width, self.grid_height)
product_label10.setStyleSheet('QLabel {font: 75 ' + str(self.font_size) + 'pt "宋体"}')
product10 = QLineEdit(group_box)
product10.setText('0')
product10.move(self.grid_width, self.grid_height + self.init_bias)
product10.resize(self.grid_width, self.grid_height)
product10.setStyleSheet('QLineEdit {font: ' + str(self.line_edit_font_size) + 'pt "宋体"}')
product_label01 = QLabel(group_box)
product_label01.setText('机器')
product_label01.move(self.grid_width * 2 + self.interval, self.init_bias)
product_label01.resize(self.grid_width, self.grid_height)
product_label01.setStyleSheet('QLabel {font: 75 ' + str(self.font_size) + 'pt "宋体"}')
product01 = QLineEdit(group_box)
product01.setText('0.0')
product01.move(self.grid_width * 3 + self.interval, self.init_bias)
product01.resize(self.grid_width, self.grid_height)
product01.setStyleSheet('QLineEdit {font: ' + str(self.line_edit_font_size) + 'pt "宋体"}')
product01.setEnabled(False)
product_label11 = QLabel(group_box)
product_label11.setText('已有')
product_label11.move(self.grid_width * 2 + self.interval, self.grid_height + self.init_bias)
product_label11.resize(self.grid_width, self.grid_height)
product_label11.setStyleSheet('QLabel {font: 75 ' + str(self.font_size) + 'pt "宋体"}')
product11 = QLineEdit(group_box)
product11.setText('0')
product11.move(self.grid_width * 3 + self.interval, self.grid_height + self.init_bias)
product11.resize(self.grid_width, self.grid_height)
product11.setStyleSheet('QLineEdit {font: ' + str(self.line_edit_font_size) + 'pt "宋体"}')
if resource in self.supporter:
product11.setEnabled(True)
else:
product11.setEnabled(False)
return [product00, product01, product10, product11]
# Update the widgets in the window from the values stored in self.element_amount.
def update_view(self, is_int=[True, False, True, True]):
for resource in self.sorted_element:
i, j = self.get_idx(resource)
for k in range(4):
amount = round(self.element_amount[i][j][k], 1)
if is_int[k]:
amount = int(self.element_amount[i][j][k])
self.element_box[i][j][k].setText(str(amount))
def get_idx(self, resource):
idx = None
if resource != '':
for i in range(len(self.element)):
for j in range(len(self.element[0])):
if resource == self.element[i][j]:
idx = [i, j]
return idx
def produce_resource(self, resource, increase_production_number):
# Add resource amount in self.element_amount.
idx = self.get_idx(resource)
if not idx:
exit(1)
else:
i, j = idx
self.element_amount[i][j][0] += increase_production_number
production_speed = self.production[resource][0][0]
self.element_amount[i][j][1] += increase_production_number / production_speed
# Now produce the required amounts of this resource's components.
component = self.production[resource][1:]
if not component:
return
for obj_resource in component:
production_name = obj_resource[0]
production_number = increase_production_number * obj_resource[1]
self.produce_resource(production_name, production_number)
def calculate_supporter(self):
for supporter, properties in self.supporter.items():
i, j = self.get_idx(supporter)
amount = self.element_amount[i][j][3]
for production in properties:
i, j = self.get_idx(production[0])
production_amount = self.element_amount[i][j][0]
convert_amount_to_production_amount = amount * production[1]
need_negative_production = convert_amount_to_production_amount - production_amount
if need_negative_production > 0:
self.produce_resource(production[0], -1 * production_amount)
else:
self.produce_resource(production[0], -1 * convert_amount_to_production_amount)
def calculate_bi_raw_material(self):
# Calculate the need of the bi_raw_materials.
for material, properties in self.bi_material.items():
# production1
production1 = properties[0][0]
i, j = self.get_idx(production1)
production1_amount = properties[0][1]
need_production1_amount = self.element_amount[i][j][0]
need_material_amount1 = need_production1_amount / production1_amount
# production2
production2 = properties[1][0]
i, j = self.get_idx(production2)
production2_amount = properties[1][1]
need_production2_amount = self.element_amount[i][j][0]
need_material_amount2 = need_production2_amount / production2_amount
# Calculate the need of the material
need_material_amount = max(need_material_amount1, need_material_amount2)
i, j = self.get_idx(material)
self.element_amount[i][j][0] = need_material_amount
material_production_speed = self.production[material][0][0]
self.element_amount[i][j][1] = need_material_amount / material_production_speed
def update_element_amount(self, has_supporter=True):
# Read all LineEdit boxes.
for resource in self.sorted_element:
i, j = self.get_idx(resource)
for k in range(4):
input_value = self.element_box[i][j][k].text()
if k == 0 or k == 1 or input_value == '':
self.element_amount[i][j][k] = 0.0
else:
self.element_amount[i][j][k] = float(input_value)
# Produce the required amount of all resources.
for resource in self.sorted_element:
i, j = self.get_idx(resource)
production_amount = self.element_amount[i][j][2] - self.element_amount[i][j][3]
if production_amount < 0:
self.produce_resource(resource, 0)
else:
self.produce_resource(resource, production_amount)
# Calculate the second product of the special supporter.
if has_supporter:
self.calculate_supporter()
# Calculate the need of the bi_raw_material.
self.calculate_bi_raw_material()
# Update the view of the app.
self.update_view()
def ceil_machine_number(self):
# Re-update element amount without considering supporter.
self.update_element_amount(False)
# Calculate supporter.
supporter_stack = dict()
for support, products in self.supporter.items():
i, j = self.get_idx(support)
support_amount = self.element_amount[i][j][3]
for product in products:
product_name = product[0]
product_amount = product[1]
supporter_stack[product_name] = support_amount * product_amount
# Ceil machine amount and produce the required amount of the resources.
for resource in self.sorted_element:
if resource not in self.supporter:
i, j = self.get_idx(resource)
production_speed = self.production[resource][0][0]
if resource in supporter_stack:
cur_resource_amount = self.element_amount[i][j][0]
real_resource_amount = cur_resource_amount - supporter_stack[resource]
if real_resource_amount > 0:
cur_machine_amount = real_resource_amount / production_speed
new_machine_amount = ceil(cur_machine_amount)
else:
new_machine_amount = 0
else:
cur_machine_amount = self.element_amount[i][j][1]
new_machine_amount = ceil(cur_machine_amount)
cur_resource_amount = self.element_amount[i][j][0]
incre_resource_amount = new_machine_amount * production_speed - cur_resource_amount
self.produce_resource(resource, incre_resource_amount)
self.element_amount[i][j][1] = new_machine_amount
# Calculate the need of the bi_raw_material.
self.calculate_bi_raw_material()
# Update the view of the app.
# Production amount is allowed to be float since its unit is piece/min.
self.update_view([False, True, True, True])
def show(self):
self.window.show()
|
import graphene
from graphene import relay
from ....product import models
from ...core.connection import CountableDjangoObjectType
from ...core.scalars import UUID
from ...meta.types import ObjectWithMetadata
class DigitalContentUrl(CountableDjangoObjectType):
url = graphene.String(description="URL for digital content.")
token = graphene.Field(
UUID, description=("UUID of digital content."), required=True
)
class Meta:
model = models.DigitalContentUrl
only_fields = ["content", "created", "download_num"]
interfaces = (relay.Node,)
@staticmethod
def resolve_url(root: models.DigitalContentUrl, *_args):
return root.get_absolute_url()
class DigitalContent(CountableDjangoObjectType):
urls = graphene.List(
lambda: DigitalContentUrl,
description="List of URLs for the digital variant.",
)
class Meta:
model = models.DigitalContent
only_fields = [
"automatic_fulfillment",
"content_file",
"max_downloads",
"product_variant",
"url_valid_days",
"urls",
"use_default_settings",
]
interfaces = (relay.Node, ObjectWithMetadata)
@staticmethod
def resolve_urls(root: models.DigitalContent, **_kwargs):
return root.urls.all()
|
#!/usr/bin/env python
# Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
"""
Downloads and extracts an archive with pre-built third-party dependencies.
"""
# This script should not use any non-standard modules and should run with Python 2 and Python 3.
# It could be run before the main Python interpreter we'll be using for most of our scripts is
# even installed.
import os
import sys
import re
import logging
import socket
import random
import atexit
import subprocess
import argparse
import tempfile
import time
import getpass
import platform
import fcntl
import errno
g_verbose = False
EXPECTED_ARCHIVE_EXTENSION = '.tar.gz'
CHECKSUM_EXTENSION = '.sha256'
def remove_ignore_errors(file_path):
file_path = os.path.abspath(file_path)
if os.path.isfile(file_path):
try:
os.remove(file_path)
except Exception as e:
logging.warning("Error removing %s: %s, ignoring", file_path, e)
def run_cmd(args):
if g_verbose:
logging.info("Running command: %s", args)
try:
subprocess.check_call(args)
except: # noqa
logging.error("Error trying to run command: %s", args)
raise
def validate_sha256sum(checksum_str):
if not re.match(r'^[0-9a-f]{64}$', checksum_str):
raise ValueError("Invalid SHA256 checksum: '%s', expected 64 hex characters", checksum_str)
def read_file_and_strip(file_path):
with open(file_path) as f:
return f.read().strip()
def compute_sha256sum(file_path):
cmd_line = None
if sys.platform.startswith('linux'):
cmd_line = ['sha256sum', file_path]
elif sys.platform.startswith('darwin'):
cmd_line = ['shasum', '--algorithm', '256', file_path]
else:
raise ValueError("Don't know how to compute SHA256 checksum on platform %s" % sys.platform)
checksum_str = subprocess.check_output(cmd_line).strip().split()[0].decode('utf-8')
validate_sha256sum(checksum_str)
return checksum_str
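# Illustration: the returned digest is a 64-character lowercase hex string; for example, the
# SHA-256 of empty input is
# 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'.
# On Linux it comes from `sha256sum <file>`, on macOS from `shasum --algorithm 256 <file>`.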
def verify_sha256sum(checksum_file_path, data_file_path):
if not os.path.exists(checksum_file_path):
raise IOError("Checksum file does not exist: %s" % checksum_file_path)
if not os.path.exists(data_file_path):
raise IOError("Data file does not exist: %s", data_file_path)
if not checksum_file_path.endswith(CHECKSUM_EXTENSION):
raise ValueError("Checksum file path must end with '%s', got: %s" % (
CHECKSUM_EXTENSION, checksum_file_path))
# Guard against someone passing in the actual data file instead of the checksum file.
checksum_file_size = os.stat(checksum_file_path).st_size
if checksum_file_size > 4096:
raise IOError("Checksum file size is too big: %d bytes (file path: %s)" % (
checksum_file_size, checksum_file_path))
expected_checksum = read_file_and_strip(checksum_file_path).split()[0]
actual_checksum = compute_sha256sum(data_file_path)
if actual_checksum == expected_checksum:
return True
err_msg = "Invalid checksum for file %s: got %s, expected %s" % (
data_file_path, actual_checksum, expected_checksum)
logging.warning(err_msg)
return False
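# The checksum file is expected to follow the usual sha256sum output layout, e.g.
# (illustrative):
#   <64-character hex digest>  yb-thirdparty-archive.tar.gz
# read_file_and_strip(...).split()[0] above keeps only the leading hex digest.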
def download_url(url, dest_path):
start_time_sec = time.time()
logging.info("Downloading %s to %s", url, dest_path)
dest_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_dir):
raise IOError("Destination directory %s does not exist" % dest_dir)
run_cmd(['curl', '-LsS', url, '-o', dest_path])
if not os.path.exists(dest_path):
raise IOError("Failed to download %s: file %s does not exist" % (url, dest_path))
elapsed_sec = time.time() - start_time_sec
logging.info("Downloaded %s to %s in %.1fs" % (url, dest_path, elapsed_sec))
def move_file(src_path, dest_path):
if g_verbose:
logging.info("Trying to move file %s to %s", src_path, dest_path)
if not os.path.exists(src_path):
raise IOError("Does not exist: %s" % src_path)
if not os.path.isfile(src_path):
raise IOError("Not a file: %s" % src_path)
if os.path.isdir(dest_path):
raise IOError("Destination path can't be a directory: %s" % dest_path)
if os.path.exists(dest_path):
logging.warning("Destination path already exists: %s, moving %s there anyway" % (
dest_path, src_path))
dest_parent_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_parent_dir):
raise IOError("Destination directory %s does not exist" % dest_parent_dir)
os.rename(src_path, dest_path)
def check_dir_exists_and_is_writable(dir_path, description):
if not os.path.isdir(dir_path):
raise IOError("%s directory %s does not exist" % (description, dir_path))
if not os.access(dir_path, os.W_OK):
raise IOError("%s directory %s is not writable by current user (%s)" % (
description, dir_path, getpass.getuser()))
# From https://github.com/ianlini/mkdir-p/blob/master/mkdir_p/mkdir_p.py
def mkdir_p(path, mode=0o777):
try:
os.makedirs(path, mode=mode)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def exists_or_is_link(dest):
"""
A file could be a link to a non-existent directory, or to a directory owned by a different
user in a directory with sticky bit set. In such cases os.path.exists might return false, but
islink will return true.
"""
return os.path.exists(dest) or os.path.islink(dest)
def download_and_extract(url, dest_dir_parent, local_cache_dir, nfs_cache_dir):
tar_gz_name = os.path.basename(url)
checksum_file_name = tar_gz_name + CHECKSUM_EXTENSION
install_dir_name = tar_gz_name[:-len(EXPECTED_ARCHIVE_EXTENSION)]
dest_dir = os.path.join(dest_dir_parent, install_dir_name)
if os.path.isdir(dest_dir):
logging.info("Directory %s already exists, no need to install." % dest_dir)
return
if not os.path.isdir(local_cache_dir):
logging.info("Directory %s does not exist, trying to create", local_cache_dir)
try:
mkdir_p(local_cache_dir)
except Exception as ex:
logging.info("Failed creating directory '%s': %s", local_cache_dir, ex)
check_dir_exists_and_is_writable(local_cache_dir, "Local cache")
if not url.endswith(EXPECTED_ARCHIVE_EXTENSION):
raise ValueError("Archive download URL is expected to end with %s, got: %s" % (
url, EXPECTED_ARCHIVE_EXTENSION))
if os.path.isdir(dest_dir):
logging.info("Directory %s already exists, someone must have created it concurrently.",
dest_dir)
return
start_time_sec = time.time()
logging.info("Installing %s into directory %s", url, dest_dir)
tmp_dir_prefix = os.path.abspath(os.path.join(dest_dir_parent, install_dir_name + '.tmp.'))
mkdir_p(dest_dir_parent)
tmp_dir = tempfile.mkdtemp(prefix=tmp_dir_prefix)
def cleanup():
if os.path.isdir(tmp_dir):
run_cmd(['rm', '-rf', tmp_dir])
atexit.register(cleanup)
for cache_dir in [local_cache_dir, nfs_cache_dir]:
cached_tar_gz_path = os.path.join(cache_dir, tar_gz_name)
cached_checksum_path = cached_tar_gz_path + CHECKSUM_EXTENSION
tar_gz_path = None
if os.path.exists(cached_tar_gz_path) and os.path.exists(cached_checksum_path):
logging.info("Verifying the checksum of %s", cached_tar_gz_path)
if verify_sha256sum(cached_checksum_path, cached_tar_gz_path):
tar_gz_path = os.path.join(cache_dir, tar_gz_name)
break
else:
remove_ignore_errors(cached_tar_gz_path)
remove_ignore_errors(cached_checksum_path)
if tar_gz_path is None:
tmp_tar_gz_path = os.path.join(tmp_dir, tar_gz_name)
tmp_checksum_path = os.path.join(tmp_dir, checksum_file_name)
download_url(url + CHECKSUM_EXTENSION, tmp_checksum_path)
download_url(url, tmp_tar_gz_path)
if not verify_sha256sum(tmp_checksum_path, tmp_tar_gz_path):
raise ValueError("Checksum verification failed for the download of %s" % url)
file_names = [tar_gz_name, checksum_file_name]
for file_name in file_names:
move_file(os.path.join(tmp_dir, file_name),
os.path.join(local_cache_dir, file_name))
tar_gz_path = os.path.join(local_cache_dir, tar_gz_name)
nfs_tar_gz_path = os.path.join(nfs_cache_dir, tar_gz_name)
nfs_checksum_file_path = os.path.join(nfs_cache_dir, checksum_file_name)
if (os.path.isdir(nfs_cache_dir) and
os.access(nfs_cache_dir, os.W_OK) and
(not os.path.exists(nfs_tar_gz_path) or
not os.path.exists(nfs_checksum_file_path))):
for file_name in file_names:
run_cmd(['cp',
os.path.join(local_cache_dir, file_name),
os.path.join(nfs_cache_dir, file_name)])
logging.info("Extracting %s in %s", tar_gz_path, tmp_dir)
run_cmd(['tar', 'xf', tar_gz_path, '-C', tmp_dir])
tmp_extracted_dir = os.path.join(tmp_dir, install_dir_name)
if not os.path.exists(tmp_extracted_dir):
raise IOError(
"Extracted '%s' in '%s' but a directory named '%s' did not appear" % (
tar_gz_path, os.getcwd(), tmp_extracted_dir))
if exists_or_is_link(dest_dir):
logging.info("Looks like %s was created concurrently", dest_dir)
return
if install_dir_name.startswith('linuxbrew'):
orig_brew_home_file = os.path.join(tmp_extracted_dir, 'ORIG_BREW_HOME')
if not os.path.exists(orig_brew_home_file):
raise IOError("File '%s' not found after extracting '%s'" % (
orig_brew_home_file, tar_gz_name))
orig_brew_home = read_file_and_strip(orig_brew_home_file)
if not orig_brew_home.startswith(dest_dir):
raise ValueError(
"Original Homebrew/Linuxbrew install home directory is '%s'"
" but we are trying to install it in '%s', and that is not a prefix of"
" the former." % (orig_brew_home, dest_dir))
already_installed_msg = (
"'%s' already exists, cannot move '%s' to it. Someone else must have "
"installed it concurrently. This is OK." % (
orig_brew_home, dest_dir))
def create_brew_symlink_if_needed():
brew_link_src = os.path.basename(orig_brew_home)
# dest_dir will now be a symlink pointing to brew_link_src. We are NOT creating a
# symlink inside dest_dir.
if not exists_or_is_link(dest_dir):
logging.info("Creating a symlink '%s' -> '%s'", dest_dir, brew_link_src)
try:
os.symlink(brew_link_src, dest_dir)
except OSError as os_error:
if os_error.errno == errno.EEXIST:
if exists_or_is_link(dest_dir):
logging.info(
"Symlink '%s' was created concurrently. This is probably OK.",
dest_dir)
else:
err_msg = (
"Failed creating symlink '%s' -> '%s' with error: %s, but the "
"symlink does not actually exist!" % (
dest_dir, brew_link_src, os_error))
logging.error(err_msg)
raise IOError(err_msg)
else:
logging.error("Unexpected error when creating symlink '%s' -> '%s': %s",
dest_dir, brew_link_src, os_error)
raise os_error
assert exists_or_is_link(dest_dir)
if not os.path.islink(dest_dir):
# A defensive sanity check.
err_msg = "%s exists but is not a symbolic link" % dest_dir
logging.error(err_msg)
raise IOError(err_msg)
else:
actual_link_src = os.readlink(dest_dir)
if actual_link_src != brew_link_src:
err_msg = "Symlink %s is not pointing to %s but instead points to %s" % (
dest_dir, brew_link_src, actual_link_src)
logging.error(err_msg)
raise IOError(err_msg)
if os.path.exists(orig_brew_home):
logging.info(already_installed_msg)
create_brew_symlink_if_needed()
return
logging.info("Moving '%s' to '%s'" % (tmp_extracted_dir, orig_brew_home))
try:
os.rename(tmp_extracted_dir, orig_brew_home)
except IOError as io_error:
# A defensive sanity check in case locking is not working properly.
if io_error.errno == errno.ENOTEMPTY:
# For whatever reason, this is what we get when the destination directory
# already exists.
logging.info(already_installed_msg)
create_brew_symlink_if_needed()
return
create_brew_symlink_if_needed()
else:
if g_verbose:
logging.info("Moving %s to %s", tmp_extracted_dir, dest_dir)
os.rename(tmp_extracted_dir, dest_dir)
logging.info("Installation of %s took %.1f sec", dest_dir, time.time() - start_time_sec)
def main():
# Created files/directories should be writable by the group.
os.umask(2)
logging.basicConfig(
level=logging.INFO,
format="%(filename)s:%(lineno)d " + socket.gethostname() + " pid " + str(os.getpid()) +
" %(asctime)s %(levelname)s: %(message)s")
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--url', help='URL to download. Must end with .tar.gz.', required=True)
parser.add_argument(
'--dest-dir-parent', help='Parent directory in which to extract the archive',
required=True)
parser.add_argument(
'--local-cache-dir',
default='/opt/yb-build/download_cache',
help='Download cache on the local disk')
parser.add_argument(
'--nfs-cache-dir',
default='/Volumes/n/jenkins/download_cache',
help='Download cache on NFS')
parser.add_argument('--verbose', action='store_true', help='Verbose logging')
args = parser.parse_args()
if args.verbose or os.getenv('YB_VERBOSE') == '1':
global g_verbose
g_verbose = True
download_and_extract(
url=args.url,
dest_dir_parent=args.dest_dir_parent,
local_cache_dir=args.local_cache_dir,
nfs_cache_dir=args.nfs_cache_dir)
if __name__ == '__main__':
main()
|
CARDS = (
(('Gr', 'Mu', 'Pe', 'Pl', 'Sc', 'Wh')),
(('Ca', 'Kn', 'Pi', 'Re', 'Ro', 'Wr')),
(('Ba', 'Bi', 'Co', 'Di', 'Ha', 'Ki', 'Li', 'Lo', 'St')),
)
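# A guess at the intended use (these look like Clue/Cluedo suspect, weapon, and room
# abbreviations): draw one hidden "solution" card from each category.
if __name__ == "__main__":
    import random
    solution = tuple(random.choice(group) for group in CARDS)
    print(solution)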
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
from .sampler import *
from .mh import *
from .ensemble import *
from .ptsampler import *
from . import utils
from . import autocorr
__version__ = "2.1.0"
def test():
from inspect import getmembers, ismethod
from .tests import Tests
print("Starting tests...")
failures = 0
tests = Tests()
for o in getmembers(tests):
tests.setUp()
if ismethod(o[1]) and o[0].startswith("test"):
print("{0} ...".format(o[0]))
try:
o[1]()
except Exception as e:
print("Failed with:\n {0.__class__.__name__}: {0}"
.format(e))
failures += 1
else:
print(" Passed.")
print("{0} tests failed".format(failures))
|
"""
byceps.services.seating.area_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from typing import Optional
from sqlalchemy import select
from sqlalchemy.sql import Select
from ...database import db, paginate, Pagination
from ...typing import PartyID
from ..ticketing.dbmodels.ticket import Ticket as DbTicket
from .dbmodels.area import Area as DbArea
from .dbmodels.seat import Seat as DbSeat
from .transfer.models import Area, SeatUtilization
def create_area(party_id: PartyID, slug: str, title: str) -> Area:
"""Create an area."""
area = DbArea(party_id, slug, title)
db.session.add(area)
db.session.commit()
return _db_entity_to_area(area)
def delete_area(area_id: str) -> None:
"""Delete an area."""
db.session.query(DbArea) \
.filter_by(id=area_id) \
.delete()
db.session.commit()
def count_areas_for_party(party_id: PartyID) -> int:
"""Return the number of seating areas for that party."""
return db.session \
.query(DbArea) \
.filter_by(party_id=party_id) \
.count()
def find_area_for_party_by_slug(party_id: PartyID, slug: str) -> Optional[Area]:
"""Return the area for that party with that slug, or `None` if not found."""
area = db.session \
.query(DbArea) \
.filter_by(party_id=party_id) \
.filter_by(slug=slug) \
.first()
if area is None:
return None
return _db_entity_to_area(area)
def get_areas_with_seat_utilization(
party_id: PartyID,
) -> list[tuple[Area, SeatUtilization]]:
"""Return all areas and their seat utilization for that party."""
query = _get_areas_with_seat_utilization_query(party_id)
rows = db.session.execute(query).all()
return [_map_areas_with_seat_utilization_row(row) for row in rows]
def get_areas_with_seat_utilization_paginated(
party_id: PartyID, page: int, per_page: int
) -> Pagination:
"""Return areas and their seat utilization for that party, paginated."""
items_query = _get_areas_with_seat_utilization_query(party_id)
count_query = select(db.func.count(DbArea.id)) \
.filter(DbArea.party_id == party_id)
return paginate(
items_query,
count_query,
page,
per_page,
item_mapper=_map_areas_with_seat_utilization_row,
)
def _get_areas_with_seat_utilization_query(party_id: PartyID) -> Select:
area = db.aliased(DbArea)
subquery_occupied_seat_count = select(db.func.count(DbTicket.id)) \
.filter(DbTicket.revoked == False) \
.filter(DbTicket.occupied_seat_id != None) \
.join(DbSeat) \
.filter(DbSeat.area_id == area.id) \
.scalar_subquery()
subquery_total_seat_count = select(db.func.count(DbSeat.id)) \
.filter_by(area_id=area.id) \
.scalar_subquery()
return select(
area,
subquery_occupied_seat_count,
subquery_total_seat_count,
) \
.filter(area.party_id == party_id) \
.group_by(area.id)
def _map_areas_with_seat_utilization_row(
row: tuple[DbArea, int, int]
) -> tuple[Area, SeatUtilization]:
area, occupied_seat_count, total_seat_count = row
utilization = SeatUtilization(
occupied=occupied_seat_count, total=total_seat_count
)
return _db_entity_to_area(area), utilization
def _db_entity_to_area(area: DbArea) -> Area:
return Area(
id=area.id,
party_id=area.party_id,
slug=area.slug,
title=area.title,
image_filename=area.image_filename,
image_width=area.image_width,
image_height=area.image_height,
)
|
"""
Unit test for multiple modules
This module illustrates what a proper unit test should look like.
Each function being tested has its own test procedure.
It also has a segment of "script code" that invokes the test
procedure when this module is run as a script.
Author: Walker M. White
Date: February 14, 2019
"""
import introcs # introcs assert functions
import helpers # function to be tested
def test_first_name():
"""
Test procedure for first_name(n)
"""
print('Testing first_name')
# Test case 1
result = helpers.first_name('Walker White')
introcs.assert_equals('Walker',result)
# Test case 2
result = helpers.first_name('Walker White')
introcs.assert_equals('Walker',result)
def test_last_name():
"""
Test procedure for last_name(n)
"""
print('Testing last_name')
# Test case 1
result = helpers.last_name('Walker White')
introcs.assert_equals('White',result)
# Test case 2
result = helpers.last_name('Walker White')
introcs.assert_equals('White',result)
def test_last_name_first():
"""
Test procedure for last_name_first(n)
"""
print('Testing last_name_first')
# Test case 1
result = helpers.last_name_first('Walker White')
introcs.assert_equals('White, Walker',result)
# Test case 2
result = helpers.last_name_first('Walker White')
introcs.assert_equals('White, Walker',result)
# Script code
if __name__ == '__main__':
test_first_name()
test_last_name()
test_last_name_first()
print('The module helpers passed all tests')
|
import glob, struct, random, csv
from tensorflow.core.example import example_pb2
# <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids.
SENTENCE_START = '<s>'
SENTENCE_END = '</s>'
PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence
UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words
START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence
STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences
# Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file.
def example_generator(data_path, single_pass):
"""Generates tf.Examples from data files.
Binary data format: <length><blob>. <length> represents the byte size
of <blob>. <blob> is serialized tf.Example proto. The tf.Example contains
the tokenized article text and summary.
Args:
data_path:
Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
single_pass:
Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.
Yields:
Deserialized tf.Example.
"""
while True:
filelist = glob.glob(data_path) # get the list of datafiles
assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty
if single_pass:
filelist = sorted(filelist)
else:
random.shuffle(filelist)
for f in filelist:
reader = open(f, 'rb')
while True:
len_bytes = reader.read(8)
if not len_bytes: break # finished reading this file
str_len = struct.unpack('q', len_bytes)[0]
example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
yield example_pb2.Example.FromString(example_str)
if single_pass:
print "example_generator completed reading all datafiles. No more data."
break
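# For reference, a writer that produces the <length><blob> framing consumed above could look
# like this (illustrative only, mirroring the struct calls in the reader):
#   data = tf_example.SerializeToString()
#   writer.write(struct.pack('q', len(data)))
#   writer.write(struct.pack('%ds' % len(data), data))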
def abstract2sents(abstract):
"""Splits abstract text from datafile into list of sentences.
Args:
abstract: string containing <s> and </s> tags for starts and ends of sentences
Returns:
sents: List of sentence strings (no tags)"""
cur = 0
sents = []
while True:
try:
start_p = abstract.index(SENTENCE_START, cur)
end_p = abstract.index(SENTENCE_END, start_p + 1)
cur = end_p + len(SENTENCE_END)
sents.append(abstract[start_p + len(SENTENCE_START):end_p])
except ValueError as e: # no more sentences
return sents
def text_generator(example_generator):
"""Generates article and abstract text from tf.Example.
Args:
example_generator: a generator of tf.Examples from file. See data.example_generator"""
while True:
e = next(example_generator)  # e is a tf.Example
try:
article_text = e.features.feature['article'].bytes_list.value[
0] # the article text was saved under the key 'article' in the data files
abstract_text = e.features.feature['abstract'].bytes_list.value[
0] # the abstract text was saved under the key 'abstract' in the data files
except ValueError:
# tf.logging.error('Failed to get article or abstract from example')
continue
else:
yield (article_text, abstract_text)
def read_bin_files(input_bin_path, output_csv_path,single_pass):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
input_gen = text_generator(example_generator(input_bin_path, single_pass))
with open(output_csv_path, mode='w') as output_file:
output_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
while True:
try:
(article,
abstract) = input_gen.next() # read the next example from file. article and abstract are both strings.
except StopIteration: # if there are no more examples:
# tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
if single_pass:
# tf.logging.info("single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
# self._finished_reading = True
break
else:
raise Exception("single_pass mode is off but the example generator is out of data; error.")
# Use the <s> and </s> tags in abstract to get a list of sentences.
abstract_sentences = [sent.strip() for sent in abstract2sents(abstract)]
output_writer.writerow(['. '.join(abstract_sentences), article])
if __name__ == "__main__":
input_bin_path = '/home/sampanna/Study/BDTS/modified-keras-text-summarization/files/cnn/finished_files/chunked/train_*.bin'
output_csv_path = 'cnn_summary_dataset.csv'
read_bin_files(input_bin_path, output_csv_path,True)
|
import os
import re
from sys import argv
from mod_pbxproj import XcodeProject
path = argv[1]
print path
project = XcodeProject.Load(path +'/Unity-iPhone.xcodeproj/project.pbxproj')
project.add_file_if_doesnt_exist('System/Library/Frameworks/Security.framework', tree='SDKROOT')
project.add_file_if_doesnt_exist('usr/lib/libicucore.dylib', tree='SDKROOT')
# regex for adjust sdk files
re_adjust_files = re.compile(r"SRWebSocket\.m")
# iterate all objects in the unity Xcode iOS project file
for key in project.get_ids():
obj = project.get_obj(key)
name = obj.get('name')
adjust_file_match = re_adjust_files.match(name if name else "")
if (adjust_file_match):
build_files = project.get_build_files(key)
for build_file in build_files:
# add the ARC compiler flag to the adjust file if doesn't exist
build_file.add_compiler_flag('-fobjc-arc')
if project.modified:
project.backup()
project.save()
|
'''
Created on Nov 9, 2017
@author: khoi.ngo
'''
def generate_random_string(prefix="", suffix="", size=20):
"""
    Generate a random string.
    :param prefix: (optional) Prefix of the string.
    :param suffix: (optional) Suffix of the string.
    :param size: (optional) Max length of the string (including prefix and suffix)
:return: The random string.
"""
import random
import string
left_size = size - len(prefix) - len(suffix)
random_str = ""
if left_size > 0:
random_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(left_size))
else:
print("Warning: Length of prefix and suffix more than %s chars" % str(size))
result = str(prefix) + random_str + str(suffix)
return result
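# Example: generate_random_string(prefix="Test_", size=12) returns "Test_" followed by
# 7 random uppercase letters/digits, e.g. "Test_K3M9Q2Z" (illustrative value only).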
def create_step(size):
from utils.step import Step
lst_step = []
for i in range(0, size):
step = Step(i, "")
lst_step.append(step)
return lst_step
def handle_exception(code):
    if isinstance(code, Exception):
raise code
else:
return code
async def perform(step, func, *agrs):
from indy.error import IndyError
from utils.report import Status
result = None
try:
result = await func(*agrs)
step.set_status(Status.PASSED)
except IndyError as E:
print("Indy error" + str(E))
step.set_message(str(E))
return E
except Exception as Ex:
print("Exception" + str(Ex))
        step.set_message(str(Ex))
return Ex
return result
async def perform_with_expected_code(step, func, *agrs, expected_code=0):
from indy.error import IndyError
from utils.report import Status
try:
await func(*agrs)
except IndyError as E:
        if E.error_code == expected_code:
step.set_status(Status.PASSED)
else:
print("Indy error" + str(E))
step.set_message(str(E))
return E
except Exception as Ex:
print("Exception" + str(Ex))
return Ex
|
import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
import wikipedia
import pyjokes
import webbrowser
import os
#import pyaudio
listenner = sr.Recognizer()
engine = pyttsx3.init()
voices = engine.getProperty("voices")
engine.setProperty('voice', voices[1].id)
def talk(text):
engine.say(text)
engine.runAndWait()
engine.say('Hello Rashid, I am your virtual assistant. how can i help you?')
engine.runAndWait()
def take_command():
    command = ""  # default value so a string is returned even if recognition fails
try:
with sr.Microphone() as source:
print("listening....")
voice = listenner.listen(source)
command = listenner.recognize_google(voice)
command = command.lower()
if 'alexa' in command:
command = command.replace('alexa', '')
print(command)
    except Exception:
pass
return command
def run_alexa():
command = take_command()
print(command)
if 'song' in command:
song = command.replace('song', '')
talk('playing' + song)
pywhatkit.playonyt(song)
elif 'time' in command:
time = datetime.datetime.now().strftime('%I:%M %p')
print(time)
talk("current time is" + time)
elif 'who ' in command:
person = command.replace('who ', '')
info = wikipedia.summary(person, 1)
print(info)
talk(info)
elif 'are you single' in command:
talk('No, i am in a relationship with your wifi')
elif 'joke' in command:
talk(pyjokes.get_joke())
elif 'wikipedia' in command:
ansu = command.replace('wikipedia', '')
answer = wikipedia.summary(ansu, sentences=2)
print(answer)
talk(answer)
elif 'open youtube' in command:
print('opening you tube.....')
talk('opening you tube.')
webbrowser.open('youtube.com')
elif 'open whatsapp' in command:
webbrowser.open('web.whatsapp.com')
print('opening whatsapp.....')
talk('opening whatsapp.')
elif 'open stackoverflow' in command:
webbrowser.open('stackoverflow.com')
print('opening Stackoverfolw.....')
talk('opening Stack overflow .')
elif 'music' in command:
music_dir = 'C:\\Music'
music= os.listdir(music_dir)
#print(music)
os.startfile(os.path.join(music_dir, music[5]))
    elif 'open vs code' in command:
code_path = "C:\\Users\\Rashid khan\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(code_path)
elif 'how are you' in command:
talk('I am feeling awesome and ready for your command')
elif 'hear me' in command:
talk('yes, I am getting you Rashid')
elif 'exit' in command:
exit()
else:
talk('Please say the command again.')
while True:
run_alexa()
|
# Generated by Django 3.1.1 on 2020-09-27 18:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('restaurants', '0006_auto_20200927_0920'),
]
operations = [
migrations.AddField(
model_name='restaurantlocation',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
# Generated by Django 2.2.23 on 2021-07-07 13:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BigModel',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('a', models.BooleanField(default=True)),
('b', models.BooleanField(null=True)),
('c', models.IntegerField(default=3)),
('d', models.CharField(default='hi', max_length=10)),
],
),
]
|
# coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@lightly.ai
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class JobStatusDataResult(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'JobResultType',
'data': 'GeneralJobResult'
}
attribute_map = {
'type': 'type',
'data': 'data'
}
def __init__(self, type=None, data=None, _configuration=None): # noqa: E501
"""JobStatusDataResult - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._type = None
self._data = None
self.discriminator = None
self.type = type
if data is not None:
self.data = data
@property
def type(self):
"""Gets the type of this JobStatusDataResult. # noqa: E501
:return: The type of this JobStatusDataResult. # noqa: E501
:rtype: JobResultType
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this JobStatusDataResult.
:param type: The type of this JobStatusDataResult. # noqa: E501
:type: JobResultType
"""
if self._configuration.client_side_validation and type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def data(self):
"""Gets the data of this JobStatusDataResult. # noqa: E501
:return: The data of this JobStatusDataResult. # noqa: E501
:rtype: GeneralJobResult
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this JobStatusDataResult.
:param data: The data of this JobStatusDataResult. # noqa: E501
:type: GeneralJobResult
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(JobStatusDataResult, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JobStatusDataResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, JobStatusDataResult):
return True
return self.to_dict() != other.to_dict()
|
from flask import render_template
from .import main
@main.app_errorhandler(404)  # if errorhandler is used instead of app_errorhandler, the handler applies only to errors that originate in this blueprint
def page_not_found(e):
return render_template(''), 404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template(''), 500
|
from setuptools import setup
setup(name='orinoco',
version='0.1',
description='Sweet data integration',
author='Quartic Technologies',
author_email='alex@quartic.io',
license='MIT',
packages=['orinoco'],
install_requires=[
'aiohttp',
'pyformance'
],
zip_safe=False)
|
#!/usr/bin/env python3
"""
Project title: CollembolAI
Authors: Stephan Weißbach, Stanislav Sys, Clément Schneider
Original repository: https://github.com/stasys-hub/Collembola_AI.git
Module title: output_inference_images.py
Purpose: draws bounding boxes from annotations on pictures. If provided with
groundtruth, it will also specify the correctness of predictions
Dependencies: See ReadMe
Last Update: 18.02.2022
"""
from PIL import Image, ImageFont, ImageDraw
import os
from utils.cocoutils import coco2df
def draw_coco_bbox(
coco,
out_dir,
coco_dir,
eval_mode=False,
prefix="annotated",
line_width=10,
fontsize=80,
fontYshift=-70,
):
"""
    Detectron2's module for writing annotated pictures was not very explicit to me, and its default output is not very pretty.
    This function will draw the annotations on the pictures of a coco dataset. The dataset can be provided as a coco instance,
    or as a dataframe resulting from coco2df. Modified pictures are written to the out_dir, with a name prefix.
    To adjust the display, simply change line_width (box line width) and fontsize (label font size). Label text can be shifted vertically
    with fontYshift.
"""
# define some colors for bounding boxes
with open(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "colors.txt"), "r"
) as colorfile:
colors = [color.replace("\n", "") for color in colorfile]
Image.MAX_IMAGE_PIXELS = None
fnt = ImageFont.truetype(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "FreeMono.ttf"),
fontsize,
)
    # convert the coco instance to a dataframe; if conversion fails, assume it is already a dataframe
    try:
        coco_df = coco2df(coco)
    except Exception:
        coco_df = coco
# create label for bounding box
if eval_mode:
coco_df["label"] = [
f"{' '.join(row['category_name'].split('__')[0].split('_'))} {round(row['score'], 2)} {'true detection' if not row['is_false_positive'] else 'false detection'}"
for _, row in coco_df.iterrows()
]
else:
coco_df["label"] = [
f"{' '.join(row['category_name'].split('__')[0].split('_'))} {round(row['score'], 2)}"
for _, row in coco_df.iterrows()
]
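    # reshape a COCO bbox [x, y, width, height] into corner points ((x1, y1), (x2, y2)) as expected by ImageDraw.rectangle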
resh = lambda x: ((x[0], x[1]), (x[0] + x[2], x[1] + x[3]))
coco_df["coordinates"] = coco_df["bbox"].apply(resh)
# sample colors randomly
# create dictionary so that every class maps to one color
colormap = {}
for idx, classlabel in enumerate(coco_df["category_name"].unique()):
colormap[classlabel] = colors[idx % len(colors)]
# add a color column
for idx, row in coco_df.iterrows():
coco_df.loc[idx, "color"] = colormap[row["category_name"]]
for img_name in coco_df.file_name.unique():
source_img = Image.open(f"{coco_dir}/{img_name}")
draw = ImageDraw.Draw(source_img)
for row in coco_df[coco_df["file_name"] == img_name][
["label", "coordinates", "color"]
].values:
draw.rectangle(row[1], outline=row[2], width=line_width)
draw.text(
(row[1][0][0], row[1][0][1] + fontYshift), row[0], font=fnt, fill=row[2]
)
print(f"Writing {out_dir}/{prefix}_{img_name}")
source_img.save(f"{out_dir}/{prefix}_{img_name}", "JPEG")
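# Illustrative usage sketch (hypothetical paths and objects, not part of the original module):
#   from pycocotools.coco import COCO
#   coco = COCO("instances.json")   # or a dataframe produced by coco2df
#   draw_coco_bbox(coco, out_dir="annotated", coco_dir="images", eval_mode=False)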
|
def is_even(n):
    return n % 2 == 0
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "5020e104a1349a0ae6532b007b48c68b8f64c049"
LLVM_SHA256 = "3113dbc5f7b3e6405375eedfe95e220268bcc4818c8d8453a23ef00f82d4b172"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:BUILD.bazel",
patch_file = "//third_party/llvm:macos_build_fix.patch",
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
|
import pytest
from salesman.orders.models import Order
from salesman.orders.signals import status_changed
_signal_called = False
def on_status_changed(sender, order, new_status, old_status, **kwargs):
global _signal_called
_signal_called = True
@pytest.mark.django_db
def test_order_changed_signal(rf):
status_changed.connect(on_status_changed, dispatch_uid="test_status_changed")
order = Order.objects.create(ref="1", subtotal=100, total=100)
order.status = order.statuses.COMPLETED
order.save()
assert _signal_called
status_changed.disconnect(on_status_changed, dispatch_uid="test_status_changed")
|
import sys
sys.path.append('model/')
|
'''
Created on Feb 26, 2021
@author: laurentmichel
'''
import os
class FileUtils(object):
file_path = os.path.dirname(os.path.realpath(__file__))
@staticmethod
def get_datadir():
return os.path.realpath(os.path.join(FileUtils.file_path, "../client/tests/", "data"))
@staticmethod
def get_projectdir():
return os.path.realpath(os.path.join(FileUtils.file_path, "../../"))
@staticmethod
def get_schemadir():
return os.path.realpath(os.path.join(FileUtils.file_path, "../../", "schema"))
|
import os
import numpy as np
from models import ALOCC_Model
from utils import pp, visualize, to_json, show_all_variables
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_integer("epoch", 40, "Epoch to train [25]")
flags.DEFINE_float("learning_rate", 0.002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_integer("attention_label", 1, "Conditioned label that growth attention of training label [1]")
flags.DEFINE_float("r_alpha", 0.2, "Refinement parameter [0.2]")
flags.DEFINE_integer("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size",128, "The size of batch images [64]")
flags.DEFINE_integer("input_height", 45, "The size of image to use. [45]")
flags.DEFINE_integer("input_width", None, "The size of image to use. If None, same value as input_height [None]")
flags.DEFINE_integer("output_height", 45, "The size of the output images to produce [45]")
flags.DEFINE_integer("output_width", None, "The size of the output images to produce. If None, same value as output_height [None]")
flags.DEFINE_string("dataset", "UCSD", "The name of dataset [UCSD, mnist]")
flags.DEFINE_string("dataset_address", "./dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Train", "The path of dataset")
flags.DEFINE_string("input_fname_pattern", "*", "Glob pattern of filename of input images [*]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("log_dir", "log", "Directory name to save the log [log]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("train", True, "True for training, False for testing [False]")
FLAGS = flags.FLAGS
def check_some_assertions():
"""
    Check some assertions on the input flags and create the output directories if they do not exist.
"""
if FLAGS.input_width is None:
FLAGS.input_width = FLAGS.input_height
if FLAGS.output_width is None:
FLAGS.output_width = FLAGS.output_height
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
if not os.path.exists(FLAGS.sample_dir):
os.makedirs(FLAGS.sample_dir)
def main(_):
"""
The main function for training steps
"""
pp.pprint(flags.FLAGS.__flags)
n_per_itr_print_results = 100
kb_work_on_patch = True
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
# Manual Switchs ------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
# DATASET PARAMETER : UCSD
#FLAGS.dataset = 'UCSD'
#FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Train'
nd_input_frame_size = (240, 360)
nd_slice_size = (45, 45)
n_stride = 25
n_fetch_data = 600
# ---------------------------------------------------------------------------------------------
# # DATASET PARAMETER : MNIST
# FLAGS.dataset = 'mnist'
# FLAGS.dataset_address = './dataset/mnist'
# nd_input_frame_size = (28, 28)
# nd_slice_size = (28, 28)
FLAGS.train = True
FLAGS.input_width = nd_slice_size[0]
FLAGS.input_height = nd_slice_size[1]
FLAGS.output_width = nd_slice_size[0]
FLAGS.output_height = nd_slice_size[1]
FLAGS.sample_dir = 'export/'+FLAGS.dataset +'_%d.%d'%(nd_slice_size[0],nd_slice_size[1])
FLAGS.input_fname_pattern = '*'
check_some_assertions()
# manual handling of GPU
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
run_config = tf.ConfigProto(gpu_options=gpu_options)
run_config.gpu_options.allow_growth=True
with tf.Session(config=run_config) as sess:
tmp_model = ALOCC_Model(
sess,
input_width=FLAGS.input_width,
input_height=FLAGS.input_height,
output_width=FLAGS.output_width,
output_height=FLAGS.output_height,
batch_size=FLAGS.batch_size,
sample_num=FLAGS.batch_size,
attention_label=FLAGS.attention_label,
r_alpha=FLAGS.r_alpha,
dataset_name=FLAGS.dataset,
dataset_address=FLAGS.dataset_address,
input_fname_pattern=FLAGS.input_fname_pattern,
checkpoint_dir=FLAGS.checkpoint_dir,
is_training = FLAGS.train,
log_dir=FLAGS.log_dir,
sample_dir=FLAGS.sample_dir,
nd_patch_size=nd_slice_size,
n_stride=n_stride,
n_per_itr_print_results=n_per_itr_print_results,
kb_work_on_patch=kb_work_on_patch,
nd_input_frame_size = nd_input_frame_size,
n_fetch_data=n_fetch_data)
#show_all_variables()
if FLAGS.train:
print('Program is on Train Mode')
tmp_model.train(FLAGS)
else:
if not tmp_model.load(FLAGS.checkpoint_dir)[0]:
print('Program is on Test Mode')
raise Exception("[!] Train a model first, then run test mode from file test.py")
if __name__ == '__main__':
tf.app.run()
|
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
import sys
import os.path as path
# when developing, prefer local kapture to the one installed on the system
HERE_PATH = path.abspath(path.normpath(path.dirname(__file__)))
KATURE_LOCALIZATION_REPO_PATH = path.normpath(path.join(HERE_PATH, '../'))
# check the presence of kapture directory in repo to be sure its not the installed version
if path.isdir(path.join(KATURE_LOCALIZATION_REPO_PATH, 'kapture_localization')):
# workaround for sibling import
sys.path.insert(0, KATURE_LOCALIZATION_REPO_PATH)
# KATURE_LOCALIZATION_TOOLS_PATH = path.normpath(path.join(HERE_PATH, '../'))
# # check the presence of pipeline directory in repo to be sure its not the installed version
# if path.isdir(path.join(KATURE_LOCALIZATION_TOOLS_PATH, 'pipeline')):
# # workaround for sibling import
# sys.path.insert(0, KATURE_LOCALIZATION_TOOLS_PATH)
|
from dataclasses import dataclass
from apischema import serialize
@dataclass
class Foo:
bar: int = 0
baz: str | None = None
assert serialize(Foo, Foo(), exclude_defaults=True) == {}
assert serialize(Foo, Foo(), exclude_none=True) == {"bar": 0}
|
# coding=utf-8
"""
@Time: 2020/11/14 2:15 PM
@Author: Aopolin
@File: MolweniConfig.py
@Contact: aopolin.ii@gmail.com
@Description:
"""
class Config(object):
def __init__(self):
self.SQUAD_DIR = "../../Dataset/squad2.0"
self.MOLWENI_DIR = "../../Dataset/Molweni"
self.model_type = "bert" # ["distilbert", "albert", "bert", "xlnet", ...]
self.model_name_or_path = "bert-base-uncased"
        self.output_dir = "/tmp/debug_squad/"  # output directory path
self.data_dir = ""
self.train_file = self.MOLWENI_DIR + "/train_small.json"
self.predict_file = self.MOLWENI_DIR + "/dev_small.json"
self.config_name = ""
self.tokenizer_name = ""
self.cache_dir = ""
self.version_2_with_negative = True
self.null_score_diff_threshold = 0.0
self.n_gpu = 0
self.max_seq_length = 384
self.doc_stride = 128
self.max_query_length = 64
self.do_train = True
self.do_eval = True
self.evaluate_during_training = False
self.do_lower_case = True
self.per_gpu_train_batch_size = 12
self.per_gpu_eval_batch_size = 8
self.learning_rate = 3e-5
self.gradient_accumulation_steps = 1 # Number of updates steps to accumulate before performing a backward/update pass
self.weight_decay = 0.0
self.adam_epsilon = 1e-8
self.max_grad_norm = 1.0
        self.num_train_epochs = 1.0  # number of training epochs
        self.max_steps = -1  # maximum number of training steps; if set > 0, overrides num_train_epochs
self.warmup_steps = 0
self.n_best_size = 20
self.max_answer_length = 30
self.verbose_logging = False
self.lang_id = 0
        self.logging_steps = 500  # logging interval (in steps)
        self.save_steps = 2000  # interval (in steps) for saving the model and its parameters
self.eval_all_checkpoints = False
self.no_cuda = True
        self.overwrite_cache = False  # overwrite the cached data files
        self.seed = 42  # random seed
        self.local_rank = -1  # process rank for distributed training; -1 means distributed mode is not used
        self.fp16 = False
        self.fp16_opt_level = "O1"
self.server_ip = ""
self.server_port = ""
self.threads = 1
self.bert_dir = "../../Model_files/bert-base-uncased/"
self.device = "cpu"
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
from google.cloud.aiplatform_v1.types import io
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"Dataset",
"ImportDataConfig",
"ExportDataConfig",
},
)
class Dataset(proto.Message):
r"""A collection of DataItems and Annotations on them.
Attributes:
name (str):
Output only. The resource name of the
Dataset.
display_name (str):
Required. The user-defined name of the
Dataset. The name can be up to 128 characters
long and can be consist of any UTF-8 characters.
description (str):
Optional. The description of the Dataset.
metadata_schema_uri (str):
Required. Points to a YAML file stored on
Google Cloud Storage describing additional
information about the Dataset. The schema is
defined as an OpenAPI 3.0.2 Schema Object. The
schema files that can be used here are found in
gs://google-cloud-aiplatform/schema/dataset/metadata/.
metadata (google.protobuf.struct_pb2.Value):
Required. Additional information about the
Dataset.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Dataset was
created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Dataset was
last updated.
etag (str):
Used to perform consistent read-modify-write
updates. If not set, a blind "overwrite" update
happens.
labels (Sequence[google.cloud.aiplatform_v1.types.Dataset.LabelsEntry]):
The labels with user-defined metadata to organize your
Datasets.
Label keys and values can be no longer than 64 characters
(Unicode codepoints), can only contain lowercase letters,
numeric characters, underscores and dashes. International
characters are allowed. No more than 64 user labels can be
associated with one Dataset (System labels are excluded).
See https://goo.gl/xmQnxf for more information and examples
of labels. System reserved label keys are prefixed with
"aiplatform.googleapis.com/" and are immutable. Following
system labels exist for each Dataset:
- "aiplatform.googleapis.com/dataset_metadata_schema":
output only, its value is the
[metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]
title.
encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
Customer-managed encryption key spec for a
Dataset. If set, this Dataset and all
sub-resources of this Dataset will be secured by
this key.
"""
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
description = proto.Field(
proto.STRING,
number=16,
)
metadata_schema_uri = proto.Field(
proto.STRING,
number=3,
)
metadata = proto.Field(
proto.MESSAGE,
number=8,
message=struct_pb2.Value,
)
create_time = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
update_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
etag = proto.Field(
proto.STRING,
number=6,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=7,
)
encryption_spec = proto.Field(
proto.MESSAGE,
number=11,
message=gca_encryption_spec.EncryptionSpec,
)
class ImportDataConfig(proto.Message):
r"""Describes the location from where we import data into a
Dataset, together with the labels that will be applied to the
DataItems and the Annotations.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_source (google.cloud.aiplatform_v1.types.GcsSource):
The Google Cloud Storage location for the
input content.
This field is a member of `oneof`_ ``source``.
data_item_labels (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig.DataItemLabelsEntry]):
Labels that will be applied to newly imported DataItems. If
an identical DataItem as one being imported already exists
in the Dataset, then these labels will be appended to these
of the already existing one, and if labels with identical
key is imported before, the old label value will be
overwritten. If two DataItems are identical in the same
import data operation, the labels will be combined and if
key collision happens in this case, one of the values will
be picked randomly. Two DataItems are considered identical
if their content bytes are identical (e.g. image bytes or
pdf bytes). These labels will be overridden by Annotation
labels specified inside index file referenced by
[import_schema_uri][google.cloud.aiplatform.v1.ImportDataConfig.import_schema_uri],
e.g. jsonl file.
import_schema_uri (str):
Required. Points to a YAML file stored on Google Cloud
Storage describing the import format. Validation will be
done against the schema. The schema is defined as an
`OpenAPI 3.0.2 Schema
Object <https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject>`__.
"""
gcs_source = proto.Field(
proto.MESSAGE,
number=1,
oneof="source",
message=io.GcsSource,
)
data_item_labels = proto.MapField(
proto.STRING,
proto.STRING,
number=2,
)
import_schema_uri = proto.Field(
proto.STRING,
number=4,
)
class ExportDataConfig(proto.Message):
r"""Describes what part of the Dataset is to be exported, the
destination of the export and how to export.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination):
The Google Cloud Storage location where the output is to be
written to. In the given directory a new directory will be
created with name:
``export-data-<dataset-display-name>-<timestamp-of-export-call>``
where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
format. All export output will be written into that
directory. Inside that directory, annotations with the same
schema will be grouped into sub directories which are named
with the corresponding annotations' schema title. Inside
these sub directories, a schema.yaml will be created to
describe the output format.
This field is a member of `oneof`_ ``destination``.
annotations_filter (str):
A filter on Annotations of the Dataset. Only Annotations on
to-be-exported DataItems(specified by [data_items_filter][])
that match this filter will be exported. The filter syntax
is the same as in
[ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
"""
gcs_destination = proto.Field(
proto.MESSAGE,
number=1,
oneof="destination",
message=io.GcsDestination,
)
annotations_filter = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
"""
:mod:`pyformlang.regular_expression`
====================================
This module deals with regular expression.
By default, this module does not use the standard way to write regular
expressions. Please read the documentation of Regex for more information.
Available Classes
-----------------
Regex
A regular expression
MisformedRegexError
An error occurring when the input regex is incorrect
"""
from .regex import Regex
from .regex_objects import MisformedRegexError
from .python_regex import PythonRegex
__all__ = ["Regex", "PythonRegex", "MisformedRegexError"]
|
from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
cfg_keys = 'version description keywords author author_email'.split()
expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
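# A hypothetical settings.ini might look like this (illustrative sketch only; the real values live in the repo's settings.ini):
# [DEFAULT]
# lib_name = mylib
# user = someuser
# version = 0.0.1
# description = A short description
# keywords = some keywords
# author = Jane Doe
# author_email = jane@example.com
# branch = main
# license = apache2
# status = 3
# min_python = 3.7
# audience = developers
# language = english
# git_url = https://github.com/someuser/mylib
# requirements = numpy pandas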
setup_cfg = {o:cfg[o] for o in cfg_keys}
licenses = {
'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
'4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 3.10'.split()
requirements = cfg.get('requirements','').split()
lic = licenses[cfg['license']]
min_python = cfg['min_python']
setuptools.setup(
name = cfg['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(cfg['status'])],
'Intended Audience :: ' + cfg['audience'].title(),
'License :: ' + lic[1],
'Natural Language :: ' + cfg['language'].title(),
] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]],
url = cfg['git_url'],
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
dependency_links = cfg.get('dep_links','').split(),
python_requires = '>=' + cfg['min_python'],
long_description = open('README.md', encoding='utf8').read(),
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = { 'console_scripts': cfg.get('console_scripts','').split() },
**setup_cfg)
|
# -*- coding: utf-8 -*-
"""
train bert
python tagging/train.py --train ../../data/v6/corpus.wordbiased.tag.train --test ../../data/v6/corpus.wordbiased.tag.test --working_dir TEST --train_batch_size 3 --test_batch_size 10 --hidden_size 32 --debug_skip
"""
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
from collections import defaultdict
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from tqdm import tqdm
import torch
import torch.nn as nn
import pickle
import sys
import os
import numpy as np
from pytorch_pretrained_bert.modeling import BertForTokenClassification
from torch.nn import CrossEntropyLoss
from tensorboardX import SummaryWriter
import argparse
import sklearn.metrics as metrics
import model as tagging_model
import utils as tagging_utils
import sys; sys.path.append('.')
from shared.data import get_dataloader
from shared.args import ARGS
from shared.constants import CUDA
if not os.path.exists(ARGS.working_dir):
os.makedirs(ARGS.working_dir)
with open(ARGS.working_dir + '/command.sh', 'w') as f:
f.write('python' + ' '.join(sys.argv) + '\n')
# # # # # # # # ## # # # ## # # DATA # # # # # # # # ## # # # ## # #
print('LOADING DATA...')
tokenizer = BertTokenizer.from_pretrained(ARGS.bert_model, cache_dir=ARGS.working_dir + '/cache')
tok2id = tokenizer.vocab
tok2id['<del>'] = len(tok2id)
train_dataloader, num_train_examples = get_dataloader(
ARGS.train,
tok2id, ARGS.train_batch_size,
ARGS.working_dir + '/train_data.pkl',
categories_path=ARGS.categories_file)
eval_dataloader, num_eval_examples = get_dataloader(
ARGS.test,
tok2id, ARGS.test_batch_size, ARGS.working_dir + '/test_data.pkl',
test=True, categories_path=ARGS.categories_file)
# # # # # # # # ## # # # ## # # MODEL # # # # # # # # ## # # # ## # #
print('BUILDING MODEL...')
if ARGS.tagger_from_debiaser:
model = tagging_model.TaggerFromDebiaser(
cls_num_labels=ARGS.num_categories, tok_num_labels=ARGS.num_tok_labels,
tok2id=tok2id)
elif ARGS.extra_features_top:
model = tagging_model.BertForMultitaskWithFeaturesOnTop.from_pretrained(
ARGS.bert_model,
cls_num_labels=ARGS.num_categories,
tok_num_labels=ARGS.num_tok_labels,
cache_dir=ARGS.working_dir + '/cache',
tok2id=tok2id)
elif ARGS.extra_features_bottom:
model = tagging_model.BertForMultitaskWithFeaturesOnBottom.from_pretrained(
ARGS.bert_model,
cls_num_labels=ARGS.num_categories,
tok_num_labels=ARGS.num_tok_labels,
cache_dir=ARGS.working_dir + '/cache',
tok2id=tok2id)
else:
model = tagging_model.BertForMultitask.from_pretrained(
ARGS.bert_model,
cls_num_labels=ARGS.num_categories,
tok_num_labels=ARGS.num_tok_labels,
cache_dir=ARGS.working_dir + '/cache',
tok2id=tok2id)
if CUDA:
model = model.cuda()
print('PREPPING RUN...')
# # # # # # # # ## # # # ## # # OPTIMIZER, LOSS # # # # # # # # ## # # # ## # #
optimizer = tagging_utils.build_optimizer(
model, int((num_train_examples * ARGS.epochs) / ARGS.train_batch_size),
ARGS.learning_rate)
loss_fn = tagging_utils.build_loss_fn()
# # # # # # # # ## # # # ## # # TRAIN # # # # # # # # ## # # # ## # #
writer = SummaryWriter(ARGS.working_dir)
print('INITIAL EVAL...')
model.eval()
results = tagging_utils.run_inference(model, eval_dataloader, loss_fn, tokenizer)
writer.add_scalar('eval/tok_loss', np.mean(results['tok_loss']), 0)
writer.add_scalar('eval/tok_acc', np.mean(results['labeling_hits']), 0)
print('TRAINING...')
model.train()
for epoch in range(ARGS.epochs):
print('STARTING EPOCH ', epoch)
losses = tagging_utils.train_for_epoch(model, train_dataloader, loss_fn, optimizer)
writer.add_scalar('train/loss', np.mean(losses), epoch + 1)
# eval
print('EVAL...')
model.eval()
results = tagging_utils.run_inference(model, eval_dataloader, loss_fn, tokenizer)
writer.add_scalar('eval/tok_loss', np.mean(results['tok_loss']), epoch + 1)
writer.add_scalar('eval/tok_acc', np.mean(results['labeling_hits']), epoch + 1)
model.train()
print('SAVING...')
torch.save(model.state_dict(), ARGS.working_dir + '/model_%d.ckpt' % epoch)
|
import cv2
cap = cv2.VideoCapture("demo2.mp4")
ret, img = cap.read()
roibb = cv2.selectROI("image", img, fromCenter=False, showCrosshair=True)
print('X', roibb[0])
print('Y', roibb[1])
print('Width', roibb[2])
print('Height', roibb[3])
with open('roi.cfg', 'w+') as rf:
rf.write(str(roibb[0]))
rf.write('\n')
rf.write(str(roibb[1]))
rf.write('\n')
rf.write(str(roibb[2]))
rf.write('\n')
rf.write(str(roibb[3]))
|
from distutils.version import LooseVersion
from ... import logging
from .base import HAVE_DIPY, dipy_version, dipy_to_nipype_interface, get_dipy_workflows
IFLOGGER = logging.getLogger("nipype.interface")
if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion("0.15"):
from dipy.workflows import align
l_wkflw = get_dipy_workflows(align)
for name, obj in l_wkflw:
new_name = name.replace("Flow", "")
globals()[new_name] = dipy_to_nipype_interface(new_name, obj)
del l_wkflw
else:
IFLOGGER.info(
"We advise you to upgrade DIPY version. This upgrade will"
" open access to more function"
)
|
from approaches.abstract_approach import AbstractApproach
from approaches.simple import SimpleApproach
from approaches.tfidf import TfIdfApproach
from approaches.intersection import IntersectionApproach
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.express as px
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
df = pd.read_csv('https://plotly.github.io/datasets/country_indicators.csv')
available_indicators = df['Indicator Name'].unique()
app.layout = html.Div([
html.Div([
html.Div([
dcc.Dropdown(id='crossfilter-xaxis-column',
options=[{
'label': i,
'value': i
} for i in available_indicators],
value='Fertility rate, total (births per woman)'),
dcc.RadioItems(id='crossfilter-xaxis-type',
options=[{
'label': i,
'value': i
} for i in ['Linear', 'Log']],
value='Linear',
labelStyle={
'display': 'inline-block',
'marginTop': '5px'
})
],
style={
'width': '49%',
                    'display': 'inline-block'
}),
html.Div([
dcc.Dropdown(id='crossfilter-yaxis-column',
options=[{
'label': i,
'value': i
} for i in available_indicators],
value='Life expectancy at birth, total (years)'),
dcc.RadioItems(id='crossfilter-yaxis-type',
options=[{
'label': i,
'value': i
} for i in ['Linear', 'Log']],
value='Linear',
labelStyle={
'display': 'inline-block',
'marginTop': '5px'
})
],
style={
'width': '49%',
'float': 'right',
'display': 'inline-block'
})
],
style={'padding': '10px 5px'}),
html.Div([
dcc.Graph(id='crossfilter-indicator-scatter',
hoverData={'points': [{
'customdata': 'Japan'
}]})
],
style={
'width': '49%',
'display': 'inline-block',
'padding': '0 20'
}),
html.Div([
dcc.Graph(id='x-time-series'),
dcc.Graph(id='y-time-series'),
],
style={
'display': 'inline-block',
'width': '49%'
}),
html.Div(dcc.Slider(
id='crossfilter-year--slider',
min=df['Year'].min(),
max=df['Year'].max(),
value=df['Year'].max(),
marks={str(year): str(year)
for year in df['Year'].unique()},
step=None),
style={
'width': '49%',
'padding': '0px 20px 20px 20px'
})
])
@app.callback(
dash.dependencies.Output('crossfilter-indicator-scatter', 'figure'), [
dash.dependencies.Input('crossfilter-xaxis-column', 'value'),
dash.dependencies.Input('crossfilter-yaxis-column', 'value'),
dash.dependencies.Input('crossfilter-xaxis-type', 'value'),
dash.dependencies.Input('crossfilter-yaxis-type', 'value'),
dash.dependencies.Input('crossfilter-year--slider', 'value')
])
def update_graph(xaxis_column_name, yaxis_column_name, xaxis_type, yaxis_type,
year_value):
dff = df[df['Year'] == year_value]
fig = px.scatter(
x=dff[dff['Indicator Name'] == xaxis_column_name]['Value'],
y=dff[dff['Indicator Name'] == yaxis_column_name]['Value'],
hover_name=dff[dff['Indicator Name'] ==
yaxis_column_name]['Country Name'])
fig.update_traces(customdata=dff[dff['Indicator Name'] ==
yaxis_column_name]['Country Name'])
fig.update_xaxes(title=xaxis_column_name,
type='linear' if xaxis_type == 'Linear' else 'log')
fig.update_yaxes(title=yaxis_column_name,
type='linear' if yaxis_type == 'Linear' else 'log')
fig.update_layout(margin={
'l': 40,
'b': 40,
't': 10,
'r': 0
},
hovermode='closest')
return fig
def create_time_series(dff, axis_type, title):
fig = px.scatter(dff, x='Year', y='Value')
fig.update_traces(mode='lines+markers')
fig.update_xaxes(showgrid=False)
fig.update_yaxes(type='linear' if axis_type == 'Linear' else 'log')
fig.add_annotation(x=0,
y=0.85,
xanchor='left',
yanchor='bottom',
xref='paper',
yref='paper',
showarrow=False,
align='left',
text=title)
fig.update_layout(height=225, margin={'l': 20, 'b': 30, 'r': 10, 't': 10})
return fig
@app.callback(dash.dependencies.Output('x-time-series', 'figure'), [
dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),
dash.dependencies.Input('crossfilter-xaxis-column', 'value'),
dash.dependencies.Input('crossfilter-xaxis-type', 'value')
])
def update_y_timeseries(hoverData, xaxis_column_name, axis_type):
country_name = hoverData['points'][0]['customdata']
dff = df[df['Country Name'] == country_name]
dff = dff[dff['Indicator Name'] == xaxis_column_name]
title = '<b>{}</b><br>{}'.format(country_name, xaxis_column_name)
return create_time_series(dff, axis_type, title)
@app.callback(dash.dependencies.Output('y-time-series', 'figure'), [
dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),
dash.dependencies.Input('crossfilter-yaxis-column', 'value'),
dash.dependencies.Input('crossfilter-yaxis-type', 'value')
])
def update_x_timeseries(hoverData, yaxis_column_name, axis_type):
dff = df[df['Country Name'] == hoverData['points'][0]['customdata']]
dff = dff[dff['Indicator Name'] == yaxis_column_name]
return create_time_series(dff, axis_type, yaxis_column_name)
if __name__ == '__main__':
app.run_server(debug=True)
|
import pytest
from poetry.core.semver import Version
from poetry.core.semver import VersionRange
from poetry.core.semver import VersionUnion
from poetry.core.semver import parse_constraint
@pytest.mark.parametrize(
"constraint,version",
[
("~=3.8", VersionRange(min=Version(3, 8), max=Version(4, 0), include_min=True)),
(
"== 3.8.*",
VersionRange(min=Version(3, 8), max=Version(3, 9, 0), include_min=True),
),
(
"~= 3.8",
VersionRange(min=Version(3, 8), max=Version(4, 0), include_min=True),
),
("~3.8", VersionRange(min=Version(3, 8), max=Version(3, 9), include_min=True)),
("~ 3.8", VersionRange(min=Version(3, 8), max=Version(3, 9), include_min=True)),
(">3.8", VersionRange(min=Version(3, 8))),
(">=3.8", VersionRange(min=Version(3, 8), include_min=True)),
(">= 3.8", VersionRange(min=Version(3, 8), include_min=True)),
(
">3.8,<=6.5",
VersionRange(min=Version(3, 8), max=Version(6, 5), include_max=True),
),
(
">3.8,<= 6.5",
VersionRange(min=Version(3, 8), max=Version(6, 5), include_max=True),
),
(
"> 3.8,<= 6.5",
VersionRange(min=Version(3, 8), max=Version(6, 5), include_max=True),
),
(
"> 3.8,<=6.5",
VersionRange(min=Version(3, 8), max=Version(6, 5), include_max=True),
),
(
">3.8 ,<=6.5",
VersionRange(min=Version(3, 8), max=Version(6, 5), include_max=True),
),
(
">3.8, <=6.5",
VersionRange(min=Version(3, 8), max=Version(6, 5), include_max=True),
),
(
">3.8 , <=6.5",
VersionRange(min=Version(3, 8), max=Version(6, 5), include_max=True),
),
(
"==3.8",
VersionRange(
min=Version(3, 8), max=Version(3, 8), include_min=True, include_max=True
),
),
(
"== 3.8",
VersionRange(
min=Version(3, 8), max=Version(3, 8), include_min=True, include_max=True
),
),
(
"~2.7 || ~3.8",
VersionUnion(
VersionRange(min=Version(2, 7), max=Version(2, 8), include_min=True),
VersionRange(min=Version(3, 8), max=Version(3, 9), include_min=True),
),
),
(
"~2.7||~3.8",
VersionUnion(
VersionRange(min=Version(2, 7), max=Version(2, 8), include_min=True),
VersionRange(min=Version(3, 8), max=Version(3, 9), include_min=True),
),
),
(
"~ 2.7||~ 3.8",
VersionUnion(
VersionRange(min=Version(2, 7), max=Version(2, 8), include_min=True),
VersionRange(min=Version(3, 8), max=Version(3, 9), include_min=True),
),
),
],
)
def test_parse_constraint(constraint, version):
assert parse_constraint(constraint) == version
|
#!/usr/bin/env python3
"""Using repeat() and map()
"""
#end_pymotw_header
from itertools import *
for i in map(lambda x, y: (x, y, x * y), repeat(2), range(5)):
print('{:d} * {:d} = {:d}'.format(*i))
|
import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean
import random
import math
from randomdict import RandomDict
# from chest import *
import shelve
from Patch import *
from AgentBranch import *
import gc
from memory_profiler import memory_usage
#Model.py
class Model():
def __init__(self, gui, num_agents, mutate, genetic, live_visual, agent_attributes,
model_attributes):
if live_visual:
self.GUI = gui
self.live_visual = live_visual
self.name = gui.name
self.run = gui.run
self.initial_population = num_agents
self.mutate = mutate
self.genetic = genetic
self.agent_attributes = agent_attributes
self.model_attributes = model_attributes
self.attributes = agent_attributes + model_attributes
# attributes that are not copied during mutation or herding
self.drop_attr = ["col", "row", "dx", "dy", "id", "wealth", "top_wealth",
"sugar", "water","target", "not_target",
"exchange_target", "not_exchange_target", "parent", "image"]
# if self.GUI.live_visual:
# self.drop_attr.append("image")
if self.mutate:
self.max_mutate_rate = 0.5 if mutate else 0 #.5
if self.genetic:
self.cross_over_rate = .5
############ set model parameters ############
self.total_agents_created = 0
self.goods = ["sugar", "water"]
self.goods_params = {good:{"min":5,
"max":25} for good in self.goods}
self.max_init_demand_vals = {"price":{"min": 1/2,
"max": 2},
"quantity":{"min":10,
"max":25}}
self.consumption_rate = {"sugar":.5,
"water":.5}
self.primary_breeds = ["basic", "switcher", "arbitrageur"]
self.secondary_breeds = ["herder"]
self.breeds = self.primary_breeds + self.secondary_breeds
# all agents start as basic, only mutation can create other agents
basic = 1
self.breed_probabilities = {"basic":basic, # if you are not a basic, you are a switcher
"herder":0,
"arbitrageur":0}
self.max_vision = 1
# record price of every transaction
# then take average at end of period
self.transaction_prices = []
self.average_price = np.nan
self.total_exchanges = 0
############ import map and build nav_dict ############
# hash table that identifies possible moves relative to agent position
self.nav_dict = {
v:{
i:{
j: True for j in range(-v, v + 1) if 0 < (i ** 2 + j ** 2) <= (v ** 2)}
for i in range(-v, v + 1)}
for v in range(1, self.max_vision + 1)}
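        # e.g. with max_vision = 1: nav_dict[1] == {-1: {0: True}, 0: {-1: True, 1: True}, 1: {0: True}},
        # i.e. only the four orthogonally adjacent cells are reachable with vision 1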
        # sugarMap.shape returns a tuple with the dimensions
        # of the dataframe
self.sugarMap = pd.read_csv('sugar-map.txt', header = None, sep = ' ')
# add 1 to each max_Val
for key in self.sugarMap:
self.sugarMap[key] = self.sugarMap[key].add(1)
self.rows, self.cols = self.sugarMap.shape
############ Initialization ############
self.initializePatches()
self.initializeAgents()
self.data_dict = shelve.open("shelves\\masterShelve", writeback = True)
for attribute in self.attributes:
self.data_dict[attribute] = shelve.open("shelves\\subshelve-"+attribute, writeback = True)
def initializePatches(self):
        # Instantiate Patches
        # Create a dictionary to hold the patches, organized as a grid.
        # We first fill these with zeros as placeholders
self.patch_dict = {row:{col:0}
for row in range(self.rows) for col in range(self.cols)}
for row in range(self.rows):
for col in range(self.cols):
# replace zeros with actual Patch objects
good = "sugar" if row + col < self.cols else "water"
self.patch_dict[row][col] = Patch(self, row , col,
self.sugarMap[row][col], good)
        # use RandomDict - O(1) random choice - for choosing a random empty patch
self.empty_patches = RandomDict({
(row,col):self.patch_dict[row][col]
for row in range(self.rows) for col in range(self.cols)})
def initializeAgents(self):
# agents stored in a dict by ID
self.agent_dict = {} #if self.live_visual else Chest(path = data_aggregator.folder) #shelve.open("agent_dict")
# dead agents will be removed from agent_dict
for i in range(self.initial_population):
self.total_agents_created += 1
ID = self.total_agents_created
row, col = self.chooseRandomEmptyPatch()
self.agent_dict[ID] = Agent(self, row, col, ID)
self.patch_dict[row][col].agent = self.agent_dict[ID]
self.population = self.total_agents_created
# def recordAgentLocationInDict(self, agent):
# patchIndex = self.convert2dTo1d(agent.row, agent.col)
# self.agentLocationDict[patchIndex] = agent
def chooseRandomEmptyPatch(self):
row, col = self.empty_patches.random_key()
del self.empty_patches[row, col]
return row, col
def runModel(self, periods):
def updateModelVariables():
self.population = len(agent_list)
self.average_price = gmean(self.transaction_prices)
self.transaction_prices = []
for period in range(1, periods + 1):
self.growPatches()
agent_list = list(self.agent_dict.values())
random.shuffle(agent_list)
for agent in agent_list:
agent.move()
agent.harvest()
agent.trade()
agent.consume()
agent.checkAlive()
agent.reproduce()
agent.updateParams()
# data_aggregator.collectData(self, self.name,
# self.run, period)
updateModelVariables()
self.collectData(str(period))
if self.live_visual:
if period % self.GUI.every_t_frames == 0:
print("period", period, "population", self.population, sep = "\t")
self.GUI.parent.title("Sugarscape: " + str(period))
self.GUI.updatePatches()
self.GUI.moveAgents()
self.GUI.canvas.update()
if period == periods:
mem_usage = memory_usage(-1, interval=1)#, timeout=1)
print(period, "end memory usage before sync//collect:", mem_usage[0], sep = "\t")
self.data_dict.sync()
gc.collect()
mem_usage = memory_usage(-1, interval=1)#, timeout=1)
print(period, "end memory usage after sync//collect:", mem_usage[0], sep = "\t")
def growPatches(self):
for i in self.patch_dict:
for patch in self.patch_dict[i].values():
if patch.Q < patch.maxQ:
patch.Q += 1
def collectData(self, period):
def collectAgentAttributes():
temp_dict={}
for attribute in self.agent_attributes:
temp_dict[attribute] = []
for ID, agent in self.agent_dict.items():
for attribute in self.agent_attributes:
temp_dict[attribute].append(getattr(agent, attribute))
for attribute, val in temp_dict.items():
self.data_dict[attribute][period] = np.mean(val)
def collectModelAttributes():
for attribute in self.model_attributes:
self.data_dict[attribute][period] = getattr(self, attribute)
collectAgentAttributes()
collectModelAttributes()
|
# coding=utf-8
import time
import os
class CreateID:
def __init__(self, rpapp):
self.rpapp = rpapp
def create_docs(self):
driver = self.rpapp.driver
driver.find_element_by_xpath(
"(.//*[normalize-space(text()) and normalize-space(.)='ti'])[1]/following::button[6]").click()
driver.find_element_by_xpath(
"(.//*[normalize-space(text()) and normalize-space(.)='RUS'])[1]/following::td[1]").click()
time.sleep(1)
driver.find_element_by_xpath('//button[text()="Create Identity Document"]').click()
driver.find_element_by_xpath(
"(.//*[normalize-space(text()) and normalize-space(.)='Document Type'])[1]/following::div[3]").click()
driver.implicitly_wait(20)
driver.find_element_by_id("react-select-7-option-2").click()
driver.find_element_by_xpath(
"(.//*[normalize-space(text()) and normalize-space(.)='File'])[1]/following::div[1]").click()
driver.find_element_by_xpath(
"(.//*[normalize-space(text()) and normalize-space(.)='File'])[1]/preceding::input[1]").\
send_keys(os.getcwd() + "/111.png")
driver.find_element_by_xpath(
"(.//*[normalize-space(text()) and normalize-space(.)='File'])[1]/following::button[1]").click()
driver.find_element_by_xpath(
"(.//*[normalize-space(text()) and normalize-space(.)='Create Identity Document'])[1]/preceding::a[1]").\
click()
driver.find_element_by_xpath(
"//a[contains(@href, '/client')]").click()
|
import datetime
from moto.core import BaseBackend
from moto.core.utils import iso_8601_datetime
class Token(object):
def __init__(self, duration, name=None, policy=None):
now = datetime.datetime.now()
self.expiration = now + datetime.timedelta(seconds=duration)
self.name = name
        self.policy = policy
@property
def expiration_ISO8601(self):
return iso_8601_datetime(self.expiration)
class AssumedRole(object):
def __init__(self, role_session_name, role_arn, policy, duration, external_id):
self.session_name = role_session_name
self.arn = role_arn
self.policy = policy
now = datetime.datetime.now()
self.expiration = now + datetime.timedelta(seconds=duration)
self.external_id = external_id
@property
def expiration_ISO8601(self):
return iso_8601_datetime(self.expiration)
class STSBackend(BaseBackend):
def get_session_token(self, duration):
token = Token(duration=duration)
return token
def get_federation_token(self, name, duration, policy):
token = Token(duration=duration, name=name, policy=policy)
return token
def assume_role(self, **kwargs):
role = AssumedRole(**kwargs)
return role
sts_backend = STSBackend()
|
def holaTodos(nombre1, nombre2):
print("Hola,", nombre2)
print("Hola,", nombre1)
holaTodos("Sebastián", "Felipe")
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
from io import StringIO
import io
import os
import sys
import datetime
import argparse
import numpy as np # type: ignore
from onnx import defs, FunctionProto, helper, OperatorStatus
from onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN
from onnx.backend.test.case import collect_snippets
from onnx.backend.sample.ops import collect_sample_implementations
from typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple
import pprint
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run-onnx-ops",
help="Output ONNXOps.td.inc content to stdout.",
action="store_true",
default=False)
parser.add_argument("--dry-run-op-build-table",
help="Output OpBuildTable.inc content to stdout.",
action="store_true",
default=False)
parser.add_argument("--check-operation-version",
help="check whether the imported onnx package has new operation or "
" newer version of operation compared with version stored in version_dicts",
action="store_true",
default=False)
args = parser.parse_args()
check_operation_version = args.check_operation_version
# Record the version of each operation that is treated as the current version.
# To check whether the onnx package being used has newer version operation,
# run this script with --check-operation-version flag.
# Update this dictionary when a newer version is implemented
# TODO: how to keep the old version
version_dict = {'Abs': 13,
'Acos': 7,
'Acosh': 9,
'Adagrad': 1,
'Adam': 1,
'Add': 13,
'And': 7,
'ArgMax': 13,
'ArgMin': 13,
'ArrayFeatureExtractor': 1,
'Asin': 7,
'Asinh': 9,
'Atan': 7,
'Atanh': 9,
'AveragePool': 11,
'BatchNormalization': 9,
'Binarizer': 1,
'BitShift': 11,
'Cast': 13,
'CastMap': 1,
'CategoryMapper': 1,
'Ceil': 13,
'Celu': 12,
'Clip': 13,
'Compress': 11,
'Concat': 13,
'ConcatFromSequence': 11,
'Constant': 13,
'ConstantOfShape': 9,
'Conv': 11,
'ConvInteger': 10,
'ConvTranspose': 11,
'Cos': 7,
'Cosh': 9,
'CumSum': 11,
'DepthToSpace': 13,
'DequantizeLinear': 13,
'Det': 11,
'DictVectorizer': 1,
'Div': 13,
'Dropout': 13,
'DynamicQuantizeLinear': 11,
'Einsum': 12,
'Elu': 6,
'Equal': 13,
'Erf': 13,
'Exp': 13,
'Expand': 13,
'EyeLike': 9,
'FeatureVectorizer': 1,
'Flatten': 13,
'Floor': 13,
'GRU': 7,
'Gather': 13,
'GatherElements': 13,
'GatherND': 13,
'Gemm': 13,
'GlobalAveragePool': 1,
'GlobalLpPool': 2,
'GlobalMaxPool': 1,
'Gradient': 1,
'Greater': 13,
'GreaterOrEqual': 12,
'HardSigmoid': 6,
'Hardmax': 13,
'Identity': 13,
'If': 13,
'Imputer': 1,
'InstanceNormalization': 6,
'IsInf': 10,
'IsNaN': 13,
'LRN': 13,
'LSTM': 7,
'LabelEncoder': 2,
'LeakyRelu': 6,
'Less': 13,
'LessOrEqual': 12,
'LinearClassifier': 1,
'LinearRegressor': 1,
'Log': 13,
'LogSoftmax': 13,
'Loop': 13,
'LpNormalization': 1,
'LpPool': 11,
'MatMul': 13,
'MatMulInteger': 10,
'Max': 13,
'MaxPool': 12,
'MaxRoiPool': 1,
'MaxUnpool': 11,
'Mean': 13,
'MeanVarianceNormalization': 13,
'Min': 13,
'Mod': 13,
'Momentum': 1,
'Mul': 13,
'Multinomial': 7,
'Neg': 13,
'NegativeLogLikelihoodLoss': 13,
'NonMaxSuppression': 11,
'NonZero': 13,
'Normalizer': 1,
'Not': 1,
'OneHot': 11,
'OneHotEncoder': 1,
'Or': 7,
'PRelu': 9,
'Pad': 13,
'Pow': 13,
'QLinearConv': 10,
'QLinearMatMul': 10,
'QuantizeLinear': 13,
'RNN': 7,
'RandomNormal': 1,
'RandomNormalLike': 1,
'RandomUniform': 1,
'RandomUniformLike': 1,
'Range': 11,
'Reciprocal': 13,
'ReduceL1': 13,
'ReduceL2': 13,
'ReduceLogSum': 13,
'ReduceLogSumExp': 13,
'ReduceMax': 13,
'ReduceMean': 13,
'ReduceMin': 13,
'ReduceProd': 13,
'ReduceSum': 13,
'ReduceSumSquare': 13,
'Relu': 13,
'Reshape': 13,
'Resize': 13,
'ReverseSequence': 10,
'RoiAlign': 10,
'Round': 11,
'SVMClassifier': 1,
'SVMRegressor': 1,
'Scaler': 1,
'Scan': 11,
'Scatter': 11,
'ScatterElements': 13,
'ScatterND': 13,
'Selu': 6,
'SequenceAt': 11,
'SequenceConstruct': 11,
'SequenceEmpty': 11,
'SequenceErase': 11,
'SequenceInsert': 11,
'SequenceLength': 11,
'Shape': 13,
'Shrink': 9,
'Sigmoid': 13,
'Sign': 13,
'Sin': 7,
'Sinh': 9,
'Size': 13,
'Slice': 13,
'Softmax': 13,
'SoftmaxCrossEntropyLoss': 13,
'Softplus': 1,
'Softsign': 1,
'SpaceToDepth': 13,
#'Split': 13,
'Split': 11,
'SplitToSequence': 11,
'Sqrt': 13,
#'Squeeze': 13,
'Squeeze': 11,
'StringNormalizer': 10,
'Sub': 13,
'Sum': 13,
'Tan': 7,
'Tanh': 13,
'TfIdfVectorizer': 9,
'ThresholdedRelu': 10,
'Tile': 13,
'TopK': 11,
'Transpose': 13,
'TreeEnsembleClassifier': 1,
'TreeEnsembleRegressor': 1,
'Unique': 11,
#'Unsqueeze': 13,
'Unsqueeze': 11,
'Upsample': 10,
'Where': 9,
'Xor': 7,
'ZipMap': 1}
# Manual specification of attribute defaults.
special_attr_defaults = dict([
# ("AveragePool.kernel_shape", ('ints', '{}')),
# ("MaxPool.kernel_shape", ('ints', '{}')),
# ("Cast.to", ('int', '0')),
# ("Concat.axis", ('int', '0')),
# ("Conv.group", ('int', '1')),
# ("Unsqueeze.axes", ('ints', '{}')),
# ("RNN.activation_alpha", ('floats', '{}')),
# ("RNN.activation_beta", ('floats', '{}')),
])
# Manual specification of attribute type.
special_attr_types = dict([("Cast.to", 'type')])
# Special operation importing handlers.
special_op_handler = dict([
("BatchNormalization", "ImportNodeBatchNormalization"),
("Dropout", "ImportNodeDropout"),
("Cast", "ImportNodeCast"),
("MaxPool", "ImportNodeMaxPool"),
("Pad", "ImportNodePad"),
("Slice", "ImportNodeSlice"),
#("Transpose", "ImportNodeTranspose")
])
# Operations supporting shape inference.
OpsWithShapeInference=[
'Abs',
'Add',
'And',
'Atan',
'AveragePool',
'Cast',
'Concat',
'Constant',
'ConstantOfShape',
'Conv',
'ConvInteger',
'ConvTranspose',
'Cos',
'Cosh',
'DequantizeLinear',
'Div',
'Dropout',
'DynamicQuantizeLinear',
'Elu',
'Erf',
'Exp',
'Expand',
'Flatten',
'GRU',
'Gather',
'Gemm',
'GlobalAveragePool',
'GlobalLpPool',
'GlobalMaxPool',
'HardSigmoid',
'Identity',
'LSTM',
'LeakyRelu',
'Less',
'Log',
'MatMul',
'Max',
'Min',
'Mul',
'Neg',
'OneHotEncoder',
'Or',
'Pad',
'Pow',
'PRelu',
'QLinearConv',
'QuantizeLinear',
'QLinearMatMul',
'RNN',
'Reciprocal',
'ReduceMax',
'ReduceMean',
'ReduceMin',
'ReduceProd',
'ReduceSum',
'Relu',
'Reshape',
'Scaler',
'Selu',
'Shape',
'Sigmoid',
'Sign',
'Sin',
'Sinh',
'Size',
'Slice',
'Softmax',
'Softplus',
'Softsign',
'Split',
'Sqrt',
'Squeeze',
'Sub',
'Sum',
'Tan',
'Tanh',
'Tile',
'Transpose',
'Unsqueeze',
'Xor',
'Loop',
]
# Operations supporting canonicalization.
OpsWithCanonicalizer = ['Add', 'Constant', 'Identity', 'Gemm', 'Cast', 'Transpose',
'Dropout', 'Shape', 'Size', 'GlobalAveragePool',
'GlobalMaxPool', 'Squeeze', 'Unsqueeze']
OpsWithHelpers = {
"Loop": """
mlir::Operation::result_range v_final();
mlir::Operation::result_range scan_outputs();
""",
"Scan": """
mlir::Operation::operand_range v_initial();
mlir::Operation::result_range v_final();
mlir::Operation::operand_range scan_inputs();
mlir::Operation::result_range scan_outputs();
"""
}
# Interface for special handling of type inference
# The common code is put into get_type_inference_func
OpsWithResultTypeInference = {
"Constant":
'''if (auto attr = valueAttr()) {
resultTypes.push_back(attr.getType());
} else if (auto attr = sparse_valueAttr()) {
resultTypes.push_back(attr.getType());
}''',
"Cast":
'''auto builder = mlir::OpBuilder(getContext());
resultTypes.push_back(mlir::UnrankedTensorType::get(to()));''',
"ConstantOfShape":
'''if (auto attr = valueAttr()) {
resultTypes.push_back(mlir::UnrankedTensorType::get(
attr.getType().cast<ShapedType>().getElementType()));
} else {
resultTypes.push_back(mlir::UnrankedTensorType::get(
FloatType::getF32(getContext())));
}'''
}
# Add an Op in this list if the Op needs result type deduction which is required
# when writing declarative rewriting rules. Deduced type is always
# an UnrankedTensorType whose element type is the same as the first operand's
# element type.
#
# Currently, there are only two build methods generated:
# - one with operands and attributes having a separate parameter, and
# - one with operands and attributes having aggregated parameters.
custom_builder_unranked_ops_list = ['Abs', 'Exp', 'ReduceSum', 'ReduceSumSquare',
'Pad', 'Sqrt', 'Neg', 'Unsqueeze', 'Softmax',
'ReduceMax', 'ReduceLogSum', 'Squeeze',
'Identity', 'Split']
# Custom builder op list for operations with broadcast; we can deduce the right
# output type, no need to leave it undef as in the above list.
# Ops must have two operands, not one, not three... And there shall be two.
# TODO: handle variadic ops omitted here: Max, Min, Min, Sum.
custom_builder_broadcast_ops_list = ['Add', 'And', 'Div', 'Equal', 'Greater',
'Less', 'Mul', 'Or', 'Pow', 'Sub', 'Xor']
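# For example (illustrative), broadcasting lets the generated builder type
# Add(tensor<2x3xf32>, tensor<3xf32>) as tensor<2x3xf32> instead of falling
# back to an unranked tensor.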
# union of both
custom_builder_ops_list = custom_builder_unranked_ops_list + custom_builder_broadcast_ops_list
#a dictionary to add any special definition for an operation
custom_definition_misc = dict([ ('Constant',
''' let builders = [
OpBuilder<(ins "Attribute":$sparse_value, "Attribute":$value), [{
if (value) {
auto tensorType = value.getType();
build($_builder, $_state, tensorType, sparse_value, value,
FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());
} else {
auto tensorType = sparse_value.getType();
build($_builder, $_state, tensorType, sparse_value, value,
FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());
}
}]>
];'''),
('Cast',
''' let builders = [
OpBuilder<(ins "Value":$input, "TypeAttr":$to), [{
auto resultType = mlir::UnrankedTensorType::get(to.getValue());
build($_builder, $_state, resultType, input, to);
}] >
];'''
)])
onnx_types = (
'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',
'float', 'double', 'complex64', 'complex128', 'string'
)
tblgen_types = ('AnyI1', 'AnyI8', 'AnyI16', 'AnyI32', 'AnyI64', 'BF16', 'F16', 'F32', 'F64',
'Complex<F32>', 'Complex<F64>', 'StringType'
)
MAX_NUM_TYPES=20
def should_render_domain(domain): # type: (Text) -> bool
return True
def display_attr_type(v): # type: (OpSchema.AttrType) -> Text
assert isinstance(v, OpSchema.AttrType)
s = Text(v)
s = s[s.rfind('.') + 1:].lower()
if s[-1] == 's':
s = 'list of ' + s
return s
def get_unique_output_name(schema, name):
for input in schema.inputs:
if input.name == name:
return 'out_' + name
return name
def onnx_attr_type_to_mlir_attr_type(t):
onnx_attr_type = Text(t)
onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()
if onnx_attr_type == 'int':
mlir_attr_type = 'SI64Attr'
elif onnx_attr_type == 'float':
mlir_attr_type = 'F32Attr'
elif onnx_attr_type == 'ints':
mlir_attr_type = 'I64ArrayAttr'
elif onnx_attr_type == 'floats':
mlir_attr_type = 'F32ArrayAttr'
elif onnx_attr_type == "string":
mlir_attr_type = 'StrAttr'
elif onnx_attr_type == "strings":
mlir_attr_type = 'StrArrayAttr'
elif onnx_attr_type == 'type':
mlir_attr_type = 'TypeAttr'
else:
mlir_attr_type = 'AnyAttr'
#TODO: tensor and sparse tensor
return mlir_attr_type
#TODO: any better way to do this.
def tblgen_attr_type_to_cpp_type(t):
if 'I64Attr' in t:
cpp_type = 'IntegerAttr'
elif 'F32Attr' in t:
cpp_type = 'FloatAttr'
elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:
cpp_type = 'ArrayAttr'
elif 'StrAttr' in t:
cpp_type = 'StringAttr'
elif 'strings' in t:
cpp_type = 'ArrayAttr'
else:
cpp_type = 'Attribute'
return cpp_type
def tblgen_operand_type_to_cpp_type(op_type):
if op_type.startswith('Variadic'):
mytype = 'ValueRange'
else:
mytype = 'Value'
return mytype
def np_type_to_tblgen_attr_type(tstr):
index = -1
for i in range(len(onnx_types)):
if onnx_types[i] in tstr:
index = i
break
if index == -1:
return None
else:
        return tblgen_types[index]
def get_tblgen_type_index(type_str):
return tblgen_types.index(type_str)
#the possible data structures are tensor, map and seq(tensor())
def get_data_structure_element(allowed_type_str):
structure_list = ['tensor', 'seq', 'map']
for structure in structure_list:
if allowed_type_str.startswith(structure) :
element = allowed_type_str.replace(
structure+'(', '', 1).replace(')', '', 1)
return (structure, element)
return (None, None)
def get_allowed_elem_types(schema, input):
#allowed_types_str = None
# return allowed_types_str
# TODO: enable type constraints.
if input.typeStr :
tstr = input.typeStr
structure, element = get_data_structure_element(tstr);
# In case the type is directly specified
if structure and element :
t = np_type_to_tblgen_attr_type(element)
            if t is None:
                return structure, None
else :
return structure, [t]
        else:
            return None, None
if schema.type_constraints:
for type_constraint in schema.type_constraints:
if type_constraint.type_param_str != tstr :
continue
allowed_type_list=[]
allowedTypes = type_constraint.allowed_type_strs
allowed_structure = None
for allowedType in allowedTypes:
structure, element = get_data_structure_element(allowedType);
if structure == None or element == None:
return None, None
if allowed_structure != None and allowed_structure != structure :
return None, None
allowed_structure = structure
t = np_type_to_tblgen_attr_type(element)
if t == None :
return allowed_structure, None
                if t not in allowed_type_list:
                    allowed_type_list.append(t)
            return allowed_structure, allowed_type_list
return None, None
def inc_indent(indent=None):
return "" if indent is None else indent + ' ' * 2
def dec_indent(indent):
return indent[:-2]
def join_args(args):
return ", ".join(args)
def get_operands_or_results(schema, type_str_dict, is_input):
value_list = schema.inputs if is_input else schema.outputs
if not value_list:
return OrderedDict()
def any_type_of(types):
assert isinstance(types, list)
if len(types) == 1:
return types[0]
else:
return "AnyTypeOf<[{}]>".format(", ".join(types))
name_to_types = OrderedDict()
for i, value in enumerate(value_list):
types = get_onnx_mlir_types(schema, type_str_dict, value)
'''
structure, elem_types = get_allowed_elem_types(schema, type_str_dict, value)
if structure == 'tensor' :
if elem_types is None:
types = ["AnyMemRef", "AnyTensor"]
else:
elem_types_str = ','.join(elem_types)
types = ["TensorOf<[{}]>", "MemRefOf<[{}]>"]
types = list(map(lambda x: x.format(elem_types_str), types))
elif structure == 'seq' :
# Seq is not supported yet.
# Use of TensorOf<[AnyTensor]> as a placeholder for tablegen.
# When the Operation is used, warning/error will be generated at runtime.
if elem_types is None:
types = ["AnyMemRef", "TensorOf<[AnyTensor]>"]
else:
elem_types_str = ','.join(elem_types)
types = ["TensorOf<[TensorOf<[{}]>]>", "MemRefOf<[{}]>"]
types = list(map(lambda x: x.format(elem_types_str), types))
elif structure == 'map' :
# Map is not supported yet.
# Use of TupleOf as a placeholder for tablegen.
# When the Operation is used, warning/error will be generated at runtime.
if elem_types is None:
types = ["AnyMemRef", "TupleOf<[AnyTensor]>"]
else:
elem_types_str = ','.join(elem_types)
types = ["TupleOf<[TensorOf<[{}]>]>", "MemRefOf<[{}]>"]
types = list(map(lambda x: x.format(elem_types_str), types))
else:
types = ["AnyMemRef", "AnyTensor"]
'''
if OpSchema.FormalParameterOption.Optional == value.option:
types.append("NoneType")
elif OpSchema.FormalParameterOption.Variadic == value.option:
if value.isHomogeneous:
types = ["Variadic<{}>".format(any_type_of(types))]
else:
                # TODO: handle (variadic, heterogeneous)
types = ["Variadic<{}>".format(any_type_of(types))]
sys.stderr.write("warning: (variadic, heterogeneous) for " + schema.name +
' ' + value.name + "\n")
        # Since an output name can coincide with that of an input, we explicitly
        # prepend the prefix "out_" to such names for disambiguation.
if is_input:
value_name = value.name
else:
value_name = get_unique_output_name(schema, value.name)
name_to_types[value_name] = any_type_of(types)
return name_to_types
def get_attrs(schema):
def get_attr_type_optional(attr_type):
return 'OptionalAttr<{}>'.format(
onnx_attr_type_to_mlir_attr_type(attr_type))
def get_attr_type_with_default(attr_type, attr_default):
return 'DefaultValuedAttr<{}, "{}">'.format(
onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)
if not schema.attributes:
return OrderedDict()
name_to_type = OrderedDict()
for _, attr in sorted(schema.attributes.items()):
if attr.type == OpSchema.AttrType.GRAPH:
continue
qualified_attr_name = "{}.{}".format(schema.name, attr.name)
if qualified_attr_name in special_attr_defaults:
name_to_type[attr.name] = get_attr_type_with_default(
*special_attr_defaults[qualified_attr_name])
if qualified_attr_name in special_attr_types:
name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(
special_attr_types[qualified_attr_name])
# option holds either required or default value
elif attr.required:
name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(
attr.type)
elif attr.default_value.name:
def format_value(value): # type: (Any) -> Text
if isinstance(value, float):
formatted = str(np.round(value, 5))
# use default formatting, unless too long.
if (len(formatted) > 10):
formatted = str("({:e})".format(value))
return formatted
elif isinstance(
value,
(bytes, bytearray)) and sys.version_info[0] == 3:
return str(value.decode('utf-8'))
return str(value)
default_value = helper.get_attribute_value(attr.default_value)
if isinstance(default_value, list):
default_value = [format_value(val) for val in default_value]
default_value_str = '{}'.format(default_value)
default_value_str = default_value_str.replace('[', '{', 1)
default_value_str = default_value_str.replace(']', '}', 1)
if Text(attr.type) == "AttrType.STRINGS":
default_value_str = default_value_str.replace("'", '\\"')
else:
default_value_str = default_value_str.replace("'", '')
else:
default_value = format_value(default_value)
default_value_str = default_value
name_to_type[attr.name] = get_attr_type_with_default(
attr.type, default_value_str)
else:
name_to_type[attr.name] = get_attr_type_optional(attr.type)
return name_to_type
def get_numberof_list(mylist):
expected_num = len(mylist)
for element in mylist :
if OpSchema.FormalParameterOption.Variadic == element.option:
expected_num = -1
return expected_num
def get_output_type_mapping(schema):
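    # Each output is encoded as: a tblgen type index (< MAX_NUM_TYPES), or
    # MAX_NUM_TYPES + i meaning "same type as input i", or -1 when unknown.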
mapping=[]
for output in schema.outputs :
#if only one type is allowed, just set that
structure, allowed_elem_types = get_allowed_elem_types(schema, output)
if allowed_elem_types != None and len(allowed_elem_types) == 1 :
mapping.append(str(get_tblgen_type_index(allowed_elem_types[0])))
continue
#map the type string
if output.typeStr :
tstr = output.typeStr
found = False
for i, input in enumerate(schema.inputs):
if input.typeStr and input.typeStr == tstr:
mapping.append(str(i+MAX_NUM_TYPES))
found = True
break
if found:
continue
#unknown output type
mapping.append(str(-1))
return mapping
def get_numberof_inout(s, indent, schema):
expected_num_operands = get_numberof_list(schema.inputs)
indent = inc_indent(indent)
s += indent + "static int getNumberOfOperands() {\n"
indent = inc_indent(indent)
s += indent + "return {};\n".format(expected_num_operands)
indent = dec_indent(indent)
s += indent + "}\n"
expected_num_results = get_numberof_list(schema.outputs)
s += indent + "static int getNumberOfResults() {\n"
indent = inc_indent(indent)
s += indent + "return {};\n".format(expected_num_results)
indent = dec_indent(indent)
s += indent + "}\n"
s += indent + "static std::vector<int> getTypeMap() {\n"
mapping = get_output_type_mapping(schema)
indent = inc_indent(indent)
s += indent + "return {" + ",".join(mapping) + "};\n"
indent = dec_indent(indent)
s += indent + "}\n"
return s
def get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):
cpp_name_to_idx_literal = "{" + ", ".join([
"{{\"{}\", {}}}".format(*name_to_idx)
for name_to_idx in const_operands_name_to_idx
]) + "}"
#s += indent + "let extraClassDeclaration = [{\n"
indent = inc_indent(indent)
s += indent + "std::map<std::string, size_t> promotableConstOperands() {\n"
indent = inc_indent(indent)
s += indent + "return {};\n".format(cpp_name_to_idx_literal)
indent = dec_indent(indent)
s += indent + "}\n"
#indent = dec_indent(indent)
#s += indent + "}];\n"
return s
def get_type_inference_func(s, indent, type_inference_code):
indent = inc_indent(indent)
s += indent + "std::vector<mlir::Type> resultTypeInference() {" + "\n"
indent = inc_indent(indent)
s += indent + "std::vector<mlir::Type> resultTypes;" + "\n"
s += indent + type_inference_code + '\n'
s += indent + "return resultTypes;" + "\n"
indent = dec_indent(indent)
s += indent + "}" + "\n"
indent = dec_indent(indent)
return s
def parse_type_str(allowedType):
    # AnyI may be used for uint because onnx-mlir does not generate uint output yet.
    # This will be fixed later, at which point UI will replace AnyI.
onnx_to_mlir_type_dict = { '(': '<[',
')': ']>',
'tensor' : 'TensorOf',
'seq' : 'SeqOf',
'map' : 'TupleOf',
'bool': 'I1',
#'uint8' : 'AnyI8',
#uint16' : 'AnyI16',
#uint32' : 'AnyI32',
#uint64' : 'AnyI64',
'uint8' : 'UI8',
'uint16' : 'UI16',
'uint32' : 'UI32',
'uint64' : 'UI64',
'int8' : 'I8',
'int16' : 'I16',
'int32' : 'I32',
'int64' : 'I64',
'float16' : 'F16',
'bfloat16' : 'BF16',
'float' : 'F32',
'double' : 'F64',
'unkown' : 'BF16',
'complex64' : 'Complex<F32>',
'complex128' : 'Complex<F64>',
'string' : 'StringType'}
# Apply substitutions in decreasing order of key-length, so that float16 is replaced
# before float, and uint16 is replaced before int16, etc.
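    # For example, "tensor(float16)" becomes "TensorOf<[F16]>".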
mapping = list(onnx_to_mlir_type_dict.items())
mapping.sort(key=lambda pair:len(pair[0]), reverse=True)
for key, item in mapping:
allowedType = allowedType.replace(key, item)
return allowedType
def parse_a_type_constraint(constraint):
allowedTypes = constraint.allowed_type_strs
mlirTypes = []
for allowedType in allowedTypes:
mlirType = parse_type_str(allowedType)
mlirTypes.append(mlirType)
    # We could deduplicate and sort here, but onnx keeps a consistently meaningful
    # order, and there is no redundancy as long as each onnx type is mapped uniquely.
# mlirTypes = sorted(list(set(mlirTypes)))
# MemRef is always needed
mlirTypes.append("AnyMemRef")
return mlirTypes
def parse_type_constraints(schema):
type_str_dict = dict()
for type_constraint in schema.type_constraints:
type_str_dict[type_constraint.type_param_str] = parse_a_type_constraint(type_constraint)
return type_str_dict
def get_onnx_mlir_types(schema, type_str_dict, input):
if input.typeStr :
if not input.typeStr in type_str_dict :
# some arguments use type description directly
# instead of constraint
return [parse_type_str(input.typeStr), "AnyMemRef"]
else :
return type_str_dict[input.typeStr]
else :
print('No typeStr ', schema.name)
return []
def gen_op_def(schema):
indent = inc_indent()
s = 'def ONNX{0}Op:ONNX_Op<"{0}",\n'.format(schema.name)
regions = OrderedDict()
for _, attr in sorted(schema.attributes.items()):
if attr.type == OpSchema.AttrType.GRAPH:
if attr.required:
regions[attr.name] = "SizedRegion<1>"
else:
regions[attr.name] = "AnyRegion"
# Generate decl for op traits.
traits = ["NoSideEffect"]
# OpsWithShapeInference:
    # Now the ShapeInference trait is added to all operations.
    # Dummy implementations are added to ONNXOps.cpp.
    # An error will be reported if these operations are encountered at runtime.
traits.append("DeclareOpInterfaceMethods<ShapeInferenceOpInterface>")
if schema.name in OpsWithResultTypeInference.keys():
traits.append("OpInterface<\"ResultTypeInferenceOpInterface\">")
if len(regions):
traits.append("OpInterface<\"HasOnnxSubgraphOpInterface\">")
s += inc_indent(indent) + '[{}]> {{\n'.format(join_args(traits))
# Generate decl for canonicalizer.
indent = inc_indent(indent)
if schema.name in OpsWithCanonicalizer:
s += indent + 'let hasCanonicalizer = 1;\n'
# Generate decl for summary.
s += indent + 'let summary = "ONNX {} operation";\n'.format(schema.name)
# Generate description.
s += indent + 'let description = [{\n'
if schema.doc:
lines = schema.doc.lstrip().splitlines()
for line in lines:
escaped_line = line.replace('"', '\\"')\
.replace('}]', '\\}\\]')
s += indent + '"{}"\n'.format(escaped_line)
s += indent + '}];\n'
# handle the type constraint for input and output
# parse type constraint into onnx-mlir type string list
type_str_dict = parse_type_constraints(schema)
# Generate ins (consisting of operands and attributes).
ins = get_operands_or_results(schema, type_str_dict, is_input=True)
ins.update(get_attrs(schema))
ins_strs = ["{1}:${0}".format(*i) for i in ins.items()]
s += indent + 'let arguments = (ins {});\n'.format(
(',\n' + inc_indent(indent)).join(ins_strs))
# Generate outs (operation results).
outs = get_operands_or_results(schema, type_str_dict, is_input=False)
outs_strs = ["{1}:${0}".format(*i) for i in outs.items()]
s += indent + 'let results = (outs {});\n'.format(
(',\n' + inc_indent(indent)).join(outs_strs))
regions_strs = ["{1}:${0}".format(*i) for i in regions.items()]
if len(regions):
s += indent + 'let regions = (region {});\n'.format(
(',\n' + inc_indent(indent)).join(regions_strs))
# custom_builder_broadcast_ops_list
# add custom builders
# use element type of the first operand to construct an UnrankedTensorType for the output.
if schema.name in custom_builder_ops_list:
if len(ins) == 0:
            raise RuntimeWarning(
                "warning: no custom build methods generated for " +
                schema.name + " since it does not have operands.")
else:
s += indent + 'let builders = [\n'
# Custom builders with operands and attributes having a separate parameter.
# E.g. OpBuilder<(ins "Value":$X, "Value":$Y, "Attribute":$A), [{}]>
indent = inc_indent(indent)
s += indent + 'OpBuilder<(ins '
operands_dict = get_operands_or_results(schema, type_str_dict, is_input=True)
attrs_dict = get_attrs(schema)
s += ', '.join('"{}":${}'.format(tblgen_operand_type_to_cpp_type(ty),
name) for name, ty in operands_dict.items())
if operands_dict and attrs_dict:
s += ', '
s += ', '.join('"{}":${}'.format(tblgen_attr_type_to_cpp_type(ty),
name) for name, ty in attrs_dict.items())
s += '), [{\n'
indent = inc_indent(indent)
# Get output type from first operand's type.
first_operand_name = list(ins.items())[0][0]
build_type_name = ''
if schema.name in custom_builder_broadcast_ops_list:
second_operand_name = list(ins.items())[1][0]
s += indent + 'auto lhsTy = {}.getType();\n'. \
format(first_operand_name)
s += indent + 'auto rhsTy = {}.getType();\n'. \
format(second_operand_name)
s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy);\n'
s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\n';
s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\n';
s += indent + indent + 'elementType = {}'.format(first_operand_name) + \
'.getType().cast<ShapedType>().getElementType();\n';
s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\n'
s += indent + '}\n';
build_type_name = 'elementType'
else:
s += indent + 'auto elementType = {}'.format(first_operand_name) + \
'.getType().cast<ShapedType>().getElementType();\n'
build_type_name = 'UnrankedTensorType::get(elementType)'
s += indent + 'build($_builder, $_state, {}'.format(build_type_name)
for name, _ in ins.items():
s += ', ' + name
s += ');\n'
indent = dec_indent(indent)
s += indent + '}]>,\n'
# Custom builders with all operands and attributes having aggregate parameters.
# E.g. OpBuilder<(ins "ValueRange operands,
# ArrayRef<NamedAttribute> attributes", [{}]>'
s += indent + 'OpBuilder<(ins ' + \
'"ValueRange":$operands, "ArrayRef<NamedAttribute>":$attributes), [{\n'
indent = inc_indent(indent)
if schema.name in custom_builder_broadcast_ops_list:
s += indent + 'auto lhsTy = operands[0].getType();\n'
s += indent + 'auto rhsTy = operands[1].getType();\n'
s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy);\n'
s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\n';
s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\n';
s += indent + indent + 'elementType = operands[0]' + \
'.getType().cast<ShapedType>().getElementType();\n';
s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\n'
s += indent + '}\n';
else:
s += indent + 'auto elementType = operands[0].getType().' + \
'cast<ShapedType>().getElementType();\n'
s += indent + 'std::vector<mlir::Type> outputTypes;\n'
s += indent + 'outputTypes.emplace_back({});\n'.format(build_type_name)
s += indent + 'build($_builder, $_state, outputTypes, operands, attributes);\n'
indent = dec_indent(indent)
s += indent + '}]>'
s += '\n' + indent + '];\n'
    # Generate extraClassDeclaration.
s += indent + "let extraClassDeclaration = [{\n"
#indent = inc_indent(indent)
# Generate input/output number.
s = get_numberof_inout(s, indent, schema)
if schema.name in OpsWithResultTypeInference:
s = get_type_inference_func(
s, indent, OpsWithResultTypeInference[schema.name])
if schema.name in OpsWithHelpers:
s += OpsWithHelpers[schema.name]
if len(regions):
s += indent + "int64_t getSubgraphRegionIdx(const std::string& name) {\n"
indent = inc_indent(indent)
for idx, region_name in enumerate(regions.keys()):
s += indent + "if (name == \"{}\") return {};\n".format(region_name, idx)
s += indent + "llvm_unreachable(\"region with the specified name does not exist\");\n"
indent = dec_indent(indent)
s += indent + "}\n"
s += indent + '}];\n'
if ( schema.name in custom_definition_misc) :
s += custom_definition_misc[schema.name] + '\n'
s += '}\n\n'
return s
"""
special cases:
* Split: attr split default value: sizeof(output1) namely 1
* Conv: attr dilations default value is {num_dim of first input - 2, 1}
* Conv: attr kernel_shape type is ints
* Transpose: attr perm default value is {} empty int list
"""
def gen_op_importer(schema, file):
indent = inc_indent()
s = indent + 'import_handler_map_["' + schema.name +'"] = \n '
expected_num_operands = len(schema.inputs)
expected_num_results = len(schema.outputs)
for input in schema.inputs:
if OpSchema.FormalParameterOption.Variadic == input.option:
expected_num_operands = -1
for output in schema.outputs:
if OpSchema.FormalParameterOption.Variadic == output.option:
expected_num_results = -1
handler_func = special_op_handler.get(
schema.name, "buildOperation<mlir::ONNX{}Op>".format(schema.name))
# Special handlers currently require expected num operands/results to be specified.
# TODO: remove special handlers.
args = ["node"]
"""
if expected_num_operands != -1 or expected_num_results != -1 or "buildOperation" not in handler_func:
args.append(
"/* expected_num_operands = */ {}".format(expected_num_operands))
args.append(
'/* expected_num_results = */ {}'.format(expected_num_results))
"""
s += inc_indent(indent) + '&onnx_mlir::detail::FrontendGenImpl::'
s += handler_func+';\n'
file.write(s)
def build_operator_schemas():
# domain -> support level -> name -> [schema]
index = defaultdict(lambda: defaultdict(lambda: defaultdict(
list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]
for schema in defs.get_all_schemas_with_history():
index[schema.domain][int(
schema.support_level)][schema.name].append(schema)
# Preprocess the Operator Schemas
# [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]
operator_schemas = list(
) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]
    existing_ops = set() # type: Set[Text]
for domain, _supportmap in sorted(index.items()):
if not should_render_domain(domain):
continue
processed_supportmap = list()
for _support, _namemap in sorted(_supportmap.items()):
processed_namemap = list()
for n, unsorted_versions in sorted(_namemap.items()):
versions = sorted(unsorted_versions,
key=lambda s: s.since_version)
schema = versions[-1]
                if schema.name in existing_ops:
continue
if check_operation_version :
# Generate operation of the latest version of your onnx.
                    existing_ops.add(schema.name)
processed_namemap.append((n, schema, versions))
# Add checks against version_dict
if schema.name not in version_dict :
print("Check-operation-version: Operation {} is new with version {}"
.format(schema.name, schema.since_version))
elif schema.since_version > version_dict[schema.name]:
print("Check-operation-version: Operation {}"
.format(schema.name)+
" has a newer version {} over old version {}"
.format(schema.since_version, version_dict[schema.name]))
else:
# Generate operation according to the version in version_dict.
if schema.name not in version_dict :
continue
found = False
for schema in reversed(versions):
# Check the version number against the version_dict
if schema.since_version == version_dict[schema.name]:
                        existing_ops.add(schema.name)
processed_namemap.append((n, schema, versions))
found = True
break
if not found:
print("Your onnx installation may be too old. "
"The desired version for operation {} is not found.".format(
schema.name))
sys.exit()
processed_supportmap.append((_support, processed_namemap))
operator_schemas.append((domain, processed_supportmap))
return operator_schemas
def main(args): # type: (Type[Args]) -> None
curr_utc_time = datetime.datetime.now(
datetime.timezone.utc).strftime("%m/%d/%Y, %H:%M:%S")
autogen_warning = (
'//********************************************************\n'
'// Do not modify this file directly.\n'
'// This file is automatically generated via script.\n'
'// Details can be found in docs/ImportONNXDefs.md .\n'
'//********************************************************\n\n')
autogen_warning = autogen_warning.format(curr_utc_time)
op_def = args.op_def
op_def.write(autogen_warning)
op_importer = args.op_importer
op_importer.write(autogen_warning)
version_dict = dict()
for domain, supportmap in build_operator_schemas():
for _, namemap in supportmap:
for op_type, schema, versions in namemap:
if check_operation_version:
version_dict[schema.name] = schema.since_version
else:
gen_op_importer(schema, op_importer)
r = gen_op_def(schema)
op_def.write(r)
if check_operation_version :
pprint.pprint(version_dict)
if __name__ == '__main__':
curr_dir = os.path.dirname(os.path.realpath(__file__))
class Args(object):
if args.dry_run_onnx_ops:
op_def = StringIO()
else:
op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')
op_def = io.open(op_def_file_path, 'w', newline='')
if args.dry_run_op_build_table:
op_importer = StringIO()
else:
op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')
op_importer = io.open(op_importer_file_path, 'w', newline='')
main(Args)
if args.dry_run_onnx_ops:
sys.stdout.write(Args.op_def.getvalue())
if args.dry_run_op_build_table:
sys.stdout.write(Args.op_importer.getvalue())
|
"""
Test for the SmartThings sensors platform.
The only mocking required is of the underlying SmartThings API object so
real HTTP calls are not initiated during testing.
"""
from pysmartthings import ATTRIBUTES, CAPABILITIES, Attribute, Capability
from homeassistant.components.sensor import DEVICE_CLASSES, DOMAIN as SENSOR_DOMAIN
from homeassistant.components.smartthings import sensor
from homeassistant.components.smartthings.const import DOMAIN, SIGNAL_SMARTTHINGS_UPDATE
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
STATE_UNKNOWN,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .conftest import setup_platform
async def test_mapping_integrity():
"""Test ensures the map dicts have proper integrity."""
for capability, maps in sensor.CAPABILITY_TO_SENSORS.items():
assert capability in CAPABILITIES, capability
for sensor_map in maps:
assert sensor_map.attribute in ATTRIBUTES, sensor_map.attribute
if sensor_map.device_class:
assert (
sensor_map.device_class in DEVICE_CLASSES
), sensor_map.device_class
async def test_async_setup_platform():
"""Test setup platform does nothing (it uses config entries)."""
await sensor.async_setup_platform(None, None, None)
async def test_entity_state(hass, device_factory):
"""Tests the state attributes properly match the sensor types."""
device = device_factory("Sensor 1", [Capability.battery], {Attribute.battery: 100})
await setup_platform(hass, SENSOR_DOMAIN, devices=[device])
state = hass.states.get("sensor.sensor_1_battery")
assert state.state == "100"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == "%"
assert state.attributes[ATTR_FRIENDLY_NAME] == device.label + " Battery"
async def test_entity_three_axis_state(hass, device_factory):
"""Tests the state attributes properly match the three axis types."""
device = device_factory(
"Three Axis", [Capability.three_axis], {Attribute.three_axis: [100, 75, 25]}
)
await setup_platform(hass, SENSOR_DOMAIN, devices=[device])
state = hass.states.get("sensor.three_axis_x_coordinate")
assert state.state == "100"
assert state.attributes[ATTR_FRIENDLY_NAME] == device.label + " X Coordinate"
state = hass.states.get("sensor.three_axis_y_coordinate")
assert state.state == "75"
assert state.attributes[ATTR_FRIENDLY_NAME] == device.label + " Y Coordinate"
state = hass.states.get("sensor.three_axis_z_coordinate")
assert state.state == "25"
assert state.attributes[ATTR_FRIENDLY_NAME] == device.label + " Z Coordinate"
async def test_entity_three_axis_invalid_state(hass, device_factory):
"""Tests the state attributes properly match the three axis types."""
device = device_factory(
"Three Axis", [Capability.three_axis], {Attribute.three_axis: []}
)
await setup_platform(hass, SENSOR_DOMAIN, devices=[device])
state = hass.states.get("sensor.three_axis_x_coordinate")
assert state.state == STATE_UNKNOWN
state = hass.states.get("sensor.three_axis_y_coordinate")
assert state.state == STATE_UNKNOWN
state = hass.states.get("sensor.three_axis_z_coordinate")
assert state.state == STATE_UNKNOWN
async def test_entity_and_device_attributes(hass, device_factory):
"""Test the attributes of the entity are correct."""
# Arrange
device = device_factory("Sensor 1", [Capability.battery], {Attribute.battery: 100})
entity_registry = await hass.helpers.entity_registry.async_get_registry()
device_registry = await hass.helpers.device_registry.async_get_registry()
# Act
await setup_platform(hass, SENSOR_DOMAIN, devices=[device])
# Assert
entry = entity_registry.async_get("sensor.sensor_1_battery")
assert entry
assert entry.unique_id == device.device_id + "." + Attribute.battery
entry = device_registry.async_get_device({(DOMAIN, device.device_id)}, [])
assert entry
assert entry.name == device.label
assert entry.model == device.device_type_name
assert entry.manufacturer == "Unavailable"
async def test_update_from_signal(hass, device_factory):
"""Test the binary_sensor updates when receiving a signal."""
# Arrange
device = device_factory("Sensor 1", [Capability.battery], {Attribute.battery: 100})
await setup_platform(hass, SENSOR_DOMAIN, devices=[device])
device.status.apply_attribute_update(
"main", Capability.battery, Attribute.battery, 75
)
# Act
async_dispatcher_send(hass, SIGNAL_SMARTTHINGS_UPDATE, [device.device_id])
# Assert
await hass.async_block_till_done()
state = hass.states.get("sensor.sensor_1_battery")
assert state is not None
assert state.state == "75"
async def test_unload_config_entry(hass, device_factory):
"""Test the binary_sensor is removed when the config entry is unloaded."""
# Arrange
device = device_factory("Sensor 1", [Capability.battery], {Attribute.battery: 100})
config_entry = await setup_platform(hass, SENSOR_DOMAIN, devices=[device])
# Act
await hass.config_entries.async_forward_entry_unload(config_entry, "sensor")
# Assert
assert not hass.states.get("sensor.sensor_1_battery")
|
#!/usr/bin/env python3
# pvoutput.py
#
# Simple library for uploading data to PVOutput.
import urllib.request
import urllib.parse
import urllib.error
import logging
import sys
logger = logging.getLogger(__name__)
class System:
"""Provides methods for direct uploading to PVOutput for set system."""
def __init__(self, api_key, system_id):
self.api_key = api_key
self.system_id = system_id
def add_output(self, data):
"""Add end of day output information. Data should be a dictionary with
parameters as described here:
http://pvoutput.org/help.html#api-addoutput ."""
url = "http://pvoutput.org/service/r2/addoutput.jsp"
self.__make_request(url, data)
def add_status(self, data):
"""Add live output data. Data should contain the parameters as described
here: http://pvoutput.org/help.html#api-addstatus ."""
url = "http://pvoutput.org/service/r2/addstatus.jsp"
self.__make_request(url, data)
# Could add methods like 'get_status'
def __make_request(self, url, data):
logger.debug('Making request: %s, %s', url, data)
data = urllib.parse.urlencode(data).encode('ascii')
req = urllib.request.Request(url, data)
req.add_header('X-Pvoutput-Apikey', self.api_key)
req.add_header('X-Pvoutput-SystemId', self.system_id)
try:
f = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
logger.error('Upload failed: %s', e.read().decode())
except urllib.error.URLError as e:
logger.error('Upload failed: %s', e)
else:
with f:
logger.debug('Response: %s', f.read().decode())
def __str__(self):
return self.system_id.__str__()
def __repr__(self):
return self.system_id.__repr__()
def __hash__(self):
return self.system_id.__hash__()
def __eq__(self, other):
return self.system_id == other.system_id
# Test code
if __name__ == '__main__':
import time
import configparser
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
data = {
'd': time.strftime('%Y%m%d'),
't': time.strftime('%H:%M'),
'v1': 0, # Energy today
'v2': 0, # Output power
'v5': 20.0, # Temperature
'v6': 230.0 # Grid voltage
}
config = configparser.ConfigParser()
config.read_file(open('samil_upload.ini'))
# Assumes a default API key and system ID
api_key = config['DEFAULT']['API key']
system_id = config['DEFAULT']['System ID']
pv = System(api_key, system_id)
pv.add_status(data)
|
from django.conf.urls import include, url # pragma: no cover
from django.contrib import admin # pragma: no cover
from weddingServices import views as ws_views # pragma: no cover
from django.contrib.auth import views as auth_views # pragma: no cover
urlpatterns = [ # pragma: no cover
# Examples:
# url(r'^$', 'weddingPlanner.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^login/$', auth_views.login, {'template_name': 'weddingServices/login.html'}, name='login'),
url(r'^logout/$', auth_views.logout, {'next_page': '/weddingServices'}, name='logout'),
url(r'^signup/$', ws_views.signup, name='signup'),
url(r'^weddingServices/', include('weddingServices.urls')),
]
|
# -*- coding: utf-8 -*-
""" Sahana Eden Guided Tour Model
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
@todo: update for new template path modules/template
"""
__all__ = ("S3GuidedTourModel",
"tour_rheader",
"tour_builder",
)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3GuidedTourModel(S3Model):
""" Details about which guided tours this Person has completed """
names = ("tour_config",
"tour_details",
"tour_user",
)
def model(self):
T = current.T
db = current.db
NONE = current.messages["NONE"]
s3 = current.response.s3
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
person_id = self.pr_person_id
# ---------------------------------------------------------------------
# Guided tours that are available
#
tablename = "tour_config"
define_table(tablename,
Field("name",
represent=lambda v: v or NONE,
label=T("Display name"),
requires = IS_NOT_EMPTY(),
),
Field("code",
length=255,
notnull=True,
unique=True,
represent=lambda v: v or NONE,
label=T("Unique code")),
Field("controller",
represent=lambda v: v or NONE,
label=T("Controller tour is activated")),
Field("function",
represent=lambda v: v or NONE,
label=T("Function tour is activated")),
Field("autostart", "boolean",
default=False,
represent=lambda v: \
T("Yes") if v else T("No"),
label=T("Auto start")),
Field("role", "string",
represent=lambda v: v or NONE,
label=T("User's role")),
* s3_meta_fields()
)
# CRUD strings
ADD_TOUR = T("Create Tour")
crud_strings[tablename] = Storage(
label_create = ADD_TOUR,
title_display = T("Tour Configuration"),
title_list = T("Tours"),
title_update = T("Edit Tour"),
label_list_button = T("List Tours"),
label_delete_button = T("Delete Tour"),
msg_record_created = T("Tour added"),
msg_record_modified = T("Tour updated"),
msg_record_deleted = T("Tour deleted"),
msg_list_empty = T("No Tours currently registered"))
represent = S3Represent(lookup=tablename, translate=True)
tour_config_id = S3ReusableField("tour_config_id", "reference %s" % tablename,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "tour_config.id",
represent,
sort=True)),
represent=represent,
label=T("Tour Name"),
ondelete="SET NULL")
# Components
add_components(tablename,
# Details
tour_details="tour_config_id",
# Users
tour_user="tour_config_id",
)
# ---------------------------------------------------------------------
# Details of the tour.
#
tablename = "tour_details"
define_table(tablename,
tour_config_id(empty = False),
Field("posn", "integer",
default=0,
label=T("Position in tour")),
Field("controller",
represent=lambda v: v or NONE,
label=T("Controller name")),
Field("function",
represent=lambda v: v or NONE,
label=T("Function name")),
Field("args",
represent=lambda v: v or NONE,
label=T("Arguments")),
Field("tip_title",
represent=lambda v: v or NONE,
label=T("Title")),
Field("tip_details",
represent=lambda v: v or NONE,
label=T("Details")),
Field("html_id",
represent=lambda v: v or NONE,
label=T("HTML ID")),
Field("html_class",
represent=lambda v: v or NONE,
label=T("HTML class")),
Field("button",
represent=lambda v: v or NONE,
label=T("Button name")),
Field("tip_location",
represent=lambda v: v or NONE,
label=T("Loctaion of tip")),
Field("datatable_id",
represent=lambda v: v or NONE,
label=T("DataTable ID")),
Field("datatable_row",
represent=lambda v: v or NONE,
label=T("DataTable row")),
Field("redirect",
represent=lambda v: v or NONE,
label=T("Redirect URL")),
)
# CRUD strings
ADD_DETAILS = T("Create Details")
crud_strings[tablename] = Storage(
label_create = ADD_DETAILS,
title_display = T("Tour Details"),
title_list = T("Details"),
title_update = T("Edit Details"),
label_list_button = T("List Details"),
label_delete_button = T("Delete Detail"),
msg_record_created = T("Detail added"),
msg_record_modified = T("Detail updated"),
msg_record_deleted = T("Detail deleted"),
msg_list_empty = T("No Details currently registered"))
configure(tablename,
orderby = "tour_details.tour_config_id,tour_details.posn"
)
# ---------------------------------------------------------------------
# Details of the tours that the user has taken.
#
tablename = "tour_user"
define_table(tablename,
person_id(label = T("Person"),
ondelete="CASCADE",
empty = False,
),
tour_config_id(),
Field("place",
represent=lambda v: v or NONE,
label=T("Where reached")),
Field("resume",
represent=lambda v: v or NONE,
label=T("URL to resume tour")),
Field("completed", "boolean",
default=False,
represent=lambda v: \
T("Yes") if v else T("No"),
label=T("Completed tour?")),
Field("trip_counter", "integer",
default=0,
label=T("Times Completed")),
)
# CRUD strings
ADD_USER = T("Create User")
crud_strings[tablename] = Storage(
label_create = ADD_USER,
title_display = T("Tour User"),
title_list = T("Users"),
title_update = T("Edit User"),
label_list_button = T("List Users"),
label_delete_button = T("Delete User"),
msg_record_created = T("User added"),
msg_record_modified = T("User updated"),
msg_record_deleted = T("User deleted"),
msg_list_empty = T("No users have taken a tour"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(tour_config_id = tour_config_id,
)
# =============================================================================
def tour_rheader(r):
""" Resource Header for Guided Tour """
if r.representation == "html":
tour = r.record
if tour:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Details"), "details"),
(T("People"), "user"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
tour.name,
),
TR(TH("%s: " % table.code.label),
tour.code,
),
),
rheader_tabs
)
return rheader
return None
# =============================================================================
def tour_builder(output):
"""
Helper function to attach a guided tour (if required) to the output
"""
auth = current.auth
db = current.db
s3db = current.s3db
request = current.request
s3 = current.response.s3
T = current.T
req_vars = request.vars
tour_id = req_vars.tour
# Now see if the details are on the database for this user
tour = None
user_id = None
if auth.is_logged_in():
user_id = auth.s3_logged_in_person()
# Find out if the user has done this tour before
utable = s3db.tour_user
uquery = (utable.person_id == user_id) & \
(utable.tour_config_id == tour_id)
tour = db(uquery).select(utable.id,
utable.completed,
utable.place,
utable.resume,
limitby=(0, 1)).first()
# If the tour has just been started (from the menu) then
# it may be necessary to redirect to a different controller
# @todo: does place need to be changed to controller and function?
if not req_vars.tour_running:
if (tour and not tour.completed and tour.place != request.controller):
redirect("%s?tour=%s" %(tour.resume, tour_id))
# get the details from the database
dtable = s3db.tour_details
dquery = (dtable.tour_config_id == tour_id) &\
(dtable.controller == request.controller) &\
(dtable.function == request.function)
details = db(dquery).select(dtable.args,
dtable.tip_title,
dtable.tip_details,
dtable.button,
dtable.tip_location,
dtable.html_id,
dtable.html_class,
dtable.datatable_id,
dtable.datatable_row,
dtable.redirect,
orderby=(dtable.posn)
)
# tour_filename = os.path.join(request.folder,
# "private",
# "tour",
# tour_name)
# tour_file = open (tour_filename, "rb")
# # now open the details of the guided_tour into a dictionary
# import csv
# tour_details = csv.DictReader(tour_file, skipinitialspace=True)
# load the list of tour items in the html
joyride_OL = OL(_id="joyrideID_1")
pre_step_data = []
post_step_data = []
post_ride_data = []
last_row = None
last_used = None
req_args = request.args
cnt = -1
for row in details:
if row.args:
args = row.args.split(",")
else:
args = []
# if the page has a nested login form then "login" will be added to
# the req_args list so it needs to be added to the args list as well
if "login" in req_args:
if "login" not in args:
args.append("login")
# The following will capture the actual id used for the req_arg
# Example org/organisation/10, where 10 is the id from the database
posn = 0
for arg in args:
if arg == "dt_id":
args[posn] = req_args[posn]
posn += 1
# Now check that the tour url matches the current url
if (args == req_args):
cnt += 1 # number of records used in this part of the tour
if row.datatable_id:
dt_id = row.datatable_id
# cols = []
# if "DataTable_columns" in row:
# cols = row["DataTable_columns"].split(",")
row_num = 0
if row.datatable_row:
row_num = row.datatable_row
# Now set this up for the pre-processor hook in joyride
pre_step_data.append([cnt, dt_id, row_num])
if row.redirect:
redirect_row = row.redirect.split(",")
if len(redirect_row) >= 3:
url = URL(c=redirect_row[0],
f=redirect_row[1],
args=redirect_row[2:],
vars={"tour_running":True,
"tour":tour_id}
)
if "dt_id" in redirect_row[2]:
post_step_data.append([cnt, url, dt_id, row_num])
elif len(redirect_row) == 2:
url = URL(c=redirect_row[0],
f=redirect_row[1],
vars={"tour_running":True,
"tour":tour_id}
)
post_step_data.append([cnt, url])
else:
url = URL(c=redirect_row[0],vars={"tour_running":True,
"tour":tour_id})
post_step_data.append([cnt, url])
extra = {}
if row.html_id:
extra["_data-id"] = row.html_id
elif row.html_class:
extra["_data-class"] = row.html_class
if row.button:
extra["_data-button"] = row.button
else:
extra["_data-button"] = "Next"
if row.tip_location:
extra["_data-options"] = "tipLocation:%s" % row.tip_location.lower()
else:
extra["_data-options"] = "tipLocation:right"
joyride_OL.append(LI(H2(T(row.tip_title)),
P(T(row.tip_details)),
**extra
)
)
last_used = row
last_row = row
# The following redirect will be triggered if the user has moved away
# from the tour, such as by clicking on a tab. However if a tab
# is part of the tour we are unable to determine if they have moved
# away or just visiting as part of the tour and so it will continue.
if len(joyride_OL) == 0:
del request.vars.tour
redirect(URL(args=req_args,
vars=request.vars))
if (user_id != None) and (last_row == last_used):
# set up an AJAX call to record that the tour has been completed
post_ride_data = [cnt, tour_id]
joyride_div = DIV(joyride_OL,
_class="hidden")
# Add the javascript configuration data
from gluon.serializers import json as jsons
if pre_step_data:
joyride_div.append(INPUT(_type="hidden",
_id="prestep_data",
_name="prestep_data",
_value=jsons(pre_step_data))
)
if post_step_data:
joyride_div.append(INPUT(_type="hidden",
_id="poststep_data",
_name="poststep_data",
_value=jsons(post_step_data))
)
if post_ride_data:
joyride_div.append(INPUT(_type="hidden",
_id="postride_data",
_name="postride_data",
_value=jsons(post_ride_data))
)
# Now add the details to the tour_user table
if user_id != None:
if tour == None:
# this user has never done this tour before so create a new record
utable.insert(person_id = user_id,
tour_config_id = tour_id,
place = request.controller,
resume = request.url)
else:
# the user has done some of this tour so update the record
db(uquery).update(place = request.controller,
resume = request.url,
completed = False)
output["joyride_div"] = joyride_div
if s3.debug:
appname = request.application
s3.scripts.append("/%s/static/scripts/jquery.joyride.js" % appname)
s3.scripts.append("/%s/static/scripts/S3/s3.guidedtour.js" % appname)
s3.stylesheets.append("plugins/joyride.min.css")
else:
s3.scripts.append("/%s/static/scripts/S3/s3.guidedtour.min.js" % request.application)
s3.stylesheets.append("plugins/joyride.css")
return output
# END =========================================================================
|
import asyncio
import inspect
import json
import logging
from asyncio import Queue, CancelledError
from sanic import Blueprint, response
from sanic.request import Request
from sanic.response import HTTPResponse, ResponseStream
from typing import Text, Dict, Any, Optional, Callable, Awaitable, NoReturn, Union
import rasa.utils.endpoints
from rasa.core.channels.channel import (
InputChannel,
CollectingOutputChannel,
UserMessage,
)
logger = logging.getLogger(__name__)
class RestInput(InputChannel):
"""A custom http input channel.
This implementation is the basis for a custom implementation of a chat
frontend. You can customize this to send messages to Rasa and
retrieve responses from the assistant."""
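    # Illustrative request against the webhook route defined below (the exact URL
    # prefix depends on how the blueprint is mounted, commonly /webhooks/rest/webhook):
    #   POST {"sender": "user1", "message": "hello"}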
@classmethod
def name(cls) -> Text:
return "rest"
@staticmethod
async def on_message_wrapper(
on_new_message: Callable[[UserMessage], Awaitable[Any]],
text: Text,
queue: Queue,
sender_id: Text,
input_channel: Text,
metadata: Optional[Dict[Text, Any]],
) -> None:
collector = QueueOutputChannel(queue)
message = UserMessage(
text, collector, sender_id, input_channel=input_channel, metadata=metadata
)
await on_new_message(message)
await queue.put("DONE")
async def _extract_sender(self, req: Request) -> Optional[Text]:
return req.json.get("sender", None)
# noinspection PyMethodMayBeStatic
def _extract_message(self, req: Request) -> Optional[Text]:
return req.json.get("message", None)
def _extract_input_channel(self, req: Request) -> Text:
return req.json.get("input_channel") or self.name()
def stream_response(
self,
on_new_message: Callable[[UserMessage], Awaitable[None]],
text: Text,
sender_id: Text,
input_channel: Text,
metadata: Optional[Dict[Text, Any]],
) -> Callable[[Any], Awaitable[None]]:
async def stream(resp: Any) -> None:
q = Queue()
task = asyncio.ensure_future(
self.on_message_wrapper(
on_new_message, text, q, sender_id, input_channel, metadata
)
)
while True:
result = await q.get()
if result == "DONE":
break
else:
await resp.write(json.dumps(result) + "\n")
await task
return stream
def blueprint(
self, on_new_message: Callable[[UserMessage], Awaitable[None]]
) -> Blueprint:
custom_webhook = Blueprint(
"custom_webhook_{}".format(type(self).__name__),
inspect.getmodule(self).__name__,
)
# noinspection PyUnusedLocal
@custom_webhook.route("/", methods=["GET"])
async def health(request: Request) -> HTTPResponse:
return response.json({"status": "ok"})
@custom_webhook.route("/webhook", methods=["POST"])
async def receive(request: Request) -> Union[ResponseStream, HTTPResponse]:
sender_id = await self._extract_sender(request)
text = self._extract_message(request)
should_use_stream = rasa.utils.endpoints.bool_arg(
request, "stream", default=False
)
input_channel = self._extract_input_channel(request)
metadata = self.get_metadata(request)
if should_use_stream:
return response.stream(
self.stream_response(
on_new_message, text, sender_id, input_channel, metadata
),
content_type="text/event-stream",
)
else:
collector = CollectingOutputChannel()
# noinspection PyBroadException
try:
await on_new_message(
UserMessage(
text,
collector,
sender_id,
input_channel=input_channel,
metadata=metadata,
)
)
except CancelledError:
logger.error(
f"Message handling timed out for " f"user message '{text}'."
)
except Exception:
logger.exception(
f"An exception occured while handling "
f"user message '{text}'."
)
return response.json(collector.messages)
return custom_webhook
class QueueOutputChannel(CollectingOutputChannel):
"""Output channel that collects send messages in a list
(doesn't send them anywhere, just collects them)."""
@classmethod
def name(cls) -> Text:
return "queue"
# noinspection PyMissingConstructor
def __init__(self, message_queue: Optional[Queue] = None) -> None:
super().__init__()
self.messages = Queue() if not message_queue else message_queue
def latest_output(self) -> NoReturn:
        raise NotImplementedError("A queue doesn't allow peeking at messages.")
async def _persist_message(self, message: Dict[Text, Any]) -> None:
await self.messages.put(message)
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from weaver.mills.LineMill import LineMill
class ChemkinPickler(LineMill):
names = ["chemkin"]
def _renderDocument(self, mechanism, options=None):
self.pickleElementSection(mechanism)
self.pickleSpeciesSection(mechanism)
self.pickleThermoSection(mechanism)
self.pickleReactionSection(mechanism)
return
def pickleElementSection(self, mechanism):
self._rep += ["", "! Element section", "", "Elements"]
line = " " * 4
for element in mechanism.element():
symbol = element.symbol
if len(line) + len(symbol) > 75:
self._rep.append(line)
line = " " * 4
line += " " + symbol
self._rep.append(line)
self._rep.append("End")
return
def pickleSpeciesSection(self, mechanism):
self._rep += ["", "! Species section", "", "Species"]
line = " " * 4
for species in mechanism.species():
symbol = species.symbol
if len(line) + len(symbol) > 75:
self._rep.append(line)
line = " " * 4
line += " " + symbol
self._rep.append(line)
self._rep.append("End")
return
def pickleThermoSection(self, mechanism):
self._rep += ["", "! Thermo section", ""]
line = "Thermo"
if mechanism.thermoAll():
line += " All"
self._rep.append(line)
if mechanism.thermoRange():
line = "%15.8g " * 3 % mechanism.thermoRange()
self._rep.append(line)
format = "%15.8e" * 5 + "%5d"
for species in mechanism.species():
if not species.thermo:
continue
self._rep.append("!")
# compute line 1
line_1 = "%-18s" % species.symbol + " " * 6
composition = [
"%-2s%3d" % (element, factor)
for element, factor in species.composition
]
line_1 += "".join(composition[: min(len(composition), 4)])
line_1 += (" " * 5) * (max(0, 4 - len(composition)))
line_1 += species.phase.upper()
line_1 += "%10.3f" % species.thermo[1].lowT
line_1 += "%10.3f" % species.thermo[0].highT
if species.thermo[1].highT != species.thermo[0].lowT:
import journal
journal.firewall("fuego").hit("bad mechanism")
continue
if species.thermo[1].lowT:
line_1 += "%10.3f" % species.thermo[1].lowT
else:
line_1 += " " * 10
if len(composition) >= 5:
line_1 += "%-2s%2d" % composition[4]
else:
line_1 += " " * 4
line_1 += "1"
self._rep.append(line_1)
# get the thermo parametrization
highParameters = species.thermo[0].parameters
lowParameters = species.thermo[1].parameters
# compute line 2
line_2 = ""
line_2 += "%15.8e" % highParameters[0]
line_2 += "%15.8e" % highParameters[1]
line_2 += "%15.8e" % highParameters[2]
line_2 += "%15.8e" % highParameters[3]
line_2 += "%15.8e" % highParameters[4]
line_2 += " " * 4 + "2"
self._rep.append(line_2)
# compute line 3
line_3 = ""
line_3 += "%15.8e" % highParameters[5]
line_3 += "%15.8e" % highParameters[6]
line_3 += "%15.8e" % lowParameters[0]
line_3 += "%15.8e" % lowParameters[1]
line_3 += "%15.8e" % lowParameters[2]
line_3 += " " * 4 + "3"
self._rep.append(line_3)
# compute line 4
line_4 = ""
line_4 += "%15.8e" % lowParameters[3]
line_4 += "%15.8e" % lowParameters[4]
line_4 += "%15.8e" % lowParameters[5]
line_4 += "%15.8e" % lowParameters[6]
line_4 += " " * 15
line_4 += " " * 4 + "4"
self._rep.append(line_4)
self._rep.append("")
self._rep.append("End")
return
def pickleReactionSection(self, mechanism):
self._rep.append("")
self._rep.append("! Reaction section")
self._rep.append("")
self._rep.append("Reactions")
i = 0
for reaction in mechanism.reaction():
i += 1
self.pickleReaction(reaction, i)
self._rep.append("")
self._rep.append("End")
return
def pickleReaction(self, reaction, i):
lines = []
form = _printReagents(reaction, reaction.reactants)
if reaction.reversible:
form += " <=> "
else:
form += " => "
form += _printReagents(reaction, reaction.products)
line = "%-40s" % form
line += "%10.3g" % reaction.arrhenius[0]
line += "%10.3g" % reaction.arrhenius[1]
line += "%10.3g" % reaction.arrhenius[2]
line += " " * 5 + "! %5d" % i
lines.append(line)
if reaction.efficiencies:
efficiencies = " "
for species, coefficient in reaction.efficiencies:
efficiencies += "%s / %4.2f / " % (
species,
coefficient + 1,
) # remember adjustment
lines.append(efficiencies)
if reaction.low:
low = " LOW /%s/" % _printParameters(reaction.low)
lines.append(low)
if reaction.troe:
troe = " TROE /%s/" % _printParameters(reaction.troe)
lines.append(troe)
if reaction.sri:
sri = " SRI /%s/" % _printParameters(reaction.sri)
lines.append(sri)
if reaction.rev:
rev = " REV /%s/" % _printParameters(reaction.rev)
lines.append(rev)
if reaction.lt:
lt = " LT /%s/" % _printParameters(reaction.lt)
lines.append(lt)
if reaction.rlt:
rlt = " RLT /%s/" % _printParameters(reaction.rlt)
lines.append(rlt)
if reaction.radiation:
radiation = " HV / %g /" % reaction.radiation
lines.append(radiation)
if reaction.duplicate:
duplicate = " DUPLICATE"
lines.append(duplicate)
self._rep += lines
return lines
def __init__(self, options=None):
LineMill.__init__(self, "!", _FIRSTLINE)
return
# helpers
_FIRSTLINE = "! -*- chemkin -*-"
def _printReagents(reaction, composition):
terms = []
for species, factor in composition:
str = ""
if factor != 1:
str += "%d " % factor
str += species
terms.append(str)
line = " + ".join(terms)
if reaction.thirdBody:
species, factor = reaction.thirdBody
if species == "<mixture>":
species = "M"
if reaction.falloff:
line += " (+"
else:
line += " + "
if factor != 1:
line += "%d" % factor
line += species
if reaction.falloff:
line += ")"
return line
def _printParameters(ptuple):
format = "%10.3e " * len(ptuple)
return format % ptuple
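# Illustrative note: _printParameters renders each value with "%10.3e ", so a LOW
# tuple such as (1.0e13, 0.0, 45000.0) becomes " 1.000e+13  0.000e+00  4.500e+04 ",
# which pickleReaction then wraps as "LOW / ... /" on its own auxiliary line.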
# version
__id__ = "$Id$"
# End of file
|
import os
import os.path
import json
import pathlib
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import (
KeywordQueryEvent,
ItemEnterEvent,
PreferencesEvent,
PreferencesUpdateEvent,
)
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.item.ExtensionSmallResultItem import ExtensionSmallResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
from fuzzywuzzy import process, fuzz
class Utils:
@staticmethod
def get_path(filename, from_home=False):
base_dir = pathlib.Path.home() if from_home else pathlib.Path(
__file__).parent.absolute()
return os.path.join(base_dir, filename)
class Code:
open_command_paths = ["/opt/vscodium-bin"]
def get_installed_path(self):
for path in self.open_command_paths:
if os.path.exists(path):
return path
return False
def is_installed(self):
return bool(self.installed_path)
def get_recents(self):
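        # storage.json (VSCodium's global storage) is expected to hold an
        # "openedPathsList" -> "entries" array in which each entry carries either a
        # "folderUri" or a "fileUri", plus an optional human-readable "label".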
recents = []
storage = json.load(
open(Utils.get_path(".config/VSCodium/storage.json", True), "r"))
openedPaths = storage["openedPathsList"]["entries"]
for path in openedPaths:
folder = "folderUri" in path
uri = path["folderUri"] if folder else path["fileUri"]
label = path["label"] if "label" in path else uri.split("/")[-1]
recents.append({
"folder": folder,
"uri": uri,
"label": label
})
return recents
def open_vscode(self, recent):
if not self.is_installed():
return
option = "--folder-uri" if recent["folder"] else "--file-uri"
os.system(f"{self.installed_path} {option} {recent['uri']}")
def __init__(self):
self.installed_path = self.get_installed_path()
class CodeExtension(Extension):
keyword = None
code = None
def __init__(self):
super(CodeExtension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
self.subscribe(ItemEnterEvent, ItemEnterEventListener())
self.subscribe(PreferencesEvent, PreferencesEventListener())
self.subscribe(PreferencesUpdateEvent, PreferencesUpdateEventListener())
self.code = Code()
def get_ext_result_items(self, query):
query = query.lower() if query else ""
recents = self.code.get_recents()
items = []
data = []
label_matches = process.extract(query, choices=map(lambda c: c["label"], recents), limit=20, scorer=fuzz.partial_ratio)
uri_matches = process.extract(query, choices=map(lambda c: c["uri"], recents), limit=20, scorer=fuzz.partial_ratio)
for match in label_matches:
recent = next((c for c in recents if c["label"] == match[0]), None)
if (recent is not None and match[1] > 95):
data.append(recent)
for match in uri_matches:
recent = next((c for c in recents if c["uri"] == match[0]), None)
existing = next((c for c in data if c["uri"] == recent["uri"]), None)
if (recent is not None and existing is None):
data.append(recent)
for recent in data[:20]:
items.append(
ExtensionSmallResultItem(
icon=Utils.get_path(
f"images/{'folder' if recent['folder'] else 'file'}.svg"),
name=recent["label"],
on_enter=ExtensionCustomAction(recent),
)
)
return items
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
items = []
if not extension.code.is_installed():
items.append(
ExtensionResultItem(
icon=Utils.get_path("images/icon.svg"),
name="No VSCodium?",
description="Can't find the VSCodium's `codium` command in your system :(",
highlightable=False,
on_enter=HideWindowAction(),
)
)
return RenderResultListAction(items)
argument = event.get_argument() or ""
items.extend(extension.get_ext_result_items(argument))
return RenderResultListAction(items)
class ItemEnterEventListener(EventListener):
def on_event(self, event, extension):
recent = event.get_data()
extension.code.open_vscode(recent)
class PreferencesEventListener(EventListener):
def on_event(self, event, extension):
extension.keyword = event.preferences["code_kw"]
class PreferencesUpdateEventListener(EventListener):
def on_event(self, event, extension):
if event.id == "code_kw":
extension.keyword = event.new_value
if __name__ == "__main__":
CodeExtension().run()
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..representation import PoseEstimation3dAnnotation, PoseEstimation3dPrediction
from .postprocessor import Postprocessor
class Translate3dPoses(Postprocessor):
__provider__ = 'translate_3d_poses'
annotation_types = (PoseEstimation3dAnnotation,)
prediction_types = (PoseEstimation3dPrediction,)
def process_image(self, annotations, predictions):
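        # For each detected pose, shift the 3D keypoint coordinates by the per-pose
        # translation vector; the depth component is first scaled by the camera focal
        # length fx taken from the matching annotation, when one is available.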
for batch_id, prediction in enumerate(predictions):
for pose_id in range(prediction.size):
translation = prediction.translations[pose_id]
translation[2] *= annotations[batch_id].fx if annotations[batch_id] is not None else 1
prediction.x_3d_values[pose_id] += translation[0]
prediction.y_3d_values[pose_id] += translation[1]
prediction.z_3d_values[pose_id] += translation[2]
return annotations, predictions
|
from gensim.models import KeyedVectors
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.font_manager as fm
import pandas as pd
glove_vector_file = "vectors.txt"
gensim_glove_vector_file = "gensim_glove_vectors.txt"
top_k = 10
words_triple_file = 'similarity_words.ttl'
# Read the GloVe word-vector file and create a processing file that has the word count and vector size prepended.
vectors = pd.read_csv(glove_vector_file, delimiter=' ', index_col=0, header=None)
vocab_count = vectors.shape[0]  # number of words
num_features = vectors.shape[1]  # number of dimensions
print("Words: {}  Dimensions: {}".format(vocab_count, num_features))
glove_vectors = KeyedVectors.load_word2vec_format(gensim_glove_vector_file, binary=False)
words = list(glove_vectors.vocab.keys())
sim_words_list = []
with open(words_triple_file, 'w') as f:
for word in words:
sim_words = glove_vectors.most_similar(word, [], top_k)
for sim_word in sim_words:
triple = '"{}" owl:equivalentClass "{}"'.format(word, sim_word[0])
sim_words_list.append(triple)
f.writelines(triple + '\n')
len(sim_words_list)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Leonardo Arcari @leonardoarcari"
from networkx.relabel import convert_node_labels_to_integers
import networkx as nx
from pathlib import Path
from osgeo import ogr
from osgeo import osr
from math import cos, sin, asin, sqrt, radians
import random
## Modules
# Elementary modules
from math import radians, cos, sin, asin, sqrt
import argparse
import copy
import json
import os
speed_limits = {"motorway" :130, "trunk" :110, "primary" :90,
"secondary" :70, "tertiary" : 70, "unclassified" : 30,
"residential" : 50, "service" : 10, "motorway_link" : 60,
"trunk_link" : 60, "primary_link" : 60, "secondary_link": 60,
"tertiary_link": 35, "living_street": 5, "pedestrian" : 5,
"track" : 5, "road" : 5, "footway" : 5,
"steps" : 5, "path" : 5, "cycleway" : 5,
"it:urban": 50, "it:rural": 90, "it:motorway": 130,
"it:trunk": 110}
# Specific modules
import xml.sax # parse osm file
from pathlib import Path # manage cached tiles
banned_tags = [
"footway",
"bridleway",
"steps",
"path",
"cycleway",
"construction"
]
def street_filter(way):
ht = way.tags["highway"]
if ht in banned_tags:
return False
return True
def haversine(lon1, lat1, lon2, lat2, unit_m = True):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
default unit : km
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371 # Radius of earth in kilometers. Use 3956 for miles
if (unit_m):
r *= 1000
return c * r
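# Quick sanity check (illustrative): one degree of latitude along a meridian is roughly
# 111.19 km, so haversine(0, 0, 0, 1, unit_m=False) should return approximately 111.19.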
def download_osm(left, bottom, right, top, proxy = False, proxyHost = "10.0.4.2", proxyPort = "3128", cache = False, cacheTempDir = "/tmp/tmpOSM/", verbose = True):
""" Return a filehandle to the downloaded data from osm api."""
import urllib.request # To request the web
if 'map' in os.listdir('.'):
print("Assuming ./map is the right file")
return open('map', 'r')
if (cache):
## cached tile filename
cachedTileFilename = "osm_map_{:.8f}_{:.8f}_{:.8f}_{:.8f}.map".format(left, bottom, right, top)
if (verbose):
print("Cached tile filename :", cachedTileFilename)
Path(cacheTempDir).mkdir(parents = True, exist_ok = True) ## Create cache path if not exists
osmFile = Path(cacheTempDir + cachedTileFilename).resolve() ## Replace the relative cache folder path to absolute path
if osmFile.is_file():
# download from the cache folder
if (verbose):
print("Tile loaded from the cache folder.")
fp = urllib.request.urlopen("file://"+str(osmFile))
return fp
if (proxy):
# configure the urllib request with the proxy
proxy_handler = urllib.request.ProxyHandler({'https': 'https://' + proxyHost + ":" + proxyPort, 'http': 'http://' + proxyHost + ":" + proxyPort})
opener = urllib.request.build_opener(proxy_handler)
urllib.request.install_opener(opener)
# request = "http://api.openstreetmap.org/api/0.6/map?bbox=%f,%f,%f,%f"%(left,bottom,right,top)
# request = "http://overpass.osm.rambler.ru/cgi/xapi_meta?*[bbox=%f,%f,%f,%f]"%(left,bottom,right,top)
request = "http://www.overpass-api.de/api/xapi_meta?*[bbox=%f,%f,%f,%f]"%(left,bottom,right,top)
if (verbose):
print("Download the tile from osm web api ... in progress")
print("Request :", request)
fp = urllib.request.urlopen(request)
if (verbose):
print("OSM Tile downloaded")
if (cache):
if (verbose):
print("Write osm tile in the cache"
)
content = fp.read()
with open(osmFile, 'wb') as f:
f.write(content)
if osmFile.is_file():
if (verbose):
print("OSM tile written in the cache")
fp = urllib.request.urlopen("file://"+str(osmFile)) ## Reload the osm tile from the cache (because fp.read moved the cursor)
return fp
return fp
def read_osm(filename_or_stream, only_roads=True):
"""Read graph in OSM format from file specified by name or by stream object.
Parameters
----------
filename_or_stream : filename or stream object
Returns
-------
G : Graph
Examples
--------
>>> G=nx.read_osm(nx.download_osm(-122.33,47.60,-122.31,47.61))
>>> import matplotlib.pyplot as plt
>>> plt.plot([G.node[n]['lat']for n in G], [G.node[n]['lon'] for n in G], 'o', color='k')
>>> plt.show()
"""
osm = OSM(filename_or_stream)
G = nx.DiGraph()
## Add ways
for w in osm.ways.values():
if only_roads and 'highway' not in w.tags:
continue
if not street_filter(w):
continue
speed = 50
if 'maxspeed' in w.tags:
speed = w.tags['maxspeed']
elif w.tags['highway'] in speed_limits.keys():
speed = speed_limits[w.tags['highway']]
if ('oneway' in w.tags):
if (w.tags['oneway'] == 'yes'):
# ONLY ONE DIRECTION
G.add_path(w.nds, id=w.id, speed = speed)
else:
# BOTH DIRECTION
G.add_path(w.nds, id=w.id, speed = speed)
G.add_path(w.nds[::-1], id=w.id, speed = speed)
else:
# BOTH DIRECTION
G.add_path(w.nds, id=w.id, speed = speed)
G.add_path(w.nds[::-1], id=w.id, speed = speed)
## Complete the used nodes' information
for n_id in G.nodes():
n = osm.nodes[n_id]
G.node[n_id]['lat'] = n.lat
G.node[n_id]['lon'] = n.lon
G.node[n_id]['id'] = n.id
## Estimate the length of each way
for u,v,d in G.edges(data=True):
distance = haversine(G.node[u]['lon'], G.node[u]['lat'], G.node[v]['lon'], G.node[v]['lat'], unit_m = True) # Give a realistic distance estimation (neither EPSG nor projection nor reference system are specified)
speed = d['speed']
try:
time_seconds = distance / (float(speed)*1000) *3600
except ValueError:
speed = speed.lower()
if 'none' in speed:
speed = 50
elif 'mph' in speed or 'mp/h' in speed:
speed = ''.join(c for c in speed if c.isdigit())
speed = int(float(speed) * 1.609344)
elif 'kmh' in speed or 'km/h' in speed or 'kph' in speed or 'kp/h' in speed:
speed = ''.join(c for c in speed if c.isdigit())
elif speed in speed_limits.keys():
speed = speed_limits[speed]
else:
speed = 50
speed = int(speed)
time_seconds = distance / (speed*1000) *3600
G.add_weighted_edges_from([( u, v, time_seconds)], weight='time')
return G
class Node:
def __init__(self, id, lon, lat):
self.id = id
self.lon = lon
self.lat = lat
self.tags = {}
def __str__(self):
return "Node (id : %s) lon : %s, lat : %s "%(self.id, self.lon, self.lat)
class Way:
def __init__(self, id, osm):
self.osm = osm
self.id = id
self.nds = []
self.tags = {}
def split(self, dividers):
# slice the node-array using this nifty recursive function
def slice_array(ar, dividers):
for i in range(1,len(ar)-1):
if dividers[ar[i]]>1:
left = ar[:i+1]
right = ar[i:]
rightsliced = slice_array(right, dividers)
return [left]+rightsliced
return [ar]
slices = slice_array(self.nds, dividers)
# create a way object for each node-array slice
ret = []
i=0
for slice in slices:
littleway = copy.copy( self )
littleway.id += "-%d"%i
littleway.nds = slice
ret.append( littleway )
i += 1
return ret
class OSM:
def __init__(self, filename_or_stream):
""" File can be either a filename or stream/file object."""
nodes = {}
ways = {}
superself = self
class OSMHandler(xml.sax.ContentHandler):
@classmethod
def setDocumentLocator(self,loc):
pass
@classmethod
def startDocument(self):
pass
@classmethod
def endDocument(self):
pass
@classmethod
def startElement(self, name, attrs):
if name=='node':
self.currElem = Node(attrs['id'], float(attrs['lon']), float(attrs['lat']))
elif name=='way':
self.currElem = Way(attrs['id'], superself)
elif name=='tag':
self.currElem.tags[attrs['k']] = attrs['v']
elif name=='nd':
self.currElem.nds.append( attrs['ref'] )
@classmethod
def endElement(self,name):
if name=='node':
nodes[self.currElem.id] = self.currElem
elif name=='way':
ways[self.currElem.id] = self.currElem
@classmethod
def characters(self, chars):
pass
xml.sax.parse(filename_or_stream, OSMHandler)
self.nodes = nodes
self.ways = ways
#count times each node is used
node_histogram = dict.fromkeys( self.nodes.keys(), 0 )
        for way in list(self.ways.values()):  # iterate over a copy: ways may be deleted below
if len(way.nds) < 2: #if a way has only one node, delete it out of the osm collection
del self.ways[way.id]
else:
for node in way.nds:
node_histogram[node] += 1
#use that histogram to split all ways, replacing the member set of ways
new_ways = {}
for id, way in self.ways.items():
split_ways = way.split(node_histogram)
for split_way in split_ways:
new_ways[split_way.id] = split_way
self.ways = new_ways
class MapProvider:
"""
This is an interface for classes providing data about a geographical map.
A MapProvider offers general-information about the map, conversion between
source IDs and normalized IDs (i.e. starting from 0). Moreover, it enables
map serialization to .gr file format.
"""
def getName(self):
raise NotImplementedError()
def getNumVertices(self):
raise NotImplementedError()
def getNumEdges(self):
raise NotImplementedError()
def getXRange(self):
raise NotImplementedError()
def getYRange(self):
raise NotImplementedError()
def getPoint(self, id, targetEPSG):
raise NotImplementedError()
def getDistanceKm(self, id1, id2):
raise NotImplementedError()
def toID(self, normalized_id):
raise NotImplementedError()
def toNormalizedID(self, id):
raise NotImplementedError()
def getNormalizedVertices(self):
raise NotImplementedError()
def getNormalizedEdges(self):
raise NotImplementedError()
def generateRandomP2P(self, n, seed):
raise NotImplementedError()
def writeP2P(self, path, p2p_seq):
raise NotImplementedError()
def write(self, path):
raise NotImplementedError()
def write_coo(self, path):
raise NotImplementedError()
def writeWkt(self, out_path, alt_paths, targetEPSG):
raise NotImplementedError()
class OSMProvider(MapProvider):
def __init__(self, name, left, bottom, right, top):
super().__init__()
self.name = name
self.left = left
self.bottom = bottom
self.right = right
self.top = top
self.srcEPSG = 4326 # WGS84
self.G = read_osm(download_osm(left, bottom, right, top, cache=True))
self.G = convert_node_labels_to_integers(self.G, label_attribute='id')
def getName(self):
return self.name
def getNumVertices(self):
return self.G.number_of_nodes()
def getNumEdges(self):
return self.G.number_of_edges()
def getXRange(self):
raise NotImplementedError()
def getYRange(self):
raise NotImplementedError()
def getPoint(self, id, targetEPSG):
lat = self.G.node[id]['lat']
lon = self.G.node[id]['lon']
# Load source EPSG reference system
source = osr.SpatialReference()
source.ImportFromEPSG(self.srcEPSG)
# Load target EPSG
target = osr.SpatialReference()
target.ImportFromEPSG(targetEPSG)
# Transform coordinates
transform = osr.CoordinateTransformation(source, target)
point = ogr.CreateGeometryFromWkt('POINT ({} {})'.format(lon, lat))
point.Transform(transform)
x_prime, y_prime = (point.GetX(), point.GetY())
return (x_prime, y_prime)
def getDistanceKm(self, id1, id2):
WGS84 = 4326
lon1, lat1 = self.getPoint(id1, WGS84)
lon2, lat2 = self.getPoint(id2, WGS84)
return self._calc_distance(lat1, lon1, lat2, lon2)
def toID(self, normalized_id):
return normalized_id
def toNormalizedID(self, id):
return id
def getNormalizedVertices(self):
return self.G.nodes()
def getNormalizedEdges(self):
return self.G.edges()
def generateRandomP2P(self, n=1000, seed=None):
if seed is not None:
random.seed(seed)
def sample_node():
return random.randrange(self.getNumVertices())
p2p = [(sample_node(), sample_node()) for _ in range(n)]
return p2p
def writeP2P(self, path, p2p_seq):
p = Path(path)
print('[OSMProvider] Writing P2P in ARLib-format to {}...'.format(p))
with open(p, mode='w') as f:
for s, t in p2p_seq:
f.write('{} {}\n'.format(s, t))
def write(self, path):
p = Path(path)
print('[OSMProvider] Writing graph in ARLib-format to {}...'.format(p))
with open(p, mode='w') as f:
for u, v, w in self.G.edges(data='time'):
f.write('{} {} {}\n'.format(u, v, w))
def write_coo(self, path):
p = Path(path)
WGS84 = 4326
print('[OSMProvider] Writing coordinates in ARLib-format to {}...'.format(p))
with open(p, mode='w') as f:
for v in sorted(nx.nodes(self.G)):
lon, lat = self.getPoint(v, WGS84)
f.write('{} {}\n'.format(lon, lat))
def writeWkt(self, out_path, alt_paths, targetEPSG):
p = Path(out_path)
print('[OSMProvider] Writing alternative paths in WKT to {}...'.format(p))
# Fill Multiline
lines = []
for alt_path in alt_paths:
# Fill path Line
line = ogr.Geometry(ogr.wkbLineString)
for v in alt_path:
v = self.toID(v)
x, y = self.getPoint(v, targetEPSG)
line.AddPoint(x, y)
lines.append(line)
# Write to out_path
with open(p, mode='w') as f:
f.write('K;Line\n')
for k, line in enumerate(lines):
f.write('{};{}\n'.format(k, line.ExportToWkt()))
def writeDIMACS(self, path):
p = Path(path)
print('[OSMProvider] Writing graph in DIMACS-format to {}...'.format(p))
with open(p, mode='w') as f:
            header = ['p sp {} {}\n'.format(self.getNumVertices(), self.getNumEdges())]
f.writelines(header)
for u, v, w in self.G.edges(data='time'):
f.write('a {} {} {}\n'.format(u, v, w))
@staticmethod
def _calc_distance(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
km = 6371 * c
return km
if __name__ == "__main__":
milan = {
"top": 45.555946,
"bottom": 45.366724,
"left": 9.020613,
"right": 9.2979979
}
parser = argparse.ArgumentParser()
parser.add_argument('-f', action='store', dest='bboxfile',
help='File path of the JSON bounding box', default=None)
args = parser.parse_args()
if args.bboxfile is None:
bbox = milan
else:
with open(args.bboxfile, 'r') as fp:
bbox = json.load(fp)
maps = OSMProvider('Milan', **bbox)
maps.write('/tmp/weights')
# maps.writeDIMACS('/tmp/milan_map.gr')
maps.write_coo('/tmp/ids')
# maps.writeP2P('/tmp/milan.p2p', map.generateRandomP2P(seed=1234))
|
import re
import sys
def walk(m, n, x, y):
c = 0
while n > y:
c, m, n, x, y = c + m, n - 1, m, n - y, x
return c + x
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
print(walk(*map(int, re.findall(r'\d+', test))))
|
# JoyStick
#
# Copyright (c) 2021 Hajime Saito
#
# Released under the MIT license.
# see https://opensource.org/licenses/MIT
import pygame
from pygame.locals import *
import Repeater
JOY_MAX_TRIGGER = 16
JOY_NOINPUT = 0
JOY_UP = 0x1 << JOY_MAX_TRIGGER
JOY_RIGHT = 0x2 << JOY_MAX_TRIGGER
JOY_DOWN = 0x4 << JOY_MAX_TRIGGER
JOY_LEFT = 0x8 << JOY_MAX_TRIGGER
JOY_TRIGGER1 = 0x1 << 0
JOY_TRIGGER2 = 0x1 << 1
JOY_TRIGGER3 = 0x1 << 2
JOY_TRIGGER4 = 0x1 << 3
JOY_TRIGGER5 = 0x1 << 4
JOY_TRIGGER6 = 0x1 << 5
JOY_TRIGGER7 = 0x1 << 6
JOY_TRIGGER8 = 0x1 << 7
JOY_TRIGGER9 = 0x1 << 8
JOY_TRIGGER10 = 0x1 << 9
JOY_TRIGGER11 = 0x1 << 10
JOY_TRIGGER12 = 0x1 << 11
JOY_TRIGGER13 = 0x1 << 12
JOY_TRIGGER14 = 0x1 << 13
JOY_TRIGGER15 = 0x1 << 14
JOY_TRIGGER16 = 0x1 << 15
JOY_MASK_STICK = (JOY_UP | JOY_RIGHT | JOY_DOWN | JOY_LEFT)
JOY_MASK_BUTTON = ~JOY_MASK_STICK
class JoyStickBase(object):
def __init__(self):
self.data = JOY_NOINPUT
self.prevData = JOY_NOINPUT
self.xorData = JOY_NOINPUT
self.latestButtonDown = JOY_NOINPUT
self.latestButtonUp = JOY_NOINPUT
self.repeater = Repeater.XorRepeater()
self.repeater.setDefaultValue(JOY_NOINPUT)
self.repeatedData = JOY_NOINPUT
def update(self):
# update self.data at subclass before call this.
self.repeatedData = self.repeater.update(self.data)
self.xorData = self.data ^ self.prevData
self.latestButtonDown = self.xorData & self.data
self.latestButtonUp = self.xorData & ~self.data
self.prevData = self.data
class JoyKey(JoyStickBase):
def __init__(self):
super().__init__()
self.vk_up = K_UP
self.vk_right = K_RIGHT
self.vk_down = K_DOWN
self.vk_left = K_LEFT
self.vk_button = [ 0 ] * JOY_MAX_TRIGGER
self.vk_button[0] = K_z
self.vk_button[1] = K_x
self.vk_button[2] = K_c
def update(self):
key = pygame.key.get_pressed()
self.data = JOY_NOINPUT
if key[self.vk_up] == 1:
self.data |= JOY_UP
if key[self.vk_right] == 1:
self.data |= JOY_RIGHT
if key[self.vk_down] == 1:
self.data |= JOY_DOWN
if key[self.vk_left] == 1:
self.data |= JOY_LEFT
for i in range(JOY_MAX_TRIGGER):
if key[self.vk_button[i]] == 1:
self.data |= 1 << i
super().update()
class JoyStick(JoyStickBase):
def __init__(self, joyStickId=0):
super().__init__()
if joyStickId >= pygame.joystick.get_count():
raise ValueError("Invalid JoyStick ID {}".format(joyStickId))
self.joyStick = pygame.joystick.Joystick(joyStickId)
self.joyStick.init()
self.hasHat = True if self.joyStick.get_numhats() > 0 else False
def update(self):
self.data = JOY_NOINPUT
stickDatas = []
if self.hasHat:
for i in range(self.joyStick.get_numhats()):
x, y = self.joyStick.get_hat(i)
stickDatas.extend([ x, -y ])
else:
for i in range(self.joyStick.get_numaxes()):
stickDatas.append(self.joyStick.get_axis(i))
if stickDatas[1] < -0.5:
self.data |= JOY_UP
if stickDatas[1] > 0.5:
self.data |= JOY_DOWN
if stickDatas[0] > 0.5:
self.data |= JOY_RIGHT
if stickDatas[0] < -0.5:
self.data |= JOY_LEFT
for i in range(self.joyStick.get_numbuttons()):
if self.joyStick.get_button(i) == True:
self.data |= 1 << i
super().update()
class JoyStickIntegrator(JoyStickBase):
def __init__(self):
super().__init__()
self.joySticks = []
def append(self, joyStick):
self.joySticks.append(joyStick)
def remove(self, joyStick):
self.joySticks.remove(joyStick)
def update(self):
self.data = JOY_NOINPUT
self.repeatedData = JOY_NOINPUT
self.xorData = JOY_NOINPUT
self.latestButtonDown = JOY_NOINPUT
self.latestButtonUp = JOY_NOINPUT
for joyStick in self.joySticks:
joyStick.update()
self.data |= joyStick.data
self.repeatedData |= joyStick.repeatedData
self.xorData |= joyStick.xorData
self.latestButtonDown |= joyStick.latestButtonDown
self.latestButtonUp |= joyStick.latestButtonUp
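# A minimal usage sketch (an addition for illustration; it assumes the local Repeater
# module imported above is available and that opening a small pygame window is acceptable).
# JoyKey maps the arrow keys plus z/x/c onto the same bit flags as a physical stick,
# so the polling code below works unchanged for JoyStick as well.
if __name__ == "__main__":
    pygame.init()
    pygame.display.set_mode((320, 240))
    joy = JoyKey()
    clock = pygame.time.Clock()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
        joy.update()
        if joy.latestButtonDown & JOY_TRIGGER1:
            print("trigger 1 pressed")
        if joy.data & JOY_MASK_STICK:
            print("stick held")
        clock.tick(60)
    pygame.quit()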
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework.generics import CreateAPIView
User = get_user_model()
class SignupSerializer(serializers.Serializer):
error_message = "'{value}' is a registered {field}. Contact admin if you forgets password."
username = serializers.CharField()
password = serializers.CharField(write_only=True)
email = serializers.EmailField()
def validate_username(self, username):
if User.objects.filter(username=username).exists():
error_message = self.error_message.format(value=username, field="username")
raise serializers.ValidationError(error_message)
return username
def validate_email(self, email):
if User.objects.filter(email=email).exists():
error_message = self.error_message.format(value=email, field="email")
raise serializers.ValidationError(error_message)
return email
def create(self, validated_data):
data = validated_data.copy()
password = data.pop("password")
user = User(**data)
user.set_password(password)
user.save()
return user
def update(self, instance, validated_data):
raise RuntimeError("Update is disallowed.")
class SignupView(CreateAPIView):
serializer_class = SignupSerializer
authentication_classes = ()
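# Wiring sketch (an assumption for illustration; in a real project this belongs in urls.py,
# and the "signup/" route name is arbitrary). SignupView only needs to be routed; it clears
# authentication_classes so anonymous users can register.
from django.urls import path  # example import, kept next to its usage

urlpatterns = [
    path("signup/", SignupView.as_view(), name="signup"),
]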
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Schedule is one of the most fundamental objects to this pulse-level programming module.
A ``Schedule`` is a representation of a *program* in Pulse. Each schedule tracks the time of each
instruction occurring in parallel over multiple signal *channels*.
"""
import abc
import copy
import itertools
import multiprocessing as mp
import sys
import warnings
from collections import defaultdict
from typing import List, Tuple, Iterable, Union, Dict, Callable, Set, Optional, Any
import numpy as np
from qiskit.circuit.parameter import Parameter
from qiskit.circuit.parameterexpression import ParameterExpression, ParameterValueType
from qiskit.pulse.channels import Channel
from qiskit.pulse.exceptions import PulseError
# pylint: disable=cyclic-import
from qiskit.pulse.instructions import Instruction
from qiskit.pulse.utils import instruction_duration_validation
from qiskit.utils.multiprocessing import is_main_process
# pylint: disable=missing-return-doc
Interval = Tuple[int, int]
"""An interval type is a tuple of a start time (inclusive) and an end time (exclusive)."""
TimeSlots = Dict[Channel, List[Tuple[int, int]]]
"""List of timeslots occupied by instructions for each channel."""
class Schedule(abc.ABC):
"""A quantum program *schedule* with exact time constraints for its instructions, operating
over all input signal *channels* and supporting special syntaxes for building.
"""
# Counter for the number of instances in this class.
instances_counter = itertools.count()
# Prefix to use for auto naming.
prefix = 'sched'
def __init__(self,
*schedules: Union[Union['Schedule', Instruction],
Tuple[int, Union['Schedule', Instruction]]],
name: Optional[str] = None,
metadata: Optional[dict] = None):
"""Create an empty schedule.
Args:
*schedules: Child Schedules of this parent Schedule. May either be passed as
the list of schedules, or a list of ``(start_time, schedule)`` pairs.
name: Name of this schedule. Defaults to an autogenerated string if not provided.
metadata: Arbitrary key value metadata to associate with the schedule. This gets
stored as free-form data in a dict in the
:attr:`~qiskit.pulse.Schedule.metadata` attribute. It will not be directly
used in the schedule.
Raises:
TypeError: if metadata is not a dict.
"""
if name is None:
name = self.prefix + str(next(self.instances_counter))
if sys.platform != "win32" and not is_main_process():
name += '-{}'.format(mp.current_process().pid)
self._name = name
self._duration = 0
# These attributes are populated by ``_mutable_insert``
self._timeslots = {}
self.__children = []
self._parameter_table = defaultdict(list)
for sched_pair in schedules:
try:
time, sched = sched_pair
except TypeError:
# recreate as sequence starting at 0.
time, sched = 0, sched_pair
self._mutable_insert(time, sched)
if not isinstance(metadata, dict) and metadata is not None:
raise TypeError("Only a dictionary or None is accepted for schedule metadata")
self._metadata = metadata
@property
def name(self) -> str:
"""Name of this Schedule"""
return self._name
@property
def timeslots(self) -> TimeSlots:
"""Time keeping attribute."""
return self._timeslots
@property
def duration(self) -> int:
"""Duration of this schedule."""
return self._duration
@property
def start_time(self) -> int:
"""Starting time of this schedule."""
return self.ch_start_time(*self.channels)
@property
def stop_time(self) -> int:
"""Stopping time of this schedule."""
return self.duration
@property
def channels(self) -> Tuple[Channel]:
"""Returns channels that this schedule uses."""
return tuple(self._timeslots.keys())
@property
def _children(self) -> Tuple[Tuple[int, Union['Schedule', Instruction]], ...]:
"""Return the child``NamedValues``s of this ``Schedule`` in the
order they were added to the schedule.
Returns:
A tuple, where each element is a two-tuple containing the initial
scheduled time of each ``NamedValue`` and the component
itself.
"""
return tuple(self.__children)
@property
def instructions(self):
"""Get the time-ordered instructions from self.
ReturnType:
Tuple[Tuple[int, Instruction], ...]
"""
def key(time_inst_pair):
inst = time_inst_pair[1]
return (time_inst_pair[0], inst.duration,
sorted(chan.name for chan in inst.channels))
return tuple(sorted(self._instructions(), key=key))
@property
def metadata(self):
"""The user provided metadata associated with the schedule
The metadata for the schedule is a user provided ``dict`` of metadata
for the schedule. It will not be used to influence the execution or
operation of the schedule, but it is expected to be passed between
all transforms of the schedule and that providers will associate any
schedule metadata with the results it returns from execution of that
schedule.
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Update the schedule metadata"""
if not isinstance(metadata, dict) and metadata is not None:
raise TypeError("Only a dictionary or None is accepted for schedule metadata")
self._metadata = metadata
def ch_duration(self, *channels: List[Channel]) -> int:
"""Return the time of the end of the last instruction over the supplied channels.
Args:
*channels: Channels within ``self`` to include.
"""
return self.ch_stop_time(*channels)
def ch_start_time(self, *channels: List[Channel]) -> int:
"""Return the time of the start of the first instruction over the supplied channels.
Args:
*channels: Channels within ``self`` to include.
"""
try:
chan_intervals = (self._timeslots[chan] for chan in channels if chan in self._timeslots)
return min(intervals[0][0] for intervals in chan_intervals)
except ValueError:
# If there are no instructions over channels
return 0
def ch_stop_time(self, *channels: List[Channel]) -> int:
"""Return maximum start time over supplied channels.
Args:
*channels: Channels within ``self`` to include.
"""
try:
chan_intervals = (self._timeslots[chan] for chan in channels if chan in self._timeslots)
return max(intervals[-1][1] for intervals in chan_intervals)
except ValueError:
# If there are no instructions over channels
return 0
def _instructions(self, time: int = 0):
"""Iterable for flattening Schedule tree.
Args:
time: Shifted time due to parent.
Yields:
Iterable[Tuple[int, Instruction]]: Tuple containing the time each
:class:`~qiskit.pulse.Instruction`
starts at and the flattened :class:`~qiskit.pulse.Instruction` s.
"""
for insert_time, child_sched in self._children:
yield from child_sched._instructions(time + insert_time)
def shift(self,
time: int,
name: Optional[str] = None,
inplace: bool = False
) -> 'Schedule':
"""Return a schedule shifted forward by ``time``.
Args:
time: Time to shift by.
name: Name of the new schedule. Defaults to the name of self.
inplace: Perform operation inplace on this schedule. Otherwise
return a new ``Schedule``.
"""
if inplace:
return self._mutable_shift(time)
return self._immutable_shift(time, name=name)
def _immutable_shift(self,
time: int,
name: Optional[str] = None
) -> 'Schedule':
"""Return a new schedule shifted forward by `time`.
Args:
time: Time to shift by
name: Name of the new schedule if call was mutable. Defaults to name of self
"""
if name is None:
name = self.name
return Schedule((time, self), name=name)
def _mutable_shift(self,
time: int
) -> 'Schedule':
"""Return this schedule shifted forward by `time`.
Args:
time: Time to shift by
Raises:
PulseError: if ``time`` is not an integer.
"""
if not isinstance(time, int):
raise PulseError(
"Schedule start time must be an integer.")
timeslots = {}
for chan, ch_timeslots in self._timeslots.items():
timeslots[chan] = [(ts[0] + time, ts[1] + time) for
ts in ch_timeslots]
_check_nonnegative_timeslot(timeslots)
self._duration = self._duration + time
self._timeslots = timeslots
self.__children = [(orig_time + time, child) for
orig_time, child in self._children]
return self
def insert(self,
start_time: int,
schedule: Union['Schedule', Instruction],
name: Optional[str] = None,
inplace: bool = False
) -> 'Schedule':
"""Return a new schedule with ``schedule`` inserted into ``self`` at ``start_time``.
Args:
start_time: Time to insert the schedule.
schedule: Schedule to insert.
name: Name of the new schedule. Defaults to the name of self.
inplace: Perform operation inplace on this schedule. Otherwise
return a new ``Schedule``.
"""
if inplace:
return self._mutable_insert(start_time, schedule)
return self._immutable_insert(start_time, schedule, name=name)
def _mutable_insert(self,
start_time: int,
schedule: Union['Schedule', Instruction]
) -> 'Schedule':
"""Mutably insert `schedule` into `self` at `start_time`.
Args:
start_time: Time to insert the second schedule.
schedule: Schedule to mutably insert.
"""
self._add_timeslots(start_time, schedule)
self.__children.append((start_time, schedule))
self._update_parameter_table(schedule)
return self
def _immutable_insert(self,
start_time: int,
schedule: Union['Schedule', Instruction],
name: Optional[str] = None,
) -> 'Schedule':
"""Return a new schedule with ``schedule`` inserted into ``self`` at ``start_time``.
Args:
start_time: Time to insert the schedule.
schedule: Schedule to insert.
name: Name of the new ``Schedule``. Defaults to name of ``self``.
"""
if name is None:
name = self.name
new_sched = Schedule(name=name)
new_sched._mutable_insert(0, self)
new_sched._mutable_insert(start_time, schedule)
return new_sched
def append(self, schedule: Union['Schedule', Instruction],
name: Optional[str] = None,
inplace: bool = False) -> 'Schedule':
r"""Return a new schedule with ``schedule`` inserted at the maximum time over
all channels shared between ``self`` and ``schedule``.
.. math::
t = \textrm{max}(\texttt{x.stop_time} |\texttt{x} \in
\texttt{self.channels} \cap \texttt{schedule.channels})
Args:
schedule: Schedule to be appended.
name: Name of the new ``Schedule``. Defaults to name of ``self``.
inplace: Perform operation inplace on this schedule. Otherwise
return a new ``Schedule``.
"""
common_channels = set(self.channels) & set(schedule.channels)
time = self.ch_stop_time(*common_channels)
return self.insert(time, schedule, name=name, inplace=inplace)
def flatten(self) -> 'Schedule':
"""Return a new schedule which is the flattened schedule contained all ``instructions``."""
return Schedule(*self.instructions, name=self.name)
def filter(self, *filter_funcs: List[Callable],
channels: Optional[Iterable[Channel]] = None,
instruction_types=None,
time_ranges: Optional[Iterable[Tuple[int, int]]] = None,
intervals: Optional[Iterable[Interval]] = None) -> 'Schedule':
"""Return a new ``Schedule`` with only the instructions from this ``Schedule`` which pass
though the provided filters; i.e. an instruction will be retained iff every function in
``filter_funcs`` returns ``True``, the instruction occurs on a channel type contained in
``channels``, the instruction type is contained in ``instruction_types``, and the period
over which the instruction operates is *fully* contained in one specified in
``time_ranges`` or ``intervals``.
If no arguments are provided, ``self`` is returned.
Args:
filter_funcs: A list of Callables which take a (int, Union['Schedule', Instruction])
tuple and return a bool.
channels: For example, ``[DriveChannel(0), AcquireChannel(0)]``.
instruction_types (Optional[Iterable[Type[qiskit.pulse.Instruction]]]): For example,
``[PulseInstruction, AcquireInstruction]``.
time_ranges: For example, ``[(0, 5), (6, 10)]``.
intervals: For example, ``[(0, 5), (6, 10)]``.
"""
composed_filter = self._construct_filter(*filter_funcs,
channels=channels,
instruction_types=instruction_types,
time_ranges=time_ranges,
intervals=intervals)
return self._apply_filter(composed_filter,
new_sched_name="{name}".format(name=self.name))
def exclude(self, *filter_funcs: List[Callable],
channels: Optional[Iterable[Channel]] = None,
instruction_types=None,
time_ranges: Optional[Iterable[Tuple[int, int]]] = None,
intervals: Optional[Iterable[Interval]] = None) -> 'Schedule':
"""Return a Schedule with only the instructions from this Schedule *failing* at least one
of the provided filters. This method is the complement of ``self.filter``, so that::
self.filter(args) | self.exclude(args) == self
Args:
filter_funcs: A list of Callables which take a (int, Union['Schedule', Instruction])
tuple and return a bool.
channels: For example, ``[DriveChannel(0), AcquireChannel(0)]``.
instruction_types (Optional[Iterable[Type[qiskit.pulse.Instruction]]]): For example,
``[PulseInstruction, AcquireInstruction]``.
time_ranges: For example, ``[(0, 5), (6, 10)]``.
intervals: For example, ``[(0, 5), (6, 10)]``.
"""
composed_filter = self._construct_filter(*filter_funcs,
channels=channels,
instruction_types=instruction_types,
time_ranges=time_ranges,
intervals=intervals)
return self._apply_filter(lambda x: not composed_filter(x),
new_sched_name="{name}".format(name=self.name))
def _apply_filter(self, filter_func: Callable, new_sched_name: str) -> 'Schedule':
"""Return a Schedule containing only the instructions from this Schedule for which
``filter_func`` returns ``True``.
Args:
filter_func: Function of the form (int, Union['Schedule', Instruction]) -> bool.
new_sched_name: Name of the returned ``Schedule``.
"""
subschedules = self.flatten()._children
valid_subschedules = [sched for sched in subschedules if filter_func(sched)]
return Schedule(*valid_subschedules, name=new_sched_name)
def _construct_filter(self, *filter_funcs: List[Callable],
channels: Optional[Iterable[Channel]] = None,
instruction_types: Optional[Iterable[Instruction]] = None,
time_ranges: Optional[Iterable[Tuple[int, int]]] = None,
intervals: Optional[Iterable[Interval]] = None) -> Callable:
"""Returns a boolean-valued function with input type ``(int, ScheduleComponent)`` that
returns ``True`` iff the input satisfies all of the criteria specified by the arguments;
i.e. iff every function in ``filter_funcs`` returns ``True``, the instruction occurs on a
channel type contained in ``channels``, the instruction type is contained in
``instruction_types``, and the period over which the instruction operates is fully
contained in one specified in ``time_ranges`` or ``intervals``.
Args:
filter_funcs: A list of Callables which take a (int, ScheduleComponent) tuple and
return a bool
channels: For example, ``[DriveChannel(0), AcquireChannel(0)]`` or ``DriveChannel(0)``
instruction_types: For example, ``[PulseInstruction, AcquireInstruction]``
or ``DelayInstruction``
time_ranges: For example, ``[(0, 5), (6, 10)]`` or ``(0, 5)``
intervals: For example, ``[Interval(0, 5), Interval(6, 10)]`` or ``Interval(0, 5)``
"""
def if_scalar_cast_to_list(to_list):
try:
iter(to_list)
except TypeError:
to_list = [to_list]
return to_list
def only_channels(channels: Union[Set[Channel], Channel]) -> Callable:
channels = if_scalar_cast_to_list(channels)
def channel_filter(time_inst) -> bool:
"""Filter channel.
Args:
time_inst (Tuple[int, Instruction]): Time
"""
return any([chan in channels for chan in time_inst[1].channels])
return channel_filter
def only_instruction_types(types: Union[Iterable[abc.ABCMeta], abc.ABCMeta]) -> Callable:
types = if_scalar_cast_to_list(types)
def instruction_filter(time_inst) -> bool:
"""Filter instruction.
Args:
time_inst (Tuple[int, Instruction]): Time
"""
return isinstance(time_inst[1], tuple(types))
return instruction_filter
def only_intervals(ranges: Union[Iterable[Interval], Interval]) -> Callable:
ranges = if_scalar_cast_to_list(ranges)
def interval_filter(time_inst) -> bool:
"""Filter interval.
Args:
time_inst (Tuple[int, Instruction]): Time
"""
for i in ranges:
inst_start = time_inst[0]
inst_stop = inst_start + time_inst[1].duration
if i[0] <= inst_start and inst_stop <= i[1]:
return True
return False
return interval_filter
filter_func_list = list(filter_funcs)
if channels is not None:
filter_func_list.append(only_channels(channels))
if instruction_types is not None:
filter_func_list.append(only_instruction_types(instruction_types))
if time_ranges is not None:
filter_func_list.append(only_intervals(time_ranges))
if intervals is not None:
filter_func_list.append(only_intervals(intervals))
# return function returning true iff all filters are passed
return lambda x: all([filter_func(x) for filter_func in filter_func_list])
def _add_timeslots(self,
time: int,
schedule: Union['Schedule', Instruction]) -> None:
"""Update all time tracking within this schedule based on the given schedule.
Args:
time: The time to insert the schedule into self.
schedule: The schedule to insert into self.
Raises:
PulseError: If timeslots overlap or an invalid start time is provided.
"""
if not np.issubdtype(type(time), np.integer):
raise PulseError("Schedule start time must be an integer.")
other_timeslots = _get_timeslots(schedule)
self._duration = max(self._duration, time + schedule.duration)
for channel in schedule.channels:
if channel not in self._timeslots:
if time == 0:
self._timeslots[channel] = copy.copy(other_timeslots[channel])
else:
self._timeslots[channel] = [(i[0] + time, i[1] + time)
for i in other_timeslots[channel]]
continue
for idx, interval in enumerate(other_timeslots[channel]):
if interval[0] + time >= self._timeslots[channel][-1][1]:
# Can append the remaining intervals
self._timeslots[channel].extend(
[(i[0] + time, i[1] + time)
for i in other_timeslots[channel][idx:]])
break
try:
interval = (interval[0] + time, interval[1] + time)
index = _find_insertion_index(self._timeslots[channel], interval)
self._timeslots[channel].insert(index, interval)
except PulseError:
raise PulseError(
"Schedule(name='{new}') cannot be inserted into Schedule(name='{old}') at "
"time {time} because its instruction on channel {ch} scheduled from time "
"{t0} to {tf} overlaps with an existing instruction."
"".format(new=schedule.name or '', old=self.name or '', time=time,
ch=channel, t0=interval[0], tf=interval[1]))
_check_nonnegative_timeslot(self._timeslots)
def _remove_timeslots(self,
time: int,
schedule: Union['Schedule', Instruction]):
"""Delete the timeslots if present for the respective schedule component.
Args:
time: The time to remove the timeslots for the ``schedule`` component.
schedule: The schedule to insert into self.
Raises:
PulseError: If timeslots overlap or an invalid start time is provided.
"""
if not isinstance(time, int):
raise PulseError("Schedule start time must be an integer.")
for channel in schedule.channels:
if channel not in self._timeslots:
raise PulseError(
'The channel {} is not present in the schedule'.format(channel))
channel_timeslots = self._timeslots[channel]
other_timeslots = _get_timeslots(schedule)
for interval in other_timeslots[channel]:
if channel_timeslots:
interval = (interval[0] + time, interval[1] + time)
index = _interval_index(channel_timeslots, interval)
if channel_timeslots[index] == interval:
channel_timeslots.pop(index)
continue
raise PulseError(
"Cannot find interval ({t0}, {tf}) to remove from "
"channel {ch} in Schedule(name='{name}').".format(
ch=channel, t0=interval[0], tf=interval[1], name=schedule.name))
if not channel_timeslots:
self._timeslots.pop(channel)
def _replace_timeslots(self,
time: int,
old: Union['Schedule', Instruction],
new: Union['Schedule', Instruction]):
"""Replace the timeslots of ``old`` if present with the timeslots of ``new``.
Args:
time: The time to remove the timeslots for the ``schedule`` component.
old: Instruction to replace.
new: Instruction to replace with.
"""
self._remove_timeslots(time, old)
self._add_timeslots(time, new)
def replace(self,
old: Union['Schedule', Instruction],
new: Union['Schedule', Instruction],
inplace: bool = False,
) -> 'Schedule':
"""Return a schedule with the ``old`` instruction replaced with a ``new``
instruction.
The replacement matching is based on an instruction equality check.
.. jupyter-kernel:: python3
:id: replace
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.DriveChannel(0)
sched = pulse.Schedule()
old = pulse.Play(pulse.Constant(100, 1.0), d0)
new = pulse.Play(pulse.Constant(100, 0.1), d0)
sched += old
sched = sched.replace(old, new)
assert sched == pulse.Schedule(new)
        Only matches at the top level of the schedule tree. If you wish to
        perform this replacement over all instructions in the schedule tree,
        flatten the schedule prior to running::
.. jupyter-execute::
sched = pulse.Schedule()
sched += pulse.Schedule(old)
sched = sched.flatten()
sched = sched.replace(old, new)
assert sched == pulse.Schedule(new)
Args:
old: Instruction to replace.
new: Instruction to replace with.
inplace: Replace instruction by mutably modifying this ``Schedule``.
Returns:
The modified schedule with ``old`` replaced by ``new``.
Raises:
            PulseError: If the ``Schedule`` after replacements would have a timing overlap.
"""
new_children = []
for time, child in self._children:
if child == old:
new_children.append((time, new))
if inplace:
self._replace_timeslots(time, old, new)
else:
new_children.append((time, child))
if inplace:
self.__children = new_children
self._parameter_table.clear()
for _, child in new_children:
self._update_parameter_table(child)
return self
else:
try:
return Schedule(*new_children)
except PulseError as err:
raise PulseError(
'Replacement of {old} with {new} results in '
'overlapping instructions.'.format(
old=old, new=new)) from err
@property
def parameters(self) -> Set:
"""Parameters which determine the schedule behavior."""
return set(self._parameter_table.keys())
def is_parameterized(self) -> bool:
"""Return True iff the instruction is parameterized."""
return bool(self.parameters)
def assign_parameters(self,
value_dict: Dict[ParameterExpression, ParameterValueType],
) -> 'Schedule':
"""Assign the parameters in this schedule according to the input.
Args:
value_dict: A mapping from Parameters to either numeric values or another
Parameter expression.
Returns:
Schedule with updated parameters (a new one if not inplace, otherwise self).
"""
for parameter in self.parameters:
if parameter not in value_dict:
continue
value = value_dict[parameter]
for inst in self._parameter_table[parameter]:
inst.assign_parameters({parameter: value})
entry = self._parameter_table.pop(parameter)
if isinstance(value, ParameterExpression):
for new_parameter in value.parameters:
if new_parameter in self._parameter_table:
new_entry = set(entry + self._parameter_table[new_parameter])
self._parameter_table[new_parameter] = list(new_entry)
else:
self._parameter_table[new_parameter] = entry
# Update timeslots according to new channel keys
for chan in copy.copy(self._timeslots):
if isinstance(chan.index, ParameterExpression):
chan_timeslots = self._timeslots.pop(chan)
# Find the channel's new assignment
new_channel = chan
for param, value in value_dict.items():
if param in new_channel.parameters:
new_channel = new_channel.assign(param, value)
# Merge with existing channel
if new_channel in self._timeslots:
sched = Schedule()
sched._timeslots = {new_channel: chan_timeslots}
self._add_timeslots(0, sched)
# Or add back under the new name
else:
self._timeslots[new_channel] = chan_timeslots
return self
def get_parameters(self,
parameter_name: str) -> List[Parameter]:
"""Get parameter object bound to this schedule by string name.
Because different ``Parameter`` objects can have the same name,
this method returns a list of ``Parameter`` s for the provided name.
Args:
parameter_name: Name of parameter.
Returns:
Parameter objects that have corresponding name.
"""
return [param for param in self.parameters if param.name == parameter_name]
def _update_parameter_table(self, schedule: 'Schedule'):
"""
Args:
schedule:
"""
schedule = schedule.flatten()
for _, inst in schedule.instructions:
for param in inst.parameters:
self._parameter_table[param].append(inst)
def draw(self,
dt: Any = None, # deprecated
style: Optional[Dict[str, Any]] = None,
filename: Any = None, # deprecated
interp_method: Any = None, # deprecated
scale: Any = None, # deprecated
channel_scales: Any = None, # deprecated
plot_all: Any = None, # deprecated
plot_range: Any = None, # deprecated
interactive: Any = None, # deprecated
table: Any = None, # deprecated
label: Any = None, # deprecated
framechange: Any = None, # deprecated
channels: Any = None, # deprecated
show_framechange_channels: Any = None, # deprecated
draw_title: Any = None, # deprecated
backend=None, # importing backend causes cyclic import
time_range: Optional[Tuple[int, int]] = None,
time_unit: str = 'dt',
disable_channels: Optional[List[Channel]] = None,
show_snapshot: bool = True,
show_framechange: bool = True,
show_waveform_info: bool = True,
show_barrier: bool = True,
plotter: str = 'mpl2d',
axis: Optional[Any] = None):
"""Plot the schedule.
Args:
style: Stylesheet options. This can be dictionary or preset stylesheet classes. See
:py:class:~`qiskit.visualization.pulse_v2.stylesheets.IQXStandard`,
:py:class:~`qiskit.visualization.pulse_v2.stylesheets.IQXSimple`, and
:py:class:~`qiskit.visualization.pulse_v2.stylesheets.IQXDebugging` for details of
preset stylesheets.
backend (Optional[BaseBackend]): Backend object to play the input pulse program.
                If provided, the plotter may use it to make the visualization hardware aware.
time_range: Set horizontal axis limit. Tuple `(tmin, tmax)`.
time_unit: The unit of specified time range either `dt` or `ns`.
The unit of `ns` is available only when `backend` object is provided.
            disable_channels: A control property to hide specific pulse channels.
                Pulse channel instances provided in this list are not shown in the output image.
show_snapshot: Show snapshot instructions.
show_framechange: Show frame change instructions. The frame change represents
instructions that modulate phase or frequency of pulse channels.
show_waveform_info: Show additional information about waveforms such as their name.
show_barrier: Show barrier lines.
plotter: Name of plotter API to generate an output image.
One of following APIs should be specified::
mpl2d: Matplotlib API for 2D image generation.
Matplotlib API to generate 2D image. Charts are placed along y axis with
vertical offset. This API takes matplotlib.axes.Axes as ``axis`` input.
``axis`` and ``style`` kwargs may depend on the plotter.
axis: Arbitrary object passed to the plotter. If this object is provided,
the plotters use a given ``axis`` instead of internally initializing
a figure object. This object format depends on the plotter.
See plotter argument for details.
dt: Deprecated. This argument is used by the legacy pulse drawer.
filename: Deprecated. This argument is used by the legacy pulse drawer.
To save output image, you can call ``.savefig`` method with
returned Matplotlib Figure object.
interp_method: Deprecated. This argument is used by the legacy pulse drawer.
scale: Deprecated. This argument is used by the legacy pulse drawer.
channel_scales: Deprecated. This argument is used by the legacy pulse drawer.
plot_all: Deprecated. This argument is used by the legacy pulse drawer.
plot_range: Deprecated. This argument is used by the legacy pulse drawer.
interactive: Deprecated. This argument is used by the legacy pulse drawer.
table: Deprecated. This argument is used by the legacy pulse drawer.
label: Deprecated. This argument is used by the legacy pulse drawer.
framechange: Deprecated. This argument is used by the legacy pulse drawer.
channels: Deprecated. This argument is used by the legacy pulse drawer.
show_framechange_channels: Deprecated. This argument is used by the legacy pulse drawer.
draw_title: Deprecated. This argument is used by the legacy pulse drawer.
Returns:
Visualization output data.
The returned data type depends on the ``plotter``.
If matplotlib family is specified, this will be a ``matplotlib.pyplot.Figure`` data.
"""
# pylint: disable=cyclic-import, missing-return-type-doc
from qiskit.visualization import pulse_drawer_v2, SchedStyle
legacy_args = {'dt': dt,
'filename': filename,
'interp_method': interp_method,
'scale': scale,
'channel_scales': channel_scales,
'plot_all': plot_all,
'plot_range': plot_range,
'interactive': interactive,
'table': table,
'label': label,
'framechange': framechange,
'channels': channels,
'show_framechange_channels': show_framechange_channels,
'draw_title': draw_title}
active_legacy_args = []
for name, legacy_arg in legacy_args.items():
if legacy_arg is not None:
active_legacy_args.append(name)
if active_legacy_args:
warnings.warn('Legacy pulse drawer is deprecated. '
'Specified arguments {dep_args} are deprecated. '
'Please check the API document of new pulse drawer '
'`qiskit.visualization.pulse_drawer_v2`.'
''.format(dep_args=', '.join(active_legacy_args)),
DeprecationWarning)
if filename:
warnings.warn('File saving is delegated to the plotter software in new drawer. '
'If you specify matplotlib plotter family to `plotter` argument, '
'you can call `savefig` method with the returned Figure object.',
DeprecationWarning)
if isinstance(style, SchedStyle):
style = None
warnings.warn('Legacy stylesheet is specified. This is ignored in the new drawer. '
'Please check the API documentation for this method.')
return pulse_drawer_v2(program=self,
style=style,
backend=backend,
time_range=time_range,
time_unit=time_unit,
disable_channels=disable_channels,
show_snapshot=show_snapshot,
show_framechange=show_framechange,
show_waveform_info=show_waveform_info,
show_barrier=show_barrier,
plotter=plotter,
axis=axis)
def __eq__(self, other: Union['Schedule', Instruction]) -> bool:
"""Test if two ScheduleComponents are equal.
Equality is checked by verifying there is an equal instruction at every time
in ``other`` for every instruction in this ``Schedule``.
.. warning::
This does not check for logical equivalency. I.e.,
```python
>>> (Delay(10)(DriveChannel(0)) + Delay(10)(DriveChannel(0)) ==
Delay(20)(DriveChannel(0)))
False
```
"""
channels = set(self.channels)
other_channels = set(other.channels)
# first check channels are the same
if channels != other_channels:
return False
# then verify same number of instructions in each
instructions = self.instructions
other_instructions = other.instructions
if len(instructions) != len(other_instructions):
return False
# finally check each instruction in `other` is in this schedule
for idx, inst in enumerate(other_instructions):
# check assumes `Schedule.instructions` is sorted consistently
if instructions[idx] != inst:
return False
return True
def __add__(self, other: Union['Schedule', Instruction]) -> 'Schedule':
"""Return a new schedule with ``other`` inserted within ``self`` at ``start_time``."""
return self.append(other)
def __or__(self, other: Union['Schedule', Instruction]) -> 'Schedule':
"""Return a new schedule which is the union of `self` and `other`."""
return self.insert(0, other)
def __lshift__(self, time: int) -> 'Schedule':
"""Return a new schedule which is shifted forward by ``time``."""
return self.shift(time)
def __len__(self) -> int:
"""Return number of instructions in the schedule."""
return len(self.instructions)
def __repr__(self):
name = format(self._name) if self._name else ""
instructions = ", ".join([repr(instr) for instr in self.instructions[:50]])
if len(self.instructions) > 50:
instructions += ", ..."
return 'Schedule({}, name="{}")'.format(instructions, name)
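# A minimal usage sketch for parameter lookup on a Schedule. The imports and
# values below are illustrative assumptions (not part of this module) and rely
# on qiskit.pulse instructions accepting an unbound circuit Parameter.
def _demo_get_parameters():
    from qiskit.circuit import Parameter
    from qiskit.pulse import ShiftPhase, DriveChannel
    phi = Parameter('phi')
    sched = Schedule(ShiftPhase(phi, DriveChannel(0)))
    # ``get_parameters`` looks parameters up by name and may return several
    # distinct Parameter objects sharing that name.
    assert sched.get_parameters('phi') == [phi]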
class ParameterizedSchedule:
"""Temporary parameterized schedule class.
This should not be returned to users as it is currently only a helper class.
This class takes an input command definition that accepts
a set of parameters. Calling ``bind`` on the class will return a ``Schedule``.
"""
def __init__(self, *schedules, parameters: Optional[Dict[str, Union[float, complex]]] = None,
name: Optional[str] = None):
warnings.warn('ParameterizedSchedule is deprecated. Use Schedule with '
'circuit.Parameter objects.', DeprecationWarning)
full_schedules = []
parameterized = []
parameters = parameters or []
self.name = name or ''
# partition schedules into callable and schedules
for schedule in schedules:
if isinstance(schedule, ParameterizedSchedule):
parameterized.append(schedule)
parameters += schedule.parameters
elif callable(schedule):
parameterized.append(schedule)
elif isinstance(schedule, Schedule):
full_schedules.append(schedule)
else:
raise PulseError('Input type: {} not supported'.format(type(schedule)))
self._parameterized = tuple(parameterized)
self._schedules = tuple(full_schedules)
self._parameters = tuple(sorted(set(parameters)))
@property
def parameters(self) -> Tuple[str]:
"""Schedule parameters."""
return self._parameters
def bind_parameters(self,
*args: Union[int, float, complex, ParameterExpression],
**kwargs: Union[int, float, complex, ParameterExpression]) -> Schedule:
"""Generate the Schedule from params to evaluate command expressions"""
bound_schedule = Schedule(name=self.name)
schedules = list(self._schedules)
named_parameters = {}
if args:
for key, val in zip(self.parameters, args):
named_parameters[key] = val
if kwargs:
for key, val in kwargs.items():
if key in self.parameters:
if key not in named_parameters.keys():
named_parameters[key] = val
else:
raise PulseError("%s got multiple values for argument '%s'"
% (self.__class__.__name__, key))
else:
raise PulseError("%s got an unexpected keyword argument '%s'"
% (self.__class__.__name__, key))
for param_sched in self._parameterized:
# recursively call until the base callable is reached
if isinstance(param_sched, type(self)):
predefined = param_sched.parameters
else:
# assuming no other parameterized instructions
predefined = self.parameters
sub_params = {k: v for k, v in named_parameters.items()
if k in predefined}
schedules.append(param_sched(**sub_params))
# construct evaluated schedules
for sched in schedules:
if isinstance(sched, tuple):
bound_schedule.insert(sched[0], sched[1])
else:
bound_schedule |= sched
return bound_schedule
def __call__(self, *args: Union[int, float, complex, ParameterExpression],
**kwargs: Union[int, float, complex, ParameterExpression]) -> Schedule:
return self.bind_parameters(*args, **kwargs)
def _interval_index(intervals: List[Interval], interval: Interval) -> int:
"""Find the index of an interval.
Args:
intervals: A sorted list of non-overlapping Intervals.
interval: The interval for which the index into intervals will be found.
Returns:
The index of the interval.
Raises:
PulseError: If the interval does not exist.
"""
index = _locate_interval_index(intervals, interval)
found_interval = intervals[index]
if found_interval != interval:
raise PulseError('The interval: {} does not exist in intervals: {}'.format(
interval, intervals
))
return index
def _locate_interval_index(intervals: List[Interval],
interval: Interval,
index: int = 0) -> int:
"""Using binary search on start times, find an interval.
Args:
intervals: A sorted list of non-overlapping Intervals.
interval: The interval for which the index into intervals will be found.
index: A running tally of the index, for recursion. The user should not pass a value.
Returns:
The index into intervals that new_interval would be inserted to maintain
a sorted list of intervals.
"""
if not intervals or len(intervals) == 1:
return index
mid_idx = len(intervals) // 2
mid = intervals[mid_idx]
if interval[1] <= mid[0] and (interval != mid):
return _locate_interval_index(intervals[:mid_idx], interval, index=index)
else:
return _locate_interval_index(intervals[mid_idx:], interval, index=index + mid_idx)
def _find_insertion_index(intervals: List[Interval], new_interval: Interval) -> int:
"""Using binary search on start times, return the index into `intervals` where the new interval
belongs, or raise an error if the new interval overlaps with any existing ones.
Args:
intervals: A sorted list of non-overlapping Intervals.
new_interval: The interval for which the index into intervals will be found.
Returns:
The index into intervals that new_interval should be inserted to maintain a sorted list
of intervals.
Raises:
PulseError: If new_interval overlaps with the given intervals.
"""
index = _locate_interval_index(intervals, new_interval)
if index < len(intervals):
if _overlaps(intervals[index], new_interval):
raise PulseError("New interval overlaps with existing.")
return index if new_interval[1] <= intervals[index][0] else index + 1
return index
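# A small sketch of how ``_find_insertion_index`` behaves on a sorted,
# non-overlapping interval list; the interval values are hypothetical.
def _demo_find_insertion_index():
    intervals = [(0, 5), (10, 15)]
    # (5, 10) fits exactly between the two existing intervals.
    assert _find_insertion_index(intervals, (5, 10)) == 1
    # (3, 7) overlaps (0, 5), so a PulseError is raised instead.
    try:
        _find_insertion_index(intervals, (3, 7))
    except PulseError:
        pass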
def _overlaps(first: Interval, second: Interval) -> bool:
"""Return True iff first and second overlap.
Note: first.stop may equal second.start, since Interval stop times are exclusive.
"""
if first[0] == second[0] == second[1]:
# They fail to overlap if one of the intervals has duration 0
return False
if first[0] > second[0]:
first, second = second, first
return second[0] < first[1]
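# A brief sketch of the exclusive-stop semantics documented above; the tuples
# are hypothetical (start, stop) pairs.
def _demo_overlaps():
    assert _overlaps((0, 5), (3, 10))      # interiors intersect
    assert not _overlaps((0, 5), (5, 10))  # stop times are exclusive
    assert not _overlaps((2, 5), (2, 2))   # zero-duration interval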
def _check_nonnegative_timeslot(timeslots: TimeSlots):
"""Test that a channel has no negative timeslots.
Raises:
PulseError: If a channel timeslot is negative.
"""
for chan, chan_timeslots in timeslots.items():
if chan_timeslots:
if chan_timeslots[0][0] < 0:
raise PulseError(
"An instruction on {} has a negative "
" starting time.".format(chan))
def _get_timeslots(schedule: Union[Instruction, Schedule]) -> TimeSlots:
"""Generate timeslots from given schedule component.
Args:
schedule: Input schedule component.
Returns:
Timeslots for the given schedule component, keyed by channel.
Raises:
PulseError: When an invalid schedule type is specified.
"""
if isinstance(schedule, Instruction):
duration = schedule.duration
instruction_duration_validation(duration)
timeslots = {channel: [(0, duration)] for channel in schedule.channels}
elif isinstance(schedule, Schedule):
timeslots = schedule.timeslots
else:
raise PulseError('Invalid schedule type {} is specified.'.format(type(schedule)))
return timeslots
|
import jwt
from flask import render_template, Flask, request, session, send_file
import secrets
from datetime import datetime
import io
from jwt import PyJWTError
from werkzeug.exceptions import BadRequest
from werkzeug.utils import redirect
import pandas as pd
from microsetta_admin import metadata_util, upload_util
from microsetta_admin.config_manager import SERVER_CONFIG
from microsetta_admin._api import APIRequest
import importlib.resources as pkg_resources
TOKEN_KEY_NAME = 'token'
SEND_EMAIL_CHECKBOX_DEFAULT_NAME = 'send_email'
PUB_KEY = pkg_resources.read_text(
'microsetta_admin',
"authrocket.pubkey")
DUMMY_SELECT_TEXT = '-------'
RECEIVED_TYPE_DROPDOWN = \
[DUMMY_SELECT_TEXT, "Blood (skin prick)", "Saliva", "Stool",
"Sample Type Unclear (Swabs Included)"]
VALID_STATUS = "sample-is-valid"
NO_SOURCE_STATUS = "no-associated-source"
NO_ACCOUNT_STATUS = "no-registered-account"
NO_COLLECTION_INFO_STATUS = "no-collection-info"
INCONSISTENT_SAMPLE_STATUS = "sample-has-inconsistencies"
UNKNOWN_VALIDITY_STATUS = "received-unknown-validity"
STATUS_OPTIONS = [DUMMY_SELECT_TEXT, VALID_STATUS, NO_SOURCE_STATUS,
NO_ACCOUNT_STATUS, NO_COLLECTION_INFO_STATUS,
INCONSISTENT_SAMPLE_STATUS, UNKNOWN_VALIDITY_STATUS]
API_PROJECTS_URL = '/api/admin/projects'
def handle_pyjwt(pyjwt_error):
# PyJWTError (i.e., anything wrong with the token) will force the user to log out
# and log in again
return redirect('/logout')
def parse_jwt(token):
"""
Raises
------
jwt.PyJWTError
If the token is invalid
"""
decoded = jwt.decode(token, PUB_KEY, algorithms=['RS256'], verify=True)
return decoded
def build_login_variables():
# Anything that renders sitebase.html must pass down these variables to
# jinja2
token_info = None
if TOKEN_KEY_NAME in session:
# If user leaves the page open, the token can expire before the
# session, so if our token goes bad we need to force them to log in
# again.
token_info = parse_jwt(session[TOKEN_KEY_NAME])
vars = {
'endpoint': SERVER_CONFIG["endpoint"],
'ui_endpoint': SERVER_CONFIG["ui_endpoint"],
'authrocket_url': SERVER_CONFIG["authrocket_url"]
}
if token_info is not None:
vars['email'] = token_info['email']
return vars
def build_app():
# Create the application instance
app = Flask(__name__)
flask_secret = SERVER_CONFIG["FLASK_SECRET_KEY"]
if flask_secret is None:
print("WARNING: FLASK_SECRET_KEY must be set to run with gUnicorn")
flask_secret = secrets.token_urlsafe(16)
app.secret_key = flask_secret
app.config['SESSION_TYPE'] = 'memcached'
app.config['SESSION_COOKIE_NAME'] = 'session-microsetta-admin'
# Set mapping from exception type to response code
app.register_error_handler(PyJWTError, handle_pyjwt)
return app
app = build_app()
@app.context_processor
def utility_processor():
def format_timestamp(timestamp_str):
if not timestamp_str:
return "None"
datetime_obj = datetime.fromisoformat(timestamp_str)
return datetime_obj.strftime("%Y %B %d %H:%M:%S")
return dict(format_timestamp=format_timestamp)
@app.route('/')
def home():
return render_template('sitebase.html', **build_login_variables())
@app.route('/search', methods=['GET'])
def search():
return _search()
@app.route('/search/sample', methods=['GET', 'POST'])
def search_sample():
return _search('samples')
@app.route('/search/kit', methods=['GET', 'POST'])
def search_kit():
return _search('kit')
@app.route('/search/email', methods=['GET', 'POST'])
def search_email():
return _search('account')
def _search(resource=None):
if request.method == 'GET':
return render_template('search.html', **build_login_variables())
elif request.method == 'POST':
query = request.form['search_%s' % resource]
status, result = APIRequest.get(
'/api/admin/search/%s/%s' % (resource, query))
if status == 404:
result = {'error_message': "Query not found"}
return render_template('search_result.html',
**build_login_variables(),
result=result), 200
elif status == 200:
return render_template('search_result.html',
**build_login_variables(),
resource=resource,
result=result), 200
else:
return result
def _translate_nones(a_dict, do_none_to_str):
# Note: this ISN'T a deep copy. This function is NOT set up
# for recursing through a multi-layer dictionary
result = a_dict.copy()
for k, v in result.items():
if do_none_to_str and v is None:
result[k] = ""
elif not do_none_to_str and v == '':
result[k] = None
return result
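# A small sketch of both directions of the conversion above; the record
# contents are hypothetical.
def _demo_translate_nones():
    record = {'bank_samples': None, 'project_name': 'adhoc'}
    assert _translate_nones(record, True) == \
        {'bank_samples': '', 'project_name': 'adhoc'}
    assert _translate_nones({'bank_samples': ''}, False) == \
        {'bank_samples': None}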
def _get_projects(include_stats, is_active):
projects_uri = API_PROJECTS_URL + f"?include_stats={include_stats}"
if is_active is not None:
projects_uri += f"&is_active={is_active}"
status, projects_output = APIRequest.get(projects_uri)
if status >= 400:
result = {'error_message': f"Unable to load project list: "
f"{projects_uri}"}
else:
cleaned_projects = [_translate_nones(x, True) for x in
projects_output]
# if we're not using full project stats, sort
# alphabetically by project name
if not include_stats:
cleaned_projects = sorted(cleaned_projects,
key=lambda k: k['project_name'])
result = {'projects': cleaned_projects}
return status, result
@app.route('/manage_projects', methods=['GET', 'POST'])
def manage_projects():
result = None
is_active = request.args.get('is_active', None)
if request.method == 'POST':
model = {x: request.form[x] for x in request.form}
project_id = model.pop('project_id')
model['is_microsetta'] = model.get('is_microsetta', '') == 'true'
model['bank_samples'] = model.get('bank_samples', '') == 'true'
model = _translate_nones(model, False)
if project_id.isdigit():
# update (put) an existing project
action = "update"
status, api_output = APIRequest.put(
'{}/{}'.format(API_PROJECTS_URL, project_id),
json=model)
else:
# create (post) a new project
action = "create"
status, api_output = APIRequest.post(
API_PROJECTS_URL, json=model)
# if api post or put failed
if status >= 400:
result = {'error_message': f'Unable to {action} project.'}
# end if post
# if the above work (if any) didn't produce an error message, return
# the projects list
if result is None:
_, result = _get_projects(include_stats=True, is_active=is_active)
return render_template('manage_projects.html',
**build_login_variables(),
result=result), 200
@app.route('/email_stats', methods=['GET', 'POST'])
def email_stats():
_, result = _get_projects(include_stats=False, is_active=True)
projects = result.get('projects')
if request.method == 'GET':
project = request.args.get('project', None)
email = request.args.get('email')
if email is None:
# They want to search for emails, show them the search dialog
return render_template("email_stats_pulldown.html",
**build_login_variables(),
resource=None,
search_error=None,
projects=projects)
emails = [email, ]
elif request.method == 'POST':
project = request.form.get('project', None)
emails, upload_err = upload_util.parse_request_csv_col(
request,
'file',
'email'
)
if upload_err is not None:
return render_template('email_stats_pulldown.html',
**build_login_variables(),
resource=None,
search_error=[{'error': upload_err}],
projects=projects)
else:
raise BadRequest()
if project == "":
project = None
# de-duplicate
emails = list({e.lower() for e in emails})
status, result = APIRequest.post(
'/api/admin/account_email_summary',
json={
"emails": emails,
"project": project
})
if status != 200:
return render_template('email_stats_pulldown.html',
search_error=[{'error': result}],
resource=None,
**build_login_variables(),
projects=projects)
# At a minimum, our table will display these columns.
# We may show additional info depending on what comes back from the request
base_data_template = {
'email': 'XXX',
'summary': 'XXX',
'account_id': 'XXX',
'creation_time': 'XXX',
'kit_name': 'XXX',
'project': 'XXX',
'unclaimed-samples-in-kit': 0,
'never-scanned': 0,
'sample-is-valid': 0,
'no-associated-source': 0,
'no-registered-account': 0,
'no-collection-info': 0,
'sample-has-inconsistencies': 0,
'received-unknown-validity': 0
}
df = pd.DataFrame([base_data_template] + result)
df = df.drop(0) # remove the template row
numeric_cols = [
"unclaimed-samples-in-kit", "never-scanned", "sample-is-valid",
"no-associated-source", "no-registered-account", "no-collection-info",
"sample-has-inconsistencies", "received-unknown-validity"
]
df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric)
df[numeric_cols] = df[numeric_cols].fillna(0)
def urlify_account_id(id_):
if pd.isnull(id_):
return "No associated account"
else:
ui_endpoint = SERVER_CONFIG['ui_endpoint']
account_url = f"{ui_endpoint}/accounts/{id_}"
return f'<a target="_blank" href="{account_url}">{id_}</a>'
# see https://stackoverflow.com/questions/20035518/insert-a-link-inside-a-pandas-table # noqa
df['account_id'] = df["account_id"].apply(urlify_account_id)
return render_template("email_stats_pulldown.html",
search_error=None,
resource=df,
**build_login_variables(),
projects=projects)
@app.route('/per_sample_summary', methods=['GET', 'POST'])
def per_sample_summary():
# get a list of all projects in the system
_, result = _get_projects(include_stats=False, is_active=True)
projects = result.get('projects')
# filter out any projects that don't belong to Microsetta
projects = [x for x in projects if x['is_microsetta'] is True]
# build a list of dictionaries with just the project id and the project
# name.
projects = [{'project_name': x['project_name'],
'project_id': x['project_id']} for x in projects]
# determine if user wants sample ids stripped
strip_sampleid = request.form.get('strip_sampleid', 'off')
strip_sampleid = strip_sampleid.lower() == 'on'
if request.method == 'GET':
# If the user arrived via GET then they are either here without
# querying and simply need the default webpage, or they are
# querying with a single sample barcode or with a project id.
# Look for both parameters to determine which state we are in.
sample_barcode = request.args.get('sample_barcode')
project_id = request.args.get('project_id')
if sample_barcode is None and project_id is None:
# user just wants the default page.
return render_template('per_sample_summary.html',
resource=None,
projects=projects,
**build_login_variables())
if project_id is not None:
# user wants to get summaries on all samples in a project.
payload = {'project_id': project_id}
status, result = APIRequest.post('/api/admin/account_barcode_summa'
'ry?strip_sampleid=False',
json=payload)
if status == 200:
if result['partial_result'] is True:
unprocessed_barcodes = result['unprocessed_barcodes']
else:
unprocessed_barcodes = None
resource = pd.DataFrame(result['samples'])
order = ['sampleid', 'project', 'account-email',
'source-email', 'source-type', 'site-sampled',
'sample-status', 'sample-received', 'ffq-taken',
'ffq-complete', 'vioscreen_username']
order.extend(sorted(set(resource.columns) - set(order)))
resource = resource[order]
if unprocessed_barcodes:
return render_template('per_sample_summary.html',
resource=resource,
projects=projects,
error_message="Too many barcodes. S"
"erver processed only"
" the first 1000.",
**build_login_variables())
else:
return render_template('per_sample_summary.html',
resource=resource,
projects=projects,
**build_login_variables())
else:
return render_template('per_sample_summary.html',
resource=None,
projects=projects,
error_message=result,
**build_login_variables())
# if we are here then the user is querying using a barcode and we
# simply need to set up the query that is performed below.
sample_barcodes = [sample_barcode, ]
else:
# assume POST, since there are only two methods defined in route.
# if we are here, it is because the user is querying using an uploaded
# file containing sample names.
sample_barcodes, err = upload_util.parse_request_csv_col(request,
'file',
'sample_name')
if err is not None:
# there was an error. abort early.
return render_template('per_sample_summary.html',
resource=None,
projects=projects,
**build_login_variables(),
search_error=[{'error': err}])
# perform the main query.
payload = {'sample_barcodes': sample_barcodes}
status, result = APIRequest.post('/api/admin/account_barcode_summary?stri'
'p_sampleid=%s' % str(strip_sampleid),
json=payload)
if status == 200:
if result['partial_result'] is True:
unprocessed_barcodes = result['unprocessed_barcodes']
else:
unprocessed_barcodes = None
resource = pd.DataFrame(result['samples'])
order = ['sampleid', 'project', 'account-email', 'source-email',
'source-type', 'site-sampled', 'sample-status',
'sample-received', 'ffq-taken', 'ffq-complete',
'vioscreen_username']
order.extend(sorted(set(resource.columns) - set(order)))
resource = resource[order]
if unprocessed_barcodes:
return render_template('per_sample_summary.html',
resource=resource,
projects=projects,
error_message="Too many barcodes. S"
"erver processed only"
" the first 1000.",
**build_login_variables())
else:
return render_template('per_sample_summary.html',
resource=resource,
projects=projects,
**build_login_variables())
else:
return render_template('per_sample_summary.html',
resource=None,
projects=projects,
error_message=result,
**build_login_variables())
def _get_by_sample_barcode(sample_barcodes, strip_sampleid, projects):
payload = {'sample_barcodes': sample_barcodes}
status, result = APIRequest.post('/api/admin/account_barcode_summary?'
'strip_sampleid=%s' % str(strip_sampleid),
json=payload)
if status == 200:
if result['partial_result'] is True:
unprocessed_barcodes = result['unprocessed_barcodes']
else:
unprocessed_barcodes = None
resource = pd.DataFrame(result['samples'])
order = ['sampleid', 'project', 'account-email', 'source-email',
'source-type', 'site-sampled', 'sample-status',
'sample-received', 'ffq-taken', 'ffq-complete',
'vioscreen_username']
order.extend(sorted(set(resource.columns) - set(order)))
resource = resource[order]
if unprocessed_barcodes:
return render_template('per_sample_summary.html',
resource=resource,
projects=projects,
error_message="Too many barcodes. S"
"erver processed only"
" the first 1000.",
**build_login_variables())
else:
return render_template('per_sample_summary.html',
resource=resource,
projects=projects,
**build_login_variables())
else:
return render_template('per_sample_summary.html',
resource=None,
projects=projects,
error_message=result,
**build_login_variables())
@app.route('/create_kits', methods=['GET', 'POST'])
def new_kits():
_, result = _get_projects(include_stats=False, is_active=True)
projects = result.get('projects')
if request.method == 'GET':
return render_template('create_kits.html',
error_message=result.get('error_message'),
projects=projects,
**build_login_variables())
elif request.method == 'POST':
num_kits = int(request.form['num_kits'])
num_samples = int(request.form['num_samples'])
prefix = request.form['prefix']
selected_project_ids = request.form.getlist('project_ids')
payload = {'number_of_kits': num_kits,
'number_of_samples': num_samples,
'project_ids': selected_project_ids}
if prefix:
payload['kit_id_prefix'] = prefix
status, result = APIRequest.post(
'/api/admin/create/kits',
json=payload)
if status != 201:
return render_template('create_kits.html',
error_message='Failed to create kits',
projects=projects,
**build_login_variables())
# StringIO/BytesIO based off https://stackoverflow.com/a/45111660
buf = io.StringIO()
payload = io.BytesIO()
# explicitly expand out the barcode detail
kits = pd.DataFrame(result['created'])
for i in range(num_samples):
kits['barcode_%d' % (i+1)] = [r['sample_barcodes'][i]
for _, r in kits.iterrows()]
kits.drop(columns='sample_barcodes', inplace=True)
kits.to_csv(buf, sep=',', index=False, header=True)
payload.write(buf.getvalue().encode('utf-8'))
payload.seek(0)
buf.close()
stamp = datetime.now().strftime('%d%b%Y-%H%M')
fname = f'kits-{stamp}.csv'
return send_file(payload, as_attachment=True,
attachment_filename=fname,
mimetype='text/csv')
def _check_sample_status(extended_barcode_info):
warning = None
in_microsetta_project = any(
[x['is_microsetta'] for x in extended_barcode_info['projects_info']])
# one warning to rule them all; check in order of precedence
if not in_microsetta_project:
warning = UNKNOWN_VALIDITY_STATUS
elif extended_barcode_info['account'] is None:
warning = NO_ACCOUNT_STATUS
elif extended_barcode_info['source'] is None:
warning = NO_SOURCE_STATUS
# collection datetime is used as the bellwether for the whole
# set of sample collection info because it is relevant to all
# kinds of samples (whereas the previously used field, sample site, is not
# filled when environmental samples are returned).
elif extended_barcode_info['sample'].get('datetime_collected') is None:
warning = NO_COLLECTION_INFO_STATUS
return warning
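# A minimal sketch of the precedence order above, using a hypothetical barcode
# payload: a missing account outranks missing collection info.
def _demo_check_sample_status():
    info = {
        'projects_info': [{'is_microsetta': True}],
        'account': None,
        'source': None,
        'sample': {'datetime_collected': None},
    }
    assert _check_sample_status(info) == NO_ACCOUNT_STATUS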
# Set up handlers for the cases,
# GET to view the page,
# POST to update info for a barcode -AND (possibly)-
# email end user about the change in sample status,
def _scan_get(sample_barcode, update_error):
# If there is no sample_barcode in the GET
# they still need to enter one in the box, so show empty page
if sample_barcode is None:
return render_template('scan.html', **build_login_variables())
# Assuming there is a sample barcode, grab that sample's information
status, result = APIRequest.get(
'/api/admin/search/samples/%s' % sample_barcode)
# If we successfully grab it, show the page to the user
if status == 200:
# Process result in python because it's easier than doing it in jinja2.
status_warning = _check_sample_status(result)
# check the latest scan to find the default sample_status for form
latest_status = DUMMY_SELECT_TEXT
if result['latest_scan']:
latest_status = result['latest_scan']['sample_status']
account = result.get('account')
events = []
if account:
event_status, event_result = APIRequest.get(
'/api/admin/events/accounts/%s' % account['id']
)
if event_status != 200:
raise Exception("Couldn't pull event history")
events = event_result
return render_template(
'scan.html',
**build_login_variables(),
barcode_info=result["barcode_info"],
projects_info=result['projects_info'],
scans_info=result['scans_info'],
latest_status=latest_status,
dummy_status=DUMMY_SELECT_TEXT,
status_options=STATUS_OPTIONS,
send_email=session.get(SEND_EMAIL_CHECKBOX_DEFAULT_NAME, True),
sample_info=result['sample'],
extended_info=result,
status_warning=status_warning,
update_error=update_error,
received_type_dropdown=RECEIVED_TYPE_DROPDOWN,
source=result['source'],
events=events
)
elif status == 401:
# If we fail due to unauthorized, need the user to log in again
return redirect('/logout')
elif status == 404:
# If we fail due to not found, need to tell the user to pick a diff
# barcode
return render_template(
'scan.html',
**build_login_variables(),
search_error="Barcode %s Not Found" % sample_barcode,
update_error=update_error,
received_type_dropdown=RECEIVED_TYPE_DROPDOWN
)
else:
raise BadRequest()
def _scan_post_update_info(sample_barcode,
technician_notes,
sample_status,
action,
issue_type,
template,
received_type,
recorded_type):
###
# Bugfix Part 1 for duplicate emails being sent. Theory is that client is
# out of sync due to hitting back button after a scan has changed
# state.
# Can't test if client is up to date without ETags, so for right now,
# we just validate whether or not they should send an email, duplicating
# the client log. (This can still break with multiple admin clients,
# but that is unlikely at the moment.)
latest_status = None
# TODO: Replace this with ETags!
status, result = APIRequest.get(
'/api/admin/search/samples/%s' % sample_barcode)
if result['latest_scan']:
latest_status = result['latest_scan']['sample_status']
###
# Do the actual update
status, response = APIRequest.post(
'/api/admin/scan/%s' % sample_barcode,
json={
"sample_status": sample_status,
"technician_notes": technician_notes
}
)
# if the update failed, keep track of the error so it can be displayed
if status != 201:
update_error = response
return _scan_get(sample_barcode, update_error)
else:
update_error = None
# If we're not supposed to send an email, go back to GET
if action != "send_email":
return _scan_get(sample_barcode, update_error)
###
# Bugfix Part 2 for duplicate emails being sent.
if sample_status == latest_status:
# This is what we'll hit if javascript thinks it's updating status
# but is out of sync with the database.
update_error = "Ignoring Send Email, sample_status would " \
"not have been updated (Displayed page was out of " \
"sync)"
return _scan_get(sample_barcode, update_error)
###
# This is what we'll hit if there are no email templates to send for
# the new sample status (or if we screw up javascript side :D )
if template is None:
update_error = "Cannot Send Email: No Issue Type Specified " \
"(or no issue types available)"
return _scan_get(sample_barcode, update_error)
# Otherwise, send out an email to the end user
status, response = APIRequest.post(
'/api/admin/email',
json={
"issue_type": issue_type,
"template": template,
"template_args": {
"sample_barcode": sample_barcode,
"recorded_type": recorded_type,
"received_type": received_type
}
}
)
# if the email failed to send, keep track of the error
# so it can be displayed
if status != 200:
update_error = response
else:
update_error = None
return _scan_get(sample_barcode, update_error)
@app.route('/scan', methods=['GET', 'POST'])
def scan():
# Now that the handlers are set up, parse the request to determine what
# to do.
# If its a get, grab the sample_barcode from the query string rather than
# form parameters
if request.method == 'GET':
sample_barcode = request.args.get('sample_barcode')
return _scan_get(sample_barcode, None)
# If its a post, make the changes, then refresh the page
if request.method == 'POST':
# Without some extra ajax, we can't persist the send_email checkbox
# until they actually post the form
send_email = request.form.get('send_email', False)
session[SEND_EMAIL_CHECKBOX_DEFAULT_NAME] = send_email
sample_barcode = request.form['sample_barcode']
technician_notes = request.form['technician_notes']
sample_status = request.form['sample_status']
action = request.form.get('action')
issue_type = request.form.get('issue_type')
template = request.form.get('template')
received_type = request.form.get('received_type')
recorded_type = request.form.get('recorded_type')
return _scan_post_update_info(sample_barcode,
technician_notes,
sample_status,
action,
issue_type,
template,
received_type,
recorded_type)
@app.route('/metadata_pulldown', methods=['GET', 'POST'])
def metadata_pulldown():
allow_missing = request.form.get('allow_missing_samples', False)
if request.method == 'GET':
sample_barcode = request.args.get('sample_barcode')
# If there is no sample_barcode in the GET
# they still need to enter one in the box, so show empty page
if sample_barcode is None:
return render_template('metadata_pulldown.html',
**build_login_variables())
sample_barcodes = [sample_barcode]
elif request.method == 'POST':
sample_barcodes, upload_err = upload_util.parse_request_csv_col(
request,
'file',
'sample_name'
)
if upload_err is not None:
return render_template('metadata_pulldown.html',
**build_login_variables(),
search_error=[{'error': upload_err}])
else:
raise BadRequest()
df, errors = metadata_util.retrieve_metadata(sample_barcodes)
# Strangely, these api requests are returning an html error page rather
# than a machine parseable json error response object with message.
# This is almost certainly due to error handling for the cohosted minimal
# client. In future, we should just pass down whatever the api says here.
if len(errors) == 0 or allow_missing:
df = metadata_util.drop_private_columns(df)
# TODO: Streaming direct from pandas is a pain. Need to search for
# better ways to iterate and chunk this file as we generate it
strstream = io.StringIO()
df.to_csv(strstream, sep='\t', index=True, header=True)
# TODO: utf-8 or utf-16 encoding??
bytestream = io.BytesIO()
bytestream.write(strstream.getvalue().encode('utf-8'))
bytestream.seek(0)
strstream.close()
return send_file(bytestream,
mimetype="text/tab-separated-values",
as_attachment=True,
attachment_filename="metadata_pulldown.tsv",
add_etags=False,
cache_timeout=None,
conditional=False,
last_modified=None,
)
else:
return render_template('metadata_pulldown.html',
**build_login_variables(),
info={'barcodes': sample_barcodes},
search_error=errors)
@app.route('/submit_daklapack_order', methods=['GET'])
def submit_daklapack_order():
error_msg_key = "error_message"
def return_error(msg):
return render_template('submit_daklapack_order.html',
**build_login_variables(),
error_message=msg)
status, dak_articles_output = APIRequest.get(
'/api/admin/daklapack_articles')
if status >= 400:
return return_error("Unable to load daklapack articles list.")
status, projects_output = _get_projects(include_stats=False,
is_active=True)
if status >= 400:
return return_error(projects_output[error_msg_key])
return render_template('submit_daklapack_order.html',
**build_login_variables(),
error_message=None,
dummy_status=DUMMY_SELECT_TEXT,
dak_articles=dak_articles_output,
contact_phone_number=SERVER_CONFIG[
"order_contact_phone"],
projects=projects_output['projects'])
@app.route('/submit_daklapack_order', methods=['POST'])
def post_submit_daklapack_order():
def return_error(msg):
return render_template('submit_daklapack_order.html',
**build_login_variables(),
error_message=msg)
error_message = success_submissions = failure_submissions = headers = None
expected_headers = ["firstName", "lastName", "address1", "insertion",
"address2", "postalCode", "city", "state",
"country", "countryCode"]
# get required fields; cast where expected by api
phone_number = request.form['contact_phone_number']
project_ids_list = list(map(int, request.form.getlist('projects')))
dak_article_code = request.form['dak_article_code']
article_quantity = int(request.form['quantity'])
file = request.files['addresses_file']
# get optional fields or defaults
planned_send_str = request.form.get('planned_send_date')
planned_send_date = planned_send_str if planned_send_str else None
description = request.form.get('description')
fedex_ref_1 = request.form.get('fedex_ref_1')
fedex_ref_2 = request.form.get('fedex_ref_2')
fedex_ref_3 = request.form.get('fedex_ref_3')
try:
# NB: import everything as a string so that zip codes beginning with
# zero (e.g., 06710) don't get silently cast to numbers
if file.filename.endswith('xls'):
addresses_df = pd.read_excel(file, dtype=str)
elif file.filename.endswith('xlsx'):
addresses_df = pd.read_excel(file, engine='openpyxl', dtype=str)
else:
raise ValueError(f"Unrecognized extension on putative excel "
f"filename: {file.filename}")
headers = list(addresses_df.columns)
except Exception as e: # noqa
return return_error('Could not parse addresses file')
if headers != expected_headers:
return return_error(f"Received column names {headers} do "
f"not match expected column names"
f" {expected_headers}")
# add (same) contact phone number to every address
addresses_df['phone'] = phone_number
addresses_df = addresses_df.fillna("")
temp_dict = addresses_df.to_dict(orient='index')
addresses_list = [temp_dict[n] for n in range(len(temp_dict))]
status, post_output = APIRequest.post(
'/api/admin/daklapack_orders',
json={
"project_ids": project_ids_list,
"article_code": dak_article_code,
"quantity": article_quantity,
"addresses": addresses_list,
"planned_send_date": planned_send_date,
"description": description,
"fedex_ref_1": fedex_ref_1,
"fedex_ref_2": fedex_ref_2,
"fedex_ref_3": fedex_ref_3
}
)
# if the post failed, keep track of the error so it can be displayed
if status != 200:
error_message = post_output
else:
order_submissions = post_output["order_submissions"]
success_submissions = [x for x in order_submissions if
x["order_success"]]
failure_submissions = [x for x in order_submissions if not
x["order_success"]]
return render_template('submit_daklapack_order.html',
**build_login_variables(),
error_message=error_message,
success_submissions=success_submissions,
failure_submissions=failure_submissions)
@app.route('/authrocket_callback')
def authrocket_callback():
token = request.args.get('token')
session[TOKEN_KEY_NAME] = token
return redirect("/")
@app.route('/logout')
def logout():
if TOKEN_KEY_NAME in session:
del session[TOKEN_KEY_NAME]
return redirect("/")
# If we're running in stand alone mode, run the application
if __name__ == '__main__':
if SERVER_CONFIG["ssl_cert_path"] and SERVER_CONFIG["ssl_key_path"]:
ssl_context = (
SERVER_CONFIG["ssl_cert_path"], SERVER_CONFIG["ssl_key_path"]
)
else:
ssl_context = None
app.run(
port=SERVER_CONFIG['port'],
debug=SERVER_CONFIG['debug'],
ssl_context=ssl_context
)
|
from bz2 import BZ2File
from collections import Counter
from collections.abc import Sequence, Iterable, Mapping
from functools import partial
import gc
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email import encoders
from inspect import signature, getattr_static, ismethod, getmembers, getmodule
from itertools import chain
import json
from multiprocessing import Pool
import os
from pathlib import Path
import pickle
from random import choice
import re
import smtplib
from subprocess import run, check_output
import sys
import time
from tqdm.auto import tqdm
import wordninja as wn
from htools.config import get_credentials, get_default_user
class InvalidArgumentError(Exception):
pass
def hdir(obj, magics=False, internals=False):
"""Print object methods and attributes, by default excluding magic methods.
Parameters
-----------
obj: any type
The object to print methods and attributes for.
magics: bool
Specifies whether to include magic methods (e.g. __name__, __hash__).
Default False.
internals: bool
Specifies whether to include internal methods (e.g. _dfs, _name).
Default False.
Returns
--------
dict
Keys are method/attribute names, values are strings specifying whether
the corresponding key is a 'method' or an 'attribute'.
"""
output = dict()
for attr in dir(obj):
# Exclude magics or internals if specified.
if (not magics and attr.startswith('__')) or \
(not internals and re.match('_[^_]', attr)):
continue
# Handle rare case where attr can't be invoked (e.g. df.sparse on a
# non-sparse Pandas dataframe).
try:
is_method = callable(getattr(obj, attr))
except Exception:
continue
# Update output to specify whether attr is callable.
if is_method:
output[attr] = 'method'
else:
output[attr] = 'attribute'
return output
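# A quick sketch of hdir on a tiny, hypothetical class: magic methods are
# omitted, everything else is labeled 'method' or 'attribute'.
def _demo_hdir():
    class Point:
        def __init__(self):
            self.x = 1
        def norm(self):
            return abs(self.x)
    assert hdir(Point()) == {'norm': 'method', 'x': 'attribute'}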
def tdir(obj, **kwargs):
"""A variation of the built in `dir` function that shows the
attribute names as well as their types. Methods are excluded as they can
change the object's state.
Parameters
----------
obj: any type
The object to examine.
kwargs: bool
Additional arguments to be passed to hdir. Options are `magics` and
`internals`. See hdir documentation for more information.
Returns
-------
dict[str, type]: Dictionary mapping the name of the object's attributes to
the corresponding types of those attributes.
"""
return {k: type(getattr(obj, k))
for k, v in hdir(obj, **kwargs).items() if v == 'attribute'}
def hasarg(func, arg):
"""Checks if a function has a given argument. Works with args and kwargs as
well if you exclude the stars. See example below.
Parameters
----------
func: function
arg: str
Name of argument to look for.
Returns
-------
bool
Example
-------
def foo(a, b=6, *args):
return
>>> hasarg(foo, 'b')
True
>>> hasarg(foo, 'args')
True
>>> hasarg(foo, 'c')
False
"""
return arg in signature(func).parameters
def quickmail(subject, message, to_email, from_email=None, img_path=None,
img_name=None, verbose=True, password=None):
"""Send an email.
Parameters
-----------
from_email: str
Gmail address being used to send email.
to_email: str
Recipient's email.
subject: str
Subject line of email.
message: str
Body of email.
Returns
--------
None
"""
# Load email username. Error handling takes place in config functions.
from_email = from_email or get_default_user()
if not from_email: return None
# Load email password.
password = password or get_credentials(from_email)
if not password: return None
# Create message and add text if specified.
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = from_email
msg['To'] = to_email
if message: msg.attach(MIMEText(message))
# Load and attach image.
if img_path:
with open(img_path, 'rb') as f:
img = MIMEImage(f.read(),
name=img_name or os.path.basename(img_path))
encoders.encode_base64(img)
msg.attach(img)
# Access server and send email.
server = smtplib.SMTP(host='smtp.gmail.com', port=587)
server.starttls()
server.login(user=from_email, password=password)
server.sendmail(from_email, to_email, msg.as_string())
if verbose: print(f'Email sent to {to_email}.')
def hsplit(text, sep, group=True, attach=True):
"""Flexible string splitting that retains the delimiter rather, unlike
the built-in str.split() method.
NOTE: I recently observed behavior suggesting separators with special
characters (e.g. "\n") may not work as expected for some settings. It
should work when group=True and attach=True though since I rewrote that
with new logic without the re module.
Parameters
-----------
text: str
The input text to be split.
sep: str
The delimiter to be split on.
group: bool
Specifies whether to group consecutive delimiters together (True),
or to separate them (False).
attach: bool
Specifies whether to attach the delimiter to the string that precedes
it (True), or to detach it so it appears in the output list as its own
item (False).
Returns
--------
list[str]
Examples
---------
text = "Score -- Giants win 6-5"
sep = '-'
# Case 0.1: Delimiters are grouped together and attached to the preceding
word.
>> hsplit(text, sep, group=True, attach=True)
>> ['Score --', ' Giants win 6-', '5']
# Case 0.2: Delimiters are grouped together but are detached from the
preceding word, instead appearing as their own item in the output list.
>> hsplit(text, sep, group=True, attach=False)
>> ['Score ', '--', ' Giants win 6', '-', '5']
Case 1.1: Delimiters are retained and attached to the preceding string.
If the delimiter occurs multiple times consecutively, only the first
occurrence is attached, and the rest appear as individual items in the
output list.
>> hsplit(text, sep, group=False, attach=True)
>> ['Score -', '-', ' Giants win 6-', '5']
# Case 1.2: Delimiters are retained but are detached from the preceding
string. Each instance appears as its own item in the output list.
>> hsplit(text, sep, group=False, attach=False)
>> ['Score ', '-', '-', ' Giants win 6', '-', '5']
"""
sep_re = re.escape(sep)
regex = f'[^{sep_re}]*{sep_re}*'
##########################################################################
# Case 0: Consecutive delimiters are grouped together.
##########################################################################
if group:
# Subcase 0.1
if attach:
return _grouped_split(text, sep)
# Subcase 0.2
else:
return [word for word in re.split(f'({sep_re}+)', text) if word]
##########################################################################
# Case 1: Consecutive delimiters are NOT grouped together.
##########################################################################
words = text.split(sep)
# Subcase 1.1
if attach:
return [word for word in re.findall(regex[:-1]+'?', text) if word]
# Subcase 1.2
return [word for word in chain(*zip(words, [sep]*len(words))) if word][:-1]
def _grouped_split(text, sep):
"""Hsplit helper for case where group=True and attach=True (see hsplit
docs). Old re.find() method didn't work right when sep had special
characters (e.g. "\n").
"""
res = []
toks = text.split(sep)
max_idx = len(toks) - 1
for i, tok in enumerate(toks):
if tok:
if i < max_idx: tok += sep
res.append(tok)
elif i < max_idx:
if res:
res[-1] += sep
else:
res.append(sep)
return res
def rmvars(*args):
"""Wrapper to quickly free up memory by deleting global variables. Htools
3.0 does not provide a way to do this for local variables.
Parameters
----------
args: str
One or more variable names to delete. Do not pass in the variable
itself.
Returns
-------
None
"""
for arg in args:
del globals()[arg]
gc.collect()
def print_object_sizes(space, limit=None, exclude_underscore=True):
"""Print the object names and sizes of the currently defined objects.
Parameters
-----------
space: dict
locals(), globals(), or vars()
limit: int or None
Optionally limit the number of objects displayed (default None for no
limit).
exclude_underscore: bool
Determine whether to exclude objects whose names start with an
underscore (default True).
"""
var_size = [(var, sys.getsizeof(obj)) for var, obj in space.items()]
for var, size in sorted(var_size, key=lambda x: -x[1])[:limit]:
if not var.startswith('_') or not exclude_underscore:
print(var, size)
def eprint(arr, indent=2, spacing=1):
"""Enumerated print. Prints an iterable with one item per line accompanied
by a number specifying its index in the iterable.
Parameters
-----------
arr: iterable
The object to be iterated over.
indent: int
Width to assign to column of integer indices. Default is 2, meaning
columns will line up as long as <100 items are being printed, which is
the expected use case.
spacing: int
Line spacing. Default of 1 will print each item on a new line with no
blank lines in between. Spacing of 2 will double space output, and so
on for larger values.
Returns
--------
None
"""
for i, x in enumerate(arr):
print(f'{i:>{indent}}: {x}', end='\n'*spacing)
def _read_write_args(path, mode):
"""Helper for `save` and `load` functions.
Parameters
----------
path: str
Path to read/write object from/to.
mode: str
'w' for writing files (as in `save`), 'r' for reading files
(as in `load`).
Returns
-------
tuple: Function to open file, mode to open file with (str), object to open
file with.
"""
ext = path.rpartition('.')[-1]
if ext not in {'json', 'pkl', 'zip'}:
raise InvalidArgumentError(
'Invalid extension. Make sure your filename ends with '
'.json, .pkl, or .zip.'
)
# Store in dict to make it easier to add additional formats in future.
ext2data = {
'json': (open, '', json),
'pkl': (open, 'b', pickle),
'zip': (BZ2File, '', pickle),
}
opener, mode_suffix, saver = ext2data[ext]
return opener, mode + mode_suffix, saver
def save(obj, path, mode_pre='w', verbose=True):
"""Wrapper to save data as text, pickle (optionally zipped), or json.
Parameters
-----------
obj: any
Object to save. This will be pickled/jsonified/zipped inside the
function - do not convert it before-hand.
path: str
File name to save object to. Should end with .txt, .sh, .md, .py, .pkl,
.zip, or .json depending on desired output format. If .zip is used, object
will be zipped and then pickled. (.sh, .md, and .py will be treated
identically to .txt.)
mode_pre: str
Determines whether to write or append text. One of ('w', 'a').
verbose: bool
If True, print a message confirming that the data was pickled, along
with its path.
Returns
-------
None
"""
path = Path(path)
os.makedirs(path.parent, exist_ok=True)
if verbose: print(f'Writing data to {path}.')
if path.suffix[1:] in ('txt', 'sh', 'md', 'py'):
with path.open(mode_pre) as f:
f.write(obj)
else:
opener, mode, saver = _read_write_args(str(path), mode_pre)
with opener(path, mode) as f:
saver.dump(obj, f)
def load(path, verbose=True):
"""Wrapper to load text files or pickled (optionally zipped) or json data.
Parameters
----------
path : str
File to load. File type will be inferred from extension. Must be one of
'.txt', '.sh', '.md', '.py', '.json', '.pkl', or '.zip'.
verbose : bool, optional
If True, will print message stating where object was loaded from.
Returns
-------
object: The Python object that was pickled to the specified file.
"""
path = Path(path)
if path.suffix[1:] in ('txt', 'sh', 'md', 'py'):
return path.read_text()
opener, mode, saver = _read_write_args(str(path), 'r')
with opener(path, mode) as f:
data = saver.load(f)
if verbose: print(f'Object loaded from {path}.')
return data
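# A short round-trip sketch for save/load using a pickle file in a temporary
# directory; the data is hypothetical.
def _demo_save_load_roundtrip():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, 'obj.pkl')
        save({'a': 1}, path, verbose=False)
        assert load(path, verbose=False) == {'a': 1}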
def dict_sum(*args):
"""Given two or more dictionaries with numeric values, combine them into a
single dictionary. For keys that appear in multiple dictionaries, their
corresponding values are added to produce the new value.
This differs from combining two dictionaries in the following manner:
{**d1, **d2}
The method shown above will combine the keys but will retain the value
from d2, rather than adding the values from d1 and d2.
Parameters
-----------
*args: dicts
2 or more dictionaries with numeric values.
Returns
--------
dict: Contains all keys which appear in any of the dictionaries that are
passed in. The corresponding values from each dictionary containing a
given key are summed to produce the new value.
Examples
---------
>>> d1 = {'a': 1, 'b': 2, 'c': 3}
>>> d2 = {'a': 10, 'c': -20, 'd': 30}
>>> d3 = {'c': 10, 'd': 5, 'e': 0}
>>> dict_sum(d1, d2, d3)
{'a': 11, 'b': 2, 'c': -7, 'd': 35, 'e': 0}
"""
keys = {key for d in args for key in d.keys()}
return {key: sum(d.get(key, 0) for d in args)
for key in keys}
def _select_mapping(items, keep=(), drop=()):
"""Helper function for `select`.
Parameters
----------
items: Mapping
Dict (or similar mapping) to select/drop from.
keep: Iterable[str]
Sequence of keys to keep.
drop: Iterable[str]
Sequence of keys to drop. You should specify either `keep` or `drop`,
not both.
Returns
-------
Dict
"""
if keep:
return {k: items[k] for k in keep}
return {k: v for k, v in items.items() if k not in set(drop)}
def _select_sequence(items, keep=(), drop=()):
"""Helper function for `select` that works on sequences (basically
collections that support enumeration).
Parameters
----------
items: Sequence
List, tuple, or iterable sequence of some sort to select items from.
keep: Iterable[str]
Sequence of indices to keep.
drop: Iterable[str]
Sequence of indices to drop. You should specify either `keep` or
`drop`, not both.
Returns
-------
Same type as `items` (usually a list or tuple).
"""
type_ = type(items)
if keep:
return type_(x for i, x in enumerate(items) if i in set(keep))
return type_(x for i, x in enumerate(items) if i not in set(drop))
def select(items, keep=(), drop=()):
"""Select a subset of a data structure. When used on a mapping (e.g. dict),
you can specify a list of keys to include or exclude. When used on a
sequence like a list or tuple, specify indices instead of keys.
Parameters
----------
items: abc.Sequence or abc.Mapping
The mapping or sequence to select items from.
keep: Iterable[str]
Sequence of keys to keep.
drop: Iterable[str]
Sequence of keys to drop. You should specify either `keep` or `drop`,
not both.
Returns
-------
dict: Dictionary containing only the specified keys (when passing in
`keep`), or all keys except the specified ones (when passing in
`drop`).
"""
if bool(keep) + bool(drop) != 1:
raise InvalidArgumentError('Specify exactly one of `keep` or `drop`.')
if isinstance(items, Mapping):
return _select_mapping(items, keep, drop)
elif isinstance(items, Sequence):
return _select_sequence(items, keep, drop)
else:
raise InvalidArgumentError('`items` must be a Mapping or Sequence.')
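# A compact sketch of select on a mapping (by key) and a sequence (by index);
# the data is hypothetical.
def _demo_select():
    assert select({'a': 1, 'b': 2, 'c': 3}, keep=['a', 'c']) == {'a': 1, 'c': 3}
    assert select(['w', 'x', 'y', 'z'], drop=[1]) == ['w', 'y', 'z']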
def differences(obj1, obj2, methods=False, **kwargs):
"""Find the differences between two objects (generally of the same type -
technically this isn't enforced but we do require that the objects have
the same set of attribute names so a similar effect is achieved. Actual
type checking was causing problems comparing multiple Args instances,
presumably because each Args object is defined when called).
This is a way to get more detail beyond whether two objects are equal or
not.
Parameters
-----------
obj1: any
An object.
obj2: any, usually the same type as obj1
An object.
methods: bool
If True, include methods in the comparison. If False, only attributes
will be compared. Note that the output may not be particularly
interpretable when using methods=True; for instance when comparing two
strings consisting of different characters, we get a lot of output
that looks like this:
{'islower': (<function str.islower()>, <function str.islower()>),
'isupper': (<function str.isupper()>, <function str.isupper()>),...
'istitle': (<function str.istitle()>, <function str.istitle()>)}
These attributes all reflect the same difference: if obj1 is 'abc'
and obj2 is 'def', then
'abc' != 'def' and
'ABC' != 'DEF' and
'Abc' != 'Def'.
When methods=False, we ignore all of these, such that
differences('a', 'b') returns {}. Therefore, it is important to
carefully consider what differences you care about identifying.
**kwargs: bool
Can pass args to hdir to include magics or internals.
Returns
--------
dict[str, tuple]: Maps attribute name to a tuple of values, where the
first is the corresponding value for obj1 and the second is the
corresponding value for obj2.
"""
# Try built-in comparison functionality first. Keep error handling broad.
try:
if obj1 == obj2:
return {}
except Exception:
pass
attr1, attr2 = hdir(obj1, **kwargs), hdir(obj2, **kwargs)
assert attr1.keys() == attr2.keys(), 'Objects must have same attributes.'
diffs = {}
for (k1, v1), (k2, v2) in zip(attr1.items(), attr2.items()):
# Only compare non-callable attributes.
if not (methods or v1 == 'attribute'):
continue
# Comparisons work differently for arrays/tensors than other objects.
val1, val2 = getattr(obj1, k1), getattr(obj2, k2)
try:
equal = (val1 == val2).all()
except AttributeError:
equal = val1 == val2
# Store values that are different for obj1 and obj2.
if not equal:
diffs[k1] = (val1, val2)
return diffs
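# A minimal sketch comparing two hypothetical config-like objects: only the
# attribute that differs shows up in the output.
def _demo_differences():
    class Config:
        def __init__(self, lr, epochs):
            self.lr = lr
            self.epochs = epochs
    diff = differences(Config(1e-3, 10), Config(1e-2, 10))
    assert diff == {'lr': (1e-3, 1e-2)}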
def catch(func, *args, verbose=False):
"""Error handling for list comprehensions. In practice, it's recommended
to use the higher-level robust_comp() function which uses catch() under the
hood.
Parameters
-----------
func: function
*args: any type
Arguments to be passed to func.
verbose: bool
If True, print the error message should one occur.
Returns
--------
any type: If the function executes successfully, its output is returned.
Otherwise, return None.
Examples
---------
>>> [catch(lambda x: 1 / x, i) for i in range(3)]
[None, 1.0, 0.5]
# Note that the filtering method shown below also removes zeros, which is
# okay in this case.
>>> list(filter(None, [catch(lambda x: 1 / x, i) for i in range(3)]))
[1.0, 0.5]
"""
try:
return func(*args)
except Exception as e:
if verbose: print(e)
return
def safe_map(func, seq):
"""This addresses the issue of error handling in map() or list
comprehension operations by simply skipping any items that throw an error.
Note that values of None will be removed from the resulting list.
Parameters
----------
func: function
Function to apply to each item in seq.
seq: generator, iterator
The sequence to iterate over. This could also be a generator, list,
set, etc.
Returns
-------
list
Examples
--------
# Notice that instead of throwing an error when dividing by zero, that
# entry was simply dropped.
>>> safe_map(lambda x: x/(x-2), range(4))
[-0.0, -1.0, 3.0]
"""
return list(
filter(lambda x: x is not None, (catch(func, obj) for obj in seq))
)
def flatten(nested):
"""Flatten a nested sequence where the sub-items can be sequences or
primitives. This differs slightly from itertools chain methods because
those require all sub-items to be sequences. Here, items can be primitives,
sequences, nested sequences, or any combination of these. Any iterable
items aside from strings will be completely un-nested, so use with caution
(e.g. a torch Dataset would be unpacked into separate items for each
index). This also returns a list rather than a generator.
Parameters
----------
nested: sequence (list, tuple, set)
Sequence where some or all of the items are also sequences.
Returns
-------
list: Flattened version of `nested`.
"""
def _walk(nested):
for group in nested:
if isinstance(group, Iterable) and not isinstance(group, str):
yield from _walk(group)
else:
yield group
return list(_walk(nested))
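# Illustrative usage (hedged sketch): primitives, sequences, and nested
# sequences can be mixed freely, and strings are left intact.
# >>> flatten([1, [2, (3, 4)], 'ab', 5])
# [1, 2, 3, 4, 'ab', 5]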
class BasicPipeline:
"""Create a simple unidirectional pipeline of functions to apply in order
with optional debugging output.
"""
def __init__(self, *funcs):
"""
Parameters
----------
*funcs: function(s)
One or more functions to apply in the specified order.
"""
# Make `funcs` mutable. Could use @htools.meta.delegate('funcs')
# but not sure if that would cause circular import issues. Check later.
self.funcs = list(funcs)
def __call__(self, x, verbose=False, attr=''):
"""Apply the pipeline of functions to x.
Parameters
----------
x: any
Object to operate on.
verbose: bool
If True, print x (or an attribute of x) after each step.
attr: str
If specified and verbose is True, will print this attribute of x
after each function is applied.
Returns
-------
output of last func in self.funcs
"""
for func in self.funcs:
x = func(x)
if verbose: print(repr(getattr(x, attr, x)))
return x
def __repr__(self):
# Try to display each item in the form that was likely passed in: for
# functions, this is the name, but for callable classes this is
# the str representation of the object, not the class itself.
names = ',\n\t'.join(func_name(f) if hasattr(f, '__name__') else str(f)
for f in self.funcs)
return f'{type(self).__name__}(\n\t{names}\n)'
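# Illustrative usage (hedged sketch): build a reusable pipeline once and
# call it like a function.
# >>> pipeline = BasicPipeline(str.strip, str.lower, len)
# >>> pipeline('  Hello World  ')
# 11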
def pipe(x, *funcs, verbose=False, attr=''):
"""Convenience function to apply many functions in order to some object.
This lets us replace messy notation where it's hard to keep parenthesis
straight:
list(parse_processed_text(tokenize_rows(porter_stem(strip_html_tags(
text)))))
with:
pipe(text, strip_html_tags, porter_stem, tokenize_rows,
parse_processed_text, list)
or if we have a list of functions:
pipe(x, *funcs)
Parameters
----------
x: any
Object to apply functions to.
*funcs: function(s)
Functions in the order you want to apply them. Use functools.partial
to specify other arguments.
verbose: bool
If True, print x (or an attribute of x) after each step.
attr: str
If specified and verbose is True, will print this attribute of x
after each function is applied.
Returns
-------
output of last func in *funcs
"""
return BasicPipeline(*funcs)(x, verbose=verbose, attr=attr)
def vcounts(arr, normalize=True):
"""Equivalent of pandas_htools vcounts method that we can apply on lists
or arrays. Basically just a wrapper around Counter but with optional
normalization.
Parameters
----------
arr: Iterable
Sequence of values to count. Typically a list or numpy array.
normalize: bool
If True, counts will be converted to percentages.
Returns
-------
dict: Maps unique items in `arr` to the number of times (or % of times)
that they occur in `arr`.
"""
counts = dict(Counter(arr))
if normalize:
length = len(arr)
counts = {k: v/length for k, v in counts.items()}
return counts
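# Illustrative usage (hedged sketch):
# >>> vcounts(['a', 'a', 'b', 'c'])
# {'a': 0.5, 'b': 0.25, 'c': 0.25}
# >>> vcounts(['a', 'a', 'b', 'c'], normalize=False)
# {'a': 2, 'b': 1, 'c': 1}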
def item(it, random=True, try_values=True):
"""Get an item from an iterable (e.g. dict, set, torch DataLoader).
This is a quick way to access an item for iterables that don't support
indexing, or do support indexing but require us to know a key.
Parameters
----------
it: Iterable
Container that we want to access a value from.
random: bool
If True, pick a random value from `it`. Otherwise just return the first
value.
try_values: bool
If True, will check if `it` has a `values` attribute and will operate
on that if it does. We often want to see a random value from a dict
rather than a key. If we want both a key and value, we could set
try_values=False and pass in d.items().
Returns
-------
any: An item from the iterable.
"""
if try_values and hasattr(it, 'values'): it = it.values()
if random: return choice(list(it))
return next(iter(it))
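# Illustrative usage (hedged sketch; with random=False the first item is
# returned deterministically):
# >>> item({'a': 1, 'b': 2}, random=False)
# 1
# >>> item({'a': 1, 'b': 2}, random=False, try_values=False)
# 'a'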
def lmap(fn, *args):
"""Basically a wrapper for `map` that returns a list rather than a
generator. This is such a common pattern that I think it deserves its own
function (think of it as a concise alternative to a list comprehension).
One slight difference is that we use *args instead of passing in an
iterable. This adds a slight convenience for the intended use case (fast
prototyping). See the `Examples` for more on this.
Parameters
----------
args: any
Returns
-------
list
Examples
--------
Consider these three equivalent syntax options:
lmap(fn, x, y)
[fn(obj) for obj in (x, y)]
list(map(fn, (x, y)))
When quickly iterating, option 1 saves a bit of typing. The extra
parentheses that options 2 and 3 require to put x and y in a temporary
data structure can get messy as we add more complex logic.
"""
return list(map(fn, args))
def amap(attr, *args):
"""More convenient syntax for quick data exploration. Get an attribute
value for multiple objects. Name is short for "attrmap".
Parameters
----------
attr: str
Name of attribute to retrieve for each object.
args: any
Objects (usually of same type) to retrieve attributes for.
Returns
-------
list: Result for each object.
Examples
--------
df1 = pd.DataFrame(np.random.randint(0, 10, (4, 5)))
df2 = pd.DataFrame(np.random.randint(0, 3, (4, 5)))
df3 = pd.DataFrame(np.random.randint(0, 3, (2, 3)))
>>> amap('shape', df1, df2, df3)
[(4, 5), (4, 5), (2, 3)]
net = nn.Sequential(...)
>>> amap('shape', *net.parameters())
[torch.Size([5, 3]),
torch.Size([16, 4]),
torch.Size([16, 3]),
torch.Size([16])]
"""
return [getattr(arg, attr) for arg in args]
def smap(*x):
"""Get shape of each array/tensor in a list or tuple.
Parameters
----------
*x: np.arrays or torch.tensors
We use star unpacking here to create a consistent interface with amap()
and lmap().
Returns
-------
list: Shape of each array/tensor in input.
"""
return amap('shape', *x)
def sleepy_range(*args, wait=1, wait_before=True):
"""Convenience function: we often want to create a loop that mimics doing
some time intensive thing on each iteration. This is just like the built-in
range function (not technically a function!) but with a sleep period baked
in, making it particularly useful for list comprehensions where this would
be tricky otherwise. Note: unlike range, the result is a generator, so it is
exhausted after a single pass. See examples.
Parameters
----------
args: int
Passed on to range().
wait: int or float
Number of seconds to wait on each iteration. Remember this is a keyword
only argument for compatibility with the range interface.
wait_before: bool
Determines whether to sleep before or after yielding the number.
Defaults to before to mimic "doing work" before producing some result.
Examples
--------
# Takes 6 seconds to create this list.
>>> [i for i in sleepy_range(3, wait=2)]
[0, 1, 2]
>>> srange = sleepy_range(0, 6, 2, wait_before=False)
>>> for i in srange:
>>> print(i)
0
2
4
>>> for i in srange:
>>> print(i)
# Notice this cannot be used again without manually calling sleepy_range.
"""
for i in range(*args):
if wait_before: time.sleep(wait)
yield i
if not wait_before: time.sleep(wait)
def venumerate(iterable, start=0, freq=1, print_before=True,
message_format='{}'):
"""Verbose enumerate: simple convenience function that's a drop-in
replacement for enumerate. It prints updates as we iterate over some
object. TQDM progress bar may not be available in some cases (e.g. we
don't know the length of the iterable, or possibly some cases using
concurrency), and this function gives us some way to keep an eye on
progress. Mainly intended as a convenience for list comprehensions, since
in a standard for loop we could easily add this logic.
Parameters
----------
iterable: Iterable
The object to iterate over.
start: int
Passed on to enumerate - the first index to use when counting.
freq: int
Frequency with which to print updates (i.e. updates are printed when
i is divisible by freq).
print_before: bool
Specifies whether to print the message before yielding the i'th value
or after.
message_format: str
Used to format the message that will be displayed when i is divisible
by freq. Defaults to just printing i.
"""
for i, x in enumerate(iterable, start=start):
if i % freq == 0 and print_before: print(message_format.format(i))
yield i, x
if i % freq == 0 and not print_before: print(message_format.format(i))
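# Illustrative usage (hedged sketch; prints a progress message every 2
# items while still yielding (index, value) pairs like enumerate):
# >>> [x for i, x in venumerate('abcdef', freq=2)]
# 0
# 2
# 4
# ['a', 'b', 'c', 'd', 'e', 'f']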
def method_of(meth):
"""Retrieve the class a method belongs to. This will NOT work on
attributes. Also, this won't help if your goal is to retrieve an instance:
this returns the type of the instance. Not thoroughly tested but it seems
to work regardless of whether you pass in meth from an instance or a class
(the output is the same in both cases).
Parameters
----------
meth: MethodType
The method to retrieve the class of.
Returns
-------
type: The class which defines the method in question.
Examples
--------
class Foo:
def my_method(self, x):
return x*2
f = Foo()
assert method_of(Foo.my_method) == method_of(f.my_method) == Foo
"""
cls, name = meth.__qualname__.split('.')
return dict(getmembers(getmodule(meth)))[cls]
def hasstatic(cls, meth_name):
"""Check if a class possesses a staticmethod of a given name. Similar to
hasattr. Note that isinstance(cls.meth_name, staticmethod) would always
return False: we must use getattr_static or cls.__dict__[meth_name]
to potentially return True.
Parameters
----------
cls: Type or any
A class or an instance (seems to work on both, though more extensive
testing may be needed for more complex scenarios).
meth_name: str
Name of method to check. If the class/instance does not contain any
attribute with this name, function returns False.
Returns
-------
bool: True if `cls` has a staticmethod with name `meth_name`.
"""
return isinstance(getattr_static(cls, meth_name, None), staticmethod)
def isstatic(meth):
"""Companion to hasstatic that checks a method itself rather than a class
and method name. It does use hasstatic under the hood.
"""
# First check isn't required but I want to avoid reaching the hackier bits
# of code unless necessary. This catches regular methods and attributes.
if ismethod(meth) or not callable(meth): return False
parts = getattr(meth, '__qualname__', '').split('.')
if len(parts) != 2: return False
cls = method_of(meth)
return hasstatic(cls, parts[-1])
def has_classmethod(cls, meth_name):
"""Check if a class has a classmethod with a given name.
Note that isinstance(cls.meth_name, classmethod) would always
return False: we must use getattr_static or cls.__dict__[meth_name]
to potentially return True.
Parameters
----------
cls: type or obj
This is generally intended to be a class but it should work on objects
(class instances) as well.
meth_name: str
The name of the potential classmethod to check for.
Returns
-------
bool: True if cls possesses a classmethod with the specified name.
"""
return isinstance(getattr_static(cls, meth_name), classmethod)
def is_classmethod(meth):
"""Companion to has_classmethod that checks a method itself rather than a
class and a method name. It does use has_classmethod under the hood.
"""
if not ismethod(meth): return False
parts = getattr(meth, '__qualname__', '').split('.')
if len(parts) != 2: return False
cls = method_of(meth)
return has_classmethod(cls, parts[-1])
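# Illustrative usage of the staticmethod/classmethod helpers above (hedged
# sketch with a made-up class):
# >>> class A:
# ...     @staticmethod
# ...     def s(): pass
# ...     @classmethod
# ...     def c(cls): pass
# >>> hasstatic(A, 's'), has_classmethod(A, 'c')
# (True, True)
# >>> hasstatic(A, 'c'), has_classmethod(A, 's')
# (False, False)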
def parallelize(func, items, total=None, chunksize=1_000, processes=None):
"""Apply a function to a sequence of items in parallel. A progress bar
is included.
Parameters
----------
func: function
This will be applied to each item in `items`.
items: Iterable
Sequence of items to apply `func` to.
total: int or None
This defaults to the length of `items`. In the case that items is a
generator, this lets us pass in the length explicitly. This lets tqdm
know how quickly to advance our progress bar.
chunksize: int
Positive int that determines the size of chunks submitted to the
process pool as separate tasks. Multiprocessing's default is 1 but
larger values should speed things up, especially with long sequences.
processes: int or None
Optionally set number of processes to run in parallel.
Returns
-------
list
"""
total = total or len(items)
with Pool(processes) as p:
res = list(tqdm(p.imap(func, items, chunksize=chunksize),
total=total))
return res
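# Illustrative usage (hedged sketch; `square` is a made-up helper and must
# be picklable, so a lambda would not work with multiprocessing):
# >>> def square(x):
# ...     return x * x
# >>> parallelize(square, range(1_000), chunksize=100)
# [0, 1, 4, 9, ...]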
def identity(x):
"""Returns the input argument. Sometimes it is convenient to have this if
we sometimes apply a function to an item: rather than defining a None
variable, sometimes setting it to a function, then checking if it's None
every time we're about to call it, we can set the default as identity and
safely call it without checking.
Parameters
----------
x: any
Returns
-------
x: Unchanged input.
"""
return x
def always_true(x, *args, **kwargs):
"""Similar to `identity` but returns True instead of x. I'm tempted to name
this `true` but I fear that will cause some horrible bugs where I
accidentally use this when I want to use True.
"""
return True
def ifnone(arg, backup):
"""Shortcut to provide a backup value if an argument is None. Commonly used
for numpy arrays since their truthiness is ambiguous.
Parameters
----------
arg: any
We will check if this is None.
backup: any
This will be returned if arg is None.
Returns
-------
Either `arg` or `backup` will be returned.
"""
return arg if arg is not None else backup
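# Illustrative usage (hedged sketch): unlike `arg or backup`, falsy values
# such as 0 or an empty array are kept.
# >>> ifnone(None, [])
# []
# >>> ifnone(0, [])
# 0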
def listlike(x):
"""Checks if an object is a list/tuple/set/array etc. Strings and
mappings (e.g. dicts) are not considered list-like.
"""
return isinstance(x, Iterable) and not isinstance(x, (str, Mapping))
def tolist(x, length_like=None, length=None,
error_message='x length does not match desired length.'):
"""Helper to let a function accept a single value or a list of values for
a certain parameter.
WARNING: if x is a primitive and you specify a length (either via
`length_like` or `length`), the resulting list will contain multiple
references to the same item. This is mostly intended for use on lists of
floats or ints so I don't think it's a problem, but keep this in mind when
considering using this on mutable objects.
Parameters
----------
x: Iterable
Usually either a list/tuple or a primitive.
length_like: None or object
If provided, we check that x is the same length. If x is a primitive,
we'll make it the same length.
length: None or int
Similar to `length_like` but lets us specify the desired length
directly. `length_like` overrides this, though you should only provide
one or the other.
error_message: str
Displayed in the event that a desired length is specified and x is
list-like and does not match that length. You can pass in your own
error message if you want something more specific to your current use
case.
Returns
-------
list
Examples
--------
def train(lrs):
lrs = tolist(lrs)
...
We can now pass in a single learning rate or multiple.
>>> train(3e-3)
>>> train([3e-4, 3e-3])
"""
if length_like is not None: length = len(length_like)
# Case 1. List-like x
if listlike(x):
if length:
assert len(x) == length, error_message
return list(x)
# Case 2. Dict-like x
if isinstance(x, Mapping):
raise ValueError('x must not be a mapping. It should probably be a '
'primitive (str, int, etc.) or a list-like object '
'(tuple, list, set).')
# Case 3. Primitive x
return [x] * (length or 1)
def xor_none(*args, n=1):
"""Checks that exactly 1 (or n) of inputs is not None. Useful for
validating optional function arguments (for example, ensuring the user
specifies either a directory name or a list of files, but not both).
Parameters
----------
args: any
n: int
The desired number of non-None elements. Usually 1 but we allow the
user to specify other values.
Returns
-------
None: This will raise an error if the condition is not satisfied. Do not
use this as an if condition (e.g. `if xor_none(a, b): print('success')`).
This would always evaluate to False because the function doesn't explicitly
return a value, so we get None.
"""
if sum(bool(arg is not None) for arg in args) != n:
raise ValueError(f'Exactly {n} of the args must be not None.')
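# Illustrative usage (hedged sketch with made-up arguments): validate that
# exactly one of two mutually exclusive options was provided.
# >>> def load(dirname=None, paths=None):
# ...     xor_none(dirname, paths)
# ...     ...
# >>> load(dirname='data')                   # fine
# >>> load(dirname='data', paths=['a.csv'])  # raises ValueError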
def max_key(d, fn=identity):
"""Find the maximum value in a dictionary and return the associated key.
If we want to compare values using something other than their numeric
values, we can specify a function. For example, with a dict mapping strings
to strings, fn=len would return the key with the longest value.
Parameters
----------
d: dict
Values to select from.
fn: callable
Takes 1 argument (a single value from d.values()) and returns a number.
This will be used to sort the items.
Returns
-------
A key from dict `d`.
"""
return max(d.items(), key=lambda x: fn(x[1]))[0]
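# Illustrative usage (hedged sketch):
# >>> max_key({'a': 3, 'b': 10, 'c': 7})
# 'b'
# >>> max_key({'a': 'xy', 'b': 'xyz', 'c': 'x'}, fn=len)
# 'b'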
def is_builtin(x, drop_callables=True):
"""Check if an object is a Python built-in object.
Parameters
----------
x: object
drop_callables: bool
If True, return False for callables (basically functions, methods, or
classes). These typically will return True otherwise since they are of
class `type` or `builtin_function_or_method`.
Returns
-------
bool: True if `x` is a built-in object, False otherwise.
"""
def _builtin(x, drop_callables):
if callable(x) and drop_callables:
return False
return x.__class__.__module__ == 'builtins'
builtin = partial(_builtin, drop_callables=drop_callables)
# Check mapping first because mappings are iterable.
if isinstance(x, Mapping):
return builtin(x) and all(builtin(o) for o in flatten(x.items()))
elif isinstance(x, Iterable):
return builtin(x) and all(builtin(o) for o in flatten(x))
return builtin(x)
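# Illustrative usage (hedged sketch; Foo is a made-up class):
# >>> is_builtin([1, 'a', 2.5])
# True
# >>> class Foo: pass
# >>> is_builtin(Foo())
# False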
def hashable(x):
"""Check if an object is hashable. Hashable objects will usually be
immutable though this is not guaranteed.
Parameters
----------
x: object
The item to check for hashability.
Returns
-------
bool: True if `x` is hashable (suggesting immutability), False otherwise.
"""
try:
_ = hash(x)
return True
except TypeError:
return False
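# Illustrative usage (hedged sketch):
# >>> hashable((1, 2, 3))
# True
# >>> hashable([1, 2, 3])
# False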
def fgrep(text, term, window=25, with_idx=False, reverse=False):
"""Search a string for a given term. If found, print it with some context.
Similar to `grep -C 1 term text`. `fgrep` is short for faux grep.
Parameters
----------
text: str
Text to search.
term: str
Term to look for in text.
window: int
Number of characters to display before and after the matching term.
with_idx: bool
If True, return index as well as string.
reverse: bool
If True, reverse search direction (find last match rather than first).
Returns
-------
str or tuple[int, str]: The desired term and its surrounding context.
If the term isn't present, an empty string is returned. If
with_idx=True, a tuple of (match index, string with text) is returned.
"""
idx = text.rfind(term) if reverse else text.find(term)
if idx == -1:
res = ''
else:
res = text[max(idx-window, 0):idx+window]
return (idx, res) if with_idx else res
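# Illustrative usage (hedged sketch; window counts characters, not lines):
# >>> fgrep('the quick brown fox jumps over the lazy dog', 'fox', window=6)
# 'brown fox ju'
# >>> fgrep('the quick brown fox', 'cat')
# ''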
def spacer(char='-', n_chars=79, newlines_before=1, newlines_after=1):
""" Get string to separate output when printing output for multiple items.
Parameters
----------
char: str
The character that will be printed repeatedly.
n_chars: int
The number of times to repeat `char`. We expect that `char` is a
single character so this will be the total line length.
newlines_before: int
Number of newline characters to add before the spacer.
newlines_after: int
Number of newline characters to add after the spacer.
Returns
-------
str
"""
return '\n'*newlines_before + char * n_chars + '\n'*newlines_after
def func_name(func):
"""Usually just returns the name of a function. The difference is this is
compatible with functools.partial, which otherwise makes __name__
inaccessible.
Parameters
----------
func: callable
Can be a function, partial, or callable class.
"""
assert callable(func), 'Input must be callable.'
try:
res = func.__name__
except AttributeError:
if isinstance(func, partial):
return func_name(func.func)
else:
return func.__class__.__name__
except Exception as e:
raise e
return res
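# Illustrative usage (hedged sketch):
# >>> func_name(max_key)
# 'max_key'
# >>> func_name(partial(max_key, fn=len))
# 'max_key'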
def snake2camel(text):
"""Convert snake case to camel case. This assumes the input is valid snake
case (if you have some weird hybrid of snake and camel case, for instance,
you'd want to do some preprocessing first).
Parameters
----------
text: str
Snake case string, e.g. vader_sentiment_score.
Returns
-------
str: `text` converted to camel case, e.g. vaderSentimentScore.
"""
res = []
prev = ''
for char in text:
if char != '_':
# Check if res is empty because of case with leading underscore.
res.append(char.upper() if prev == '_' and res else char)
prev = char
return ''.join(res)
def camel2snake(text):
"""Convert camel case to snake case. This assumes the input is valid camel
case (if you have some weird hybrid of camel and snake case, for instance,
you'd want to do some preprocessing first).
Parameters
----------
text: str
Camel case string, e.g. vaderSentimentScore.
Returns
-------
str: `text` converted to snake case, e.g. vader_sentiment_score.
"""
res = []
for char in text:
if char.islower():
res.append(char)
else:
res.extend(['_', char.lower()])
return ''.join(res)
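# Illustrative usage of the two case converters above (hedged sketch):
# >>> snake2camel('vader_sentiment_score')
# 'vaderSentimentScore'
# >>> camel2snake('vaderSentimentScore')
# 'vader_sentiment_score'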
def to_snake(text):
"""Experimental feature: tries to convert any common format to snake case.
This hasn't been extensively tested but it seems to work with snake case
(no change), camel case, upper camel case, words separated by
hyphens/dashes/spaces, and combinations of the above. It may occasionally
split words that should not be split, though this should be rare if names
use actual English words (this might not work so well on fastai-style
variable names (very short, e.g. "tfms" for "transforms"), but the intended
use case is mostly for fixing column names in pandas).
Parameters
----------
text: str
Returns
-------
str: Input text converted to snake case.
"""
return '_'.join(wn.split(text.lower()))
def to_camel(text):
"""Experimental feature: tries to convert any common format to camel case.
This hasn't been extensively tested but it seems to work with camel case
(no change), snake case, upper camel case, words separated by
hyphens/dashes/spaces, and combinations of the above. It may occasionally
split words that should not be split, though this should be rare if names
use actual English words (this might not work so well on fastai-style
variable names (very short, e.g. "tfms" for "transforms"), but the intended
use case is mostly for fixing column names in pandas).
Parameters
----------
text: str
Returns
-------
str: Input text converted to camel case.
"""
return ''.join(w.title() if i > 0 else w
for i, w in enumerate(wn.split(text.lower())))
def kwargs_fallback(self, *args, assign=False, **kwargs):
"""Use inside a method that accepts **kwargs. Sometimes we want to use
an instance variable for some computation but want to give the user the
option to pass in a new value to the method (often ML hyperparameters) to
be used instead. This function makes that a little more convenient.
Parameters
----------
self: object
The class instance. In most cases users will literally pass `self` in.
args: str
One or more names of variables to use this procedure on.
assign: bool
If True, any user-provided kwargs will be used to update attributes of
the instance. If False (the default), they will be used in computation
but won't change the state of the instance.
kwargs: any
Just forward along the kwargs passed to the method.
Returns
-------
list or single object: If more than one arg is specified, a list of values
is returned. For just one arg, a single value will be returned.
Examples
--------
class Foo:
def __init__(self, a, b=3, c=('a', 'b', 'c')):
self.a, self.b, self.c = a, b, c
def walk(self, d, **kwargs):
a, c = kwargs_fallback(self, 'a', 'c', **kwargs)
print(self.a, self.b, self.c)
print(a, c, end='\n\n')
b, c = kwargs_fallback(self, 'b', 'c', assign=True, **kwargs)
print(self.a, self.b, self.c)
print(b, c)
# Notice the first `kwargs_fallback` call doesn't change attributes of f
# but the second does. In the first block of print statements, the variable
# `b` does not exist yet because we didn't include it in *args.
>>> f = Foo(1)
>>> f.walk(d=0, b=10, c=100)
1 3 ('a', 'b', 'c')
1 100
1 10 100
10 100
"""
res = []
for arg in args:
# Don't just use `kwargs.get(arg) or ...` because this doesn't work
# well when we pass in a numpy array or None.
val = kwargs[arg] if arg in kwargs else getattr(self, arg)
res.append(val)
if assign: setattr(self, arg, val)
return res if len(res) > 1 else res[0]
def cd_root(root_subdir='notebooks', max_depth=4):
"""Run at start of Jupyter notebook to enter project root.
Parameters
----------
root_subdir: str
Name of a subdirectory contained in the project root directory.
If not found in the current working directory, this will move
to the parent directory repeatedly until it is found. Choose carefully:
if you have multiple directories with the same name in your directory
structure (e.g. ~/htools/lib/htools), 'htools' would be a bad choice
if you want to end up in ~.
max_depth: int
Max number of directory levels to traverse. Don't want to get stuck in
an infinite loop if we make a mistake.
Examples
--------
Sample file structure (abbreviated):
my_project/
py/
fetch_raw_data.py
notebooks/
nb01_eda.ipynb
Running cd_root() from nb01_eda.ipynb will change the working
directory from notebooks/ to my_project/, which is typically the
same directory we'd run scripts in py/ from. This makes converting
from notebooks to scripts easier.
"""
changes = 0
start_dir = os.getcwd()
while root_subdir not in next(os.walk('.'))[1]:
if changes >= max_depth:
os.chdir(start_dir)
raise RuntimeError('Exceeded max_depth. Check that your '
'root_subdir is <= max_depth directories away.')
os.chdir('..')
changes += 1
print('Current directory:', os.getcwd())
def ngrams(word, n=3, step=1, drop_last=False):
"""To get non-overlapping sequences, pass in same value for `step` as `n`.
"""
stop = max(1, step+len(word)-n)
ngrams_ = []
for i in range(0, stop, step):
ngrams_.append(word[i:i+n])
if drop_last and len(ngrams_[-1]) < n: ngrams_ = ngrams_[:-1]
return ngrams_
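# Illustrative usage (hedged sketch): overlapping by default, or pass
# step=n for non-overlapping chunks.
# >>> ngrams('python', n=3)
# ['pyt', 'yth', 'tho', 'hon']
# >>> ngrams('python', n=3, step=3)
# ['pyt', 'hon']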
def shell(cmd, return_output=True):
"""Execute shell command (between subprocess and os, there's ~5 different
ways to do this and I always forget which I want. This is just a way for me
to choose once and not have to decide again. There are rare situations
where we may need a different function (subprocess.run is blocking; if we
want to launch a process and continue the script without waiting for
completion, we can use subprocess.check_call).
Parameters
----------
cmd: str
Example: 'ls *.csv'
return_output: bool
If True, return the output of the command: e.g. if cmd is
'pip show requests', this would return a string containing information
about the version of the requests library you have installed. If False,
we return a tuple of (return code, stderr, stdout). Note that stderr and
stdout will be None here unless output capture is requested (e.g. by
passing stdout=subprocess.PIPE or capture_output=True to subprocess.run).
Returns
-------
str or tuple: If return_output is True, the decoded stdout is returned as a
str. Otherwise, a tuple of (returncode (int), stderr, stdout) is returned.
"""
parts = cmd.split()
if return_output:
return check_output(parts).decode()
res = run(parts)
return res.returncode, res.stderr, res.stdout
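# Illustrative usage (hedged sketch; output depends on your environment):
# >>> shell('echo hello')
# 'hello\n'
# >>> shell('echo hello', return_output=False)
# (0, None, None)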
def set_summary(x1, x2, info=('first_only', 'second_only')):
"""Summarize set comparison between two iterables (they will be converted
to sets internally).
Parameters
----------
x1: Iterable
x2: Iterable
The two iterables whose elements will be compared.
info: Iterable[str]
Determines what info to return. 'first_only' returns items only in the
first iterable, 'second_only' returns items only in the second, 'and'
returns items in both, and 'or' returns items in either.
Returns
-------
dict[str, set]: Maps str in `info` to set of items.
"""
s1, s2 = set(x1), set(x2)
res = {'and': s1 & s2,
'or': s1 | s2,
'first_only': s1 - s2,
'second_only': s2 - s1}
for k, v in res.items():
print(f'{k}: {len(v)} items')
return select(res, keep=list(info))
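# Illustrative usage (hedged sketch): counts for all four comparisons are
# printed, while only the requested subsets are returned.
# >>> set_summary([1, 2, 3], [2, 3, 4])
# and: 2 items
# or: 4 items
# first_only: 1 items
# second_only: 1 items
# {'first_only': {1}, 'second_only': {4}}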
SENTINEL = object()
|
'''
Common usage:
1. put this script in the ckpt folder
2. python print_tensor_in_ckpt.py > tensors.txt
'''
# ref: https://stackoverflow.com/questions/38218174/how-do-i-find-the-variable-names-and-values-that-are-saved-in-a-checkpoint
import tensorflow as tf
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
latest_ckp = tf.train.latest_checkpoint('./')
print_tensors_in_checkpoint_file(latest_ckp, all_tensors=True, tensor_name='')
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
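# Worked example of the formula above (illustrative numbers): if the base
# (witness-stripped) serialization is 900,000 bytes and the full
# serialization is 1,300,000 bytes, then
# vsize = int((3*900000 + 1300000 + 3) / 4) = int(4000003 / 4) = 1000000.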
class TestNode(NodeConnCB):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if reason is not None and not accepted:
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_message["reject"].reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=VB_TOP_BITS):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
self.log.info("Verifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
self.log.info("Testing non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
self.log.info("Testing behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
self.log.info("Testing witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
self.log.info("Testing witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
self.log.info("Testing witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This should not work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
self.log.info("Testing extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-200000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
self.log.info("Testing maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Can create witness outputs that are long, but can't be greater than
# 10k bytes to successfully spend
self.log.info("Testing maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
self.log.info("Testing witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
self.log.error("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
# Delivering this transaction with witness should fail (no matter who
# its from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-100000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
self.log.info("Testing block relay")
blocktype = 2|MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
# Favcoin: Blocks with nVersion < VB_TOP_BITS are rejected
# self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
# assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
# self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if segwit_activated == False:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
# Favcoin: Blocks with nVersion < VB_TOP_BITS are rejected
block4 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [ CBlockHeader(block4) ]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-100000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000000, scriptPubKey)]
tx.vout.append(CTxOut(800000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(700000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
self.log.info("Testing standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 400000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-100000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-100000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 100000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
self.log.info("Testing premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
self.log.info("Testing segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 100000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
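# Note: under BIP 143, SIGHASH_SINGLE with an input index past the last output hashes a
# zeroed hashOutputs (the legacy "sign the value 1" quirk does not apply), so those
# signatures remain valid here.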
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxos
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
self.log.info("Testing P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node_id):
self.log.info("Testing software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
self.stop_node(node_id)
self.start_node(node_id, extra_args=[])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
# Make sure this peers blocks match those of node0.
height = self.nodes[node_id].getblockcount()
while height >= 0:
block_hash = self.nodes[node_id].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
self.log.info("Testing sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
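# Witness sigop accounting: each OP_CHECKMULTISIG counts as 20 sigops and each
# OP_CHECKSIG as 1, so this script costs 20*5 + 193 = 293 sigops when spent.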
sigops_per_script = 20*5 + 193*1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# If we're not indicating segwit support, we will still be
# signalling for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
self.log.info("Testing uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-100000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-100000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
self.log.info("Testing detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 100000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def test_reject_blocks(self):
self.log.info("Testing rejection of block.nVersion < BIP9_TOP_BITS blocks")
block = self.build_next_block(nVersion=4)
block.solve()
resp = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(resp, 'bad-version(0x00000004)')
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
self.log.info("Starting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
self.log.info("Testing behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
self.log.info("Testing behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_reject_blocks()
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
# Favcoin: Disable test due to occasional travis issue
#self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(node_id=2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
|
import inspect
import json
import logging
import threading
from datetime import datetime, timezone
from time import time
import pytz
import requests
from notion.block import TextBlock
from notion.client import NotionClient
from notion.collection import NotionDate
from tqdm import tqdm
from tzlocal import get_localzone
from .constants import (
POST_INTERVAL_SEC,
REQUIRED_COLUMNS,
REQUIRED_STATUS_OPTIONS,
Status,
)
def get_localzone_name():
local_tz = get_localzone()
return datetime.now(local_tz).tzname()
class notion_tqdm(tqdm):
_is_configured = False
post_interval_sec = POST_INTERVAL_SEC
timezone = get_localzone_name()
common_props = {}
@classmethod
def _get_table_schema_prop_names(cls):
return set(
[prop["name"] for prop in cls.table_view.collection.get_schema_properties()]
)
@classmethod
def _validate_table_schema(cls):
# Check table view type
if "collection" not in dir(cls.table_view):
raise Exception(
f"table_view is not referring to the table correctly. Make sure you are setting a table link that is not a page link."
)
# Check required columns
table_view_columns = cls._get_table_schema_prop_names()
missing_columns = REQUIRED_COLUMNS - table_view_columns
if len(missing_columns) > 0:
raise Exception(
f"There are missing columns in the table: {missing_columns}. Did you duplicate this view?: https://www.notion.so/syunyo/notion-tqdm-template-7d2d53595e774c9eb7a020e00fd81fab"
)
# Check select options
table_status_options = set(
[
op["value"]
for op in cls.table_view.collection.get_schema_property("status")[
"options"
]
]
)
missing_options = REQUIRED_STATUS_OPTIONS - table_status_options
if len(missing_options) > 0:
raise Exception(
f"There are missing options in the status columns: {missing_options}. Did you duplicate this view?: https://www.notion.so/syunyo/notion-tqdm-template-7d2d53595e774c9eb7a020e00fd81fab"
)
@classmethod
def set_config(
cls, token_v2, table_url, email=None, timezone=None, post_interval_sec=None
):
# Common Config
if timezone is not None:
cls.timezone = timezone
if post_interval_sec is not None:
cls.post_interval_sec = post_interval_sec
cls._timezone_pytz = pytz.timezone(cls.timezone)
# Notion Config
cls.client = NotionClient(token_v2=token_v2)
if email is not None:
cls.client.set_user_by_email(email)
cls.table_view = cls.client.get_block(table_url)
# Validation
cls._validate_table_schema()
cls._is_configured = True
@classmethod
def set_common_props(cls, **kwargs):
cls.common_props = kwargs
missing_columns = set(kwargs) - cls._get_table_schema_prop_names()
if len(missing_columns) > 0:
logging.error(
f"There are missing columns in the table: {missing_columns}."
)
def localize_timestamp(self, timestamp):
utc_datetime = datetime.fromtimestamp(timestamp, tz=timezone.utc)
return utc_datetime.astimezone(notion_tqdm._timezone_pytz)
def _update_row(self):
if not notion_tqdm._is_configured:
logging.warning(
"notion_tqdm does not seem to be set yet. call notion_tqdm.set_config and configure it.\nrefer to https://github.com/shunyooo/notion-tqdm#usage"
)
return
if self._row_creating:
return
if self.row is None and not self._row_creating:
self._row_creating = True
self.row = notion_tqdm.table_view.collection.add_row()
self._row_creating = False
for c, v in notion_tqdm.common_props.items():
self.row.set_property(c, v)
if self.row is not None:
# Base props
# TODO: Difference only updates
now = time()
row = self.row
row.total = self.total
row.name = self.desc
row.status = self.status
row.value = self.n
row.start_timestamp = self.start_t
row.update_timestamp = now
row.timerange = NotionDate(
self.localize_timestamp(self.start_t),
self.localize_timestamp(now),
timezone=notion_tqdm.timezone,
)
row.elapsed_sec = now - self.start_t
# Custom props
# TODO: Set the props that have been skipped during creating.
for c, v in self.custom_props.items():
row.set_property(c, v)
# Add Text Blocks
for text in self._pending_texts:
self.row.children.add_new(TextBlock).title = text
self._pending_texts = []
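# Throttle Notion writes: a post is allowed only when no request is in flight and at
# least post_interval_sec has elapsed since the last post.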
@property
def _can_post(self):
is_past = (
self.last_post_time is None
or (time() - self.last_post_time) > notion_tqdm.post_interval_sec
)
return not self._loading and is_past
def _post_if_need(self, force):
if self._can_post or force:
self._loading = True
try:
self._update_row()
except Exception as e:
logging.warning(e)
self.last_post_time = time()
self._loading = False
def display(self, msg=None, pos=None, status=None, force=False):
force = status is not None or force
self.status = Status.doing if status is None else status
t = threading.Thread(
name="_post_if_need", target=self._post_if_need, args=[force]
)
t.daemon = True
t.start()
def __init__(self, *args, **kwargs):
self.row = None
self.total = 0
self.last_post_time = None
self.status = Status.doing
self._loading = False
self._row_creating = False
super().__init__(*args, **kwargs)
self.sp = self.display
self.custom_props = {}
self._pending_texts = []
def __iter__(self, *args, **kwargs):
try:
for obj in super().__iter__(*args, **kwargs):
yield obj
except:
self.display(status=Status.error)
raise
def add_text(self, text, force=False):
self._pending_texts.append(text)
self.display(force=force)
def update_props(self, force=False, **kwargs):
self.custom_props = kwargs
self.display(force=force)
def update(self, *args, **kwargs):
try:
super().update(*args, **kwargs)
except:
self.display(status=Status.error)
raise
def close(self, *args, **kwargs):
if self.total and self.n < self.total:
self.display(status=Status.error)
else:
self.display(status=Status.done)
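# A minimal usage sketch (token_v2 and table_url below are placeholders, not real values):
#   notion_tqdm.set_config(token_v2="<your token_v2>", table_url="<your table view url>")
#   for _ in notion_tqdm(range(100), desc="training"):
#       ...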
|
import shutil
import sys
import os
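# Walk the current directory tree, deleting every __pycache__ directory and .DS_Store file.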
for (root,dirs,files) in os.walk(os.path.abspath('.'),topdown=True):
for d in dirs:
path = os.path.join(root,d)
if '__pycache__' in path:
shutil.rmtree(path)
for f in files:
path = os.path.join(root,f)
if '.DS_Store' in path:
os.remove(path)
|
class messageEntity:
isBotMention = False
start: int
end: int
def __repr__(self):
return "<class 'messageEntity' ({})>".format(type(self))
class formatEntity(messageEntity):
pass
class mention(messageEntity): #(@username)
text: str
user: str
def __init__(self, text, user = None, botname:str = ""):
self.isBotMention = text == "@" + botname
if text[0] == '@': text = text[1:]
self.text = text
self.user = user
class hashtag(messageEntity): #(#hashtag)
text: str
def __init__(self, text):
if text[0] == '#': text = text[1:]
self.text = text
class cashtag(messageEntity): #($USD)
text: str
def __init__(self, text):
self.text = text
class bot_command(messageEntity): #(/start@jobs_bot)
text: str
def __init__(self, text, botname:str = ""):
self.text = text
if text.count("@") == 1:
command, bot_mention = text[:-len(botname)], text[-len(botname):]
self.isBotMention = bot_mention == botname
else:
command = text
self.isBotMention = True
if command[0] == '/': command = command[1:]
self.command = command
class url(messageEntity): #(https://telegram.org)
text: str
def __init__(self, text, botname:str = ""):
self.isBotMention = text == "https://t.me/" + botname
self.text = text
class email(messageEntity): #(do-not-reply@telegram.org)
text: str
def __init__(self, text):
self.text = text
class phone_number(messageEntity): #(+1-212-555-0123)
text: str
def __init__(self, text):
self.text = text
class bold(formatEntity): #(bold text)
text: str
def __init__(self, text):
self.text = text
class italic(formatEntity): #(italic text)
text: str
def __init__(self, text):
self.text = text
class underline(formatEntity): #(underlined text)
text: str
def __init__(self, text):
self.text = text
class strikethrough(formatEntity): #(strikethrough text)
text: str
def __init__(self, text):
self.text = text
class code(messageEntity): #(monowidth string)
text: str
def __init__(self, text):
self.text = text
class pre(code): #(monowidth block)
text: str
language: str
def __init__(self, text, language = None):
self.text = text
self.language = language
class text_link(messageEntity): #(for clickable text URLs)
text: str
def __init__(self, text, url, botname: str = ""):
self.isBotMention = url == "https://t.me/" + botname
self.text = text
self.url = url
class text_mention(messageEntity): #(for users without usernames)
text: str
user: str
def __init__(self, text, user = None):
self.text = text
self.user = user
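# A minimal usage sketch (hypothetical values, assuming the bot is registered as
# "jobs_bot"), showing how the entity classes above might be instantiated when
# parsing a message:
#   m = mention("@jobs_bot", botname="jobs_bot")            # m.text == "jobs_bot", m.isBotMention is True
#   c = bot_command("/start@jobs_bot", botname="jobs_bot")  # c.command == "start", c.isBotMention is True
#   h = hashtag("#hiring")                                   # h.text == "hiring"
#   u = url("https://t.me/jobs_bot", botname="jobs_bot")     # u.isBotMention is True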
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystoneclient.tests.v3 import utils
from keystoneclient.v3 import domains
class DomainTests(utils.TestCase, utils.CrudTests):
def setUp(self):
super(DomainTests, self).setUp()
self.key = 'domain'
self.collection_key = 'domains'
self.model = domains.Domain
self.manager = self.client.domains
def new_ref(self, **kwargs):
kwargs = super(DomainTests, self).new_ref(**kwargs)
kwargs.setdefault('enabled', True)
kwargs.setdefault('name', uuid.uuid4().hex)
return kwargs
|
# from importlib import import_module
from os import path
import re
from setuptools import find_packages, setup
def get_version():
text = open(path.join(path.dirname(__file__), "markdown_it", "__init__.py")).read()
match = re.compile(r"^__version__\s*\=\s*[\"\']([^\s\'\"]+)", re.M).search(text)
return match.group(1)
setup(
name="markdown-it-py",
version=get_version(),
description="Python port of markdown-it. Markdown parsing, done right!",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/executablebooks/markdown-it-py",
project_urls={"Documentation": "https://markdown-it-py.readthedocs.io"},
author="Chris Sewell",
author_email="chrisj_sewell@hotmail.com",
license="MIT",
packages=find_packages(exclude=["test*", "benchmarking"]),
include_package_data=True,
entry_points={"console_scripts": ["markdown-it = markdown_it.cli.parse:main"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup",
],
keywords="markdown lexer parser development",
python_requires="~=3.6",
install_requires=["attrs>=19,<21"],
extras_require={
"code_style": ["pre-commit==2.6"],
"testing": [
"coverage",
"pytest>=3.6,<4",
"pytest-cov",
"pytest-regressions",
"pytest-benchmark~=3.2",
"psutil",
],
"rtd": [
"myst-nb",
"sphinx_book_theme",
"sphinx-panels~=0.4.0",
"sphinx-copybutton",
"sphinx>=2,<4",
"pyyaml",
],
"compare": [
"commonmark~=0.9.1",
"markdown~=3.2",
"mistune~=0.8.4",
# "mistletoe~=0.7.2",
"mistletoe-ebp~=0.10.0",
"panflute~=1.12",
],
},
zip_safe=False,
)
|
""" A BlogController Module """
from masonite.controllers import Controller
from masonite.request import Request
from app.Blog import Blog
class BlogController(Controller):
def __init__(self, request: Request):
self.request = request
def show(self):
id = self.request.param("id")
return Blog.find(id)
def index(self):
return Blog.all()
def create(self):
subject = self.request.input("title")
details = self.request.input("body")
blog = Blog.create({"title": subject, "body": details})
return blog
def update(self):
title = self.request.input("title")
body = self.request.input("body")
id = self.request.param("id")
Blog.where("id", id).update({"title": title, "body": body})
return Blog.where("id", id).get()
def destroy(self):
id = self.request.param("id")
blog = Blog.where("id", id).get()
Blog.where("id", id).delete()
return blog
|
import uuid
import datetime
from sqlalchemy import Float
from sqlalchemy import Column
from sqlalchemy import String
from sqlalchemy import Integer
from sqlalchemy import Boolean
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from .base import Model
from .problem import Problem
from .score import ScoreType
__all__ = [
"Event",
"EventTeam",
"EventAdmin",
"EventScoreType",
]
class Event(Model):
"""Event table.
This table contains all the information about a RAMP event.
Parameters
----------
problem_name : str
The name of the problem.
name : str
The name of the event.
event_title : str
The title to give for the event (used in the frontend, can contain
spaces).
ramp_sandbox_name : str
Name of the submission which will be considered the sandbox. It will
correspond to the key ``sandbox_name`` of the dictionary created with
:func:`ramp_utils.generate_ramp_config`.
path_ramp_submissions : str
Path to the deployment RAMP submissions directory. It will correspond
to the key ``ramp_submissions_dir`` of the dictionary created with
:func:`ramp_utils.generate_ramp_config`.
session : None or :class:`sqlalchemy.orm.Session`, optional
The session used to perform some required queries. It is a required
argument when interacting with the database outside of Flask.
Attributes
----------
id : int
ID of the table row.
name : str
Event name.
title : str
Event title.
problem_id : int
The problem ID associated with this event.
problem : :class:`ramp_database.model.Problem`
The :class:`ramp_database.model.Problem` instance.
max_members_per_team : int
The maximum number of members per team.
max_n_ensemble : int
The maximum number of models in the ensemble.
is_send_trained_mails : bool
Whether or not to send an email when a model is trained.
is_public : bool
Whether or not the event is public.
is_controled_signup : bool
Whether or not the sign-up to the event is moderated.
is_competitive : bool
Whether or not the challenge is in the competitive phase.
min_duration_between_submissions : int
The amount of time to wait between two submissions.
opening_timestamp : datetime
The date and time of the event opening.
public_opening_timestamp : datetime
The date and time of the public opening of the event.
closing_timestamp : datetime
The date and time of the event closure.
official_score_name : str
The name of the official score used to evaluate the submissions.
combined_combined_valid_score : float
The combined public score for all folds.
combined_combined_test_score : float
The combined private score for all folds.
combined_foldwise_valid_score : float
The combined public scores for each fold.
combined_foldwise_test_score : float
The combined private scores for each fold.
n_submissions : int
The number of submissions for an event.
public_leaderboard_html_no_links : str
The public leaderboard in HTML format, without links to the submissions.
public_leaderboard_html_with_links : str
The public leaderboard in HTML format, with links to the submissions.
private_leaderboard_html : str
The private leaderboard in HTML.
failed_leaderboard_html : str
The leaderboard with the failed submissions.
new_leaderboard_html : str
The leaderboard with the newly submitted submissions.
public_competition_leaderboard_html : str
The public leaderboard of the competition in HTML.
private_competition_leaderboard_html : str
The private leaderboard of the competition in HTML.
path_ramp_kit : str
The path where the kit is located.
ramp_sandbox_name : str
Name of the submission which will be considered the sandbox.
path_ramp_submissions : str
Path to the deployment RAMP submissions directory. It will correspond
to the key `ramp_submissions_dir` of the dictionary created with
:func:`ramp_utils.generate_ramp_config`.
score_types : list of :class:`ramp_database.model.EventScoreType`
A back-reference to the score type used in the event.
event_admins : list of :class:`ramp_database.model.EventAdmin`
A back-reference to the admin for the event.
event_teams: list of :class:`ramp_database.model.EventTeam`
A back-reference to the teams enrolled in the event.
cv_folds : list of :class:`ramp_database.model.CVFold`
A back-reference to the CV folds for the event.
"""
__tablename__ = "events"
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False, unique=True)
title = Column(String, nullable=False)
problem_id = Column(Integer, ForeignKey("problems.id"), nullable=False)
problem = relationship(
"Problem", backref=backref("events", cascade="all, delete-orphan")
)
max_members_per_team = Column(Integer, default=1)
# max number of submissions in Caruana's ensemble
max_n_ensemble = Column(Integer, default=80)
is_send_trained_mails = Column(Boolean, default=True)
is_send_submitted_mails = Column(Boolean, default=True)
is_public = Column(Boolean, default=False)
is_controled_signup = Column(Boolean, default=True)
# in competitive events participants can select the submission
# with which they want to participate in the competition
is_competitive = Column(Boolean, default=False)
min_duration_between_submissions = Column(Integer, default=15 * 60)
opening_timestamp = Column(DateTime, default=datetime.datetime(2000, 1, 1, 0, 0, 0))
# before this timestamp, links to submissions in the leaderboard are not active
public_opening_timestamp = Column(
DateTime, default=datetime.datetime(2100, 1, 1, 0, 0, 0)
)
closing_timestamp = Column(DateTime, default=datetime.datetime(2100, 1, 1, 0, 0, 0))
# the name of the score in self.event_score_types which is used for
# ensembling and contributivity.
official_score_name = Column(String)
# official_score_index = Column(Integer, default=0)
combined_combined_valid_score = Column(Float, default=None)
combined_combined_test_score = Column(Float, default=None)
combined_foldwise_valid_score = Column(Float, default=None)
combined_foldwise_test_score = Column(Float, default=None)
n_submissions = Column(Integer, default=0)
public_leaderboard_html_no_links = Column(String, default=None)
public_leaderboard_html_with_links = Column(String, default=None)
private_leaderboard_html = Column(String, default=None)
failed_leaderboard_html = Column(String, default=None)
new_leaderboard_html = Column(String, default=None)
public_competition_leaderboard_html = Column(String, default=None)
private_competition_leaderboard_html = Column(String, default=None)
# big change in the database
ramp_sandbox_name = Column(
String, nullable=False, unique=False, default="starting-kit"
)
path_ramp_submissions = Column(String, nullable=False, unique=False)
def __init__(
self,
problem_name,
name,
event_title,
ramp_sandbox_name,
path_ramp_submissions,
session=None,
):
self.name = name
self.ramp_sandbox_name = ramp_sandbox_name
self.path_ramp_submissions = path_ramp_submissions
if session is None:
self.problem = Problem.query.filter_by(name=problem_name).one()
else:
self.problem = (
session.query(Problem).filter(Problem.name == problem_name).one()
)
self.title = event_title
def __repr__(self):
return "Event({})".format(self.name)
def set_n_submissions(self):
"""Set the number of submissions for the current event by checking
each team."""
self.n_submissions = 0
for event_team in self.event_teams:
# subtract one for the starting kit
self.n_submissions += len(event_team.submissions) - 1
@property
def Predictions(self):
""":class:`rampwf.prediction_types.base.BasePrediction`: Predictions
for the given event."""
return self.problem.Predictions
@property
def workflow(self):
""":class:`ramp_database.model.Workflow`: The workflow used for the
event."""
return self.problem.workflow
@property
def official_score_type(self):
""":class:`ramp_database.model.EventScoreType`: The score type for the
current event."""
return EventScoreType.query.filter_by(
event=self, name=self.official_score_name
).one()
def get_official_score_type(self, session):
"""Get the type of the default score used for the current event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session used to make the query.
Returns
-------
event_type_score : :class:`ramp_database.model.EventScoreType`
The default score type for the current event.
"""
return (
session.query(EventScoreType)
.filter(EventScoreType.event == self)
.filter(EventScoreType.name == self.official_score_name)
.one()
)
@property
def official_score_function(self):
"""callable: The default function used for scoring in the event."""
return self.official_score_type.score_function
@property
def combined_combined_valid_score_str(self):
"""str: Convert to string the combined public score for all folds."""
return (
None
if self.combined_combined_valid_score is None
else str(
round(
self.combined_combined_valid_score,
self.official_score_type.precision,
)
)
)
@property
def combined_combined_test_score_str(self):
"""str: Convert to string the combined private score for all folds."""
return (
None
if self.combined_combined_test_score is None
else str(
round(
self.combined_combined_test_score,
self.official_score_type.precision,
)
)
)
@property
def combined_foldwise_valid_score_str(self):
"""str: Convert to string the combined public score for each fold."""
return (
None
if self.combined_foldwise_valid_score is None
else str(
round(
self.combined_foldwise_valid_score,
self.official_score_type.precision,
)
)
)
@property
def combined_foldwise_test_score_str(self):
"""str: Convert to string the combined public score for each fold."""
return (
None
if self.combined_foldwise_test_score is None
else str(
round(
self.combined_foldwise_test_score,
self.official_score_type.precision,
)
)
)
@property
def is_open(self):
"""bool: Whether or not the event is opened."""
now = datetime.datetime.utcnow()
return self.closing_timestamp > now > self.opening_timestamp
@property
def is_public_open(self):
"""bool: Whether or not the public phase of the event is opened."""
now = datetime.datetime.utcnow()
return self.closing_timestamp > now > self.public_opening_timestamp
@property
def is_closed(self):
"""bool: Whether or not the event is closed."""
now = datetime.datetime.utcnow()
return now > self.closing_timestamp
@property
def n_jobs(self):
"""int: The number of cv fold which can be used as number of jobs."""
return sum(1 for cv_fold in self.cv_folds if cv_fold.type == "live")
@property
def n_participants(self):
"""int: The number of participants to the event."""
# Only select individual teams
return len(
[
event_team
for event_team in self.event_teams
if event_team.team.is_individual
]
)
class EventScoreType(Model):
"""EventScoreType table.
This is a many-to-one relationship between Event and ScoreType. It stores the
ScoreTypes for each event.
For each Event / ScoreType combination, a new record is also created in
ScoreType, which is not that useful (TODO: consider removing the ScoreType table).
Parameters
----------
event : :class:`ramp_database.model.Event`
The event instance.
score_type_object : :class:`rampwf.score_types`
A scoring instance.
Attributes
----------
id : int
The ID of the table row.
name : str
The name of the score.
event_id : int
The ID of the event associated.
event : :class:`ramp_database.model.Event`
The event instance.
score_type_id : int
The ID of the score.
score_type : :class:`ramp_database.model.ScoreType`
The score type instance.
precision : int
The numerical precision of the score.
submissions : list of :class:`ramp_database.model.SubmissionScore`
A back-reference of the submissions for the event/score type.
"""
__tablename__ = "event_score_types"
id = Column(Integer, primary_key=True)
# Can be renamed, default is the same as score_type.name
name = Column(String, nullable=False)
event_id = Column(Integer, ForeignKey("events.id"), nullable=False)
event = relationship(
"Event", backref=backref("score_types", cascade="all, delete-orphan")
)
score_type_id = Column(Integer, ForeignKey("score_types.id"), nullable=False)
score_type = relationship("ScoreType", backref=backref("events"))
# display precision in n_digits
# default is the same as score_type.precision
precision = Column(Integer)
UniqueConstraint(event_id, score_type_id, name="es_constraint")
UniqueConstraint(event_id, name, name="en_constraint")
def __init__(self, event, score_type_object):
self.event = event
self.score_type = ScoreType(str(uuid.uuid4()), True, 0, 1)
# XXX after migration we should store the index of the
# score_type so self.score_type_object (should be renamed
# score_type) wouldn't have to do a search each time.
self.name = score_type_object.name
self.precision = score_type_object.precision
def __repr__(self):
return "{}: {}".format(self.name, self.event)
@property
def score_type_object(self):
""":class:`rampwf.score_types`: Score type object."""
score_types = self.event.problem.module.score_types
for score_type in score_types:
if score_type.name == self.name:
return score_type
@property
def score_function(self):
"""callable: Scoring function."""
return self.score_type_object.score_function
@property
def is_lower_the_better(self):
"""bool: Whether a lower score is better."""
return self.score_type_object.is_lower_the_better
@property
def minimum(self):
"""float: the lower bound of the score."""
return self.score_type_object.minimum
@property
def maximum(self):
"""float: the higher bound of the score."""
return self.score_type_object.maximum
@property
def worst(self):
"""float: the worst possible score."""
return self.score_type_object.worst
class EventAdmin(Model):
"""EventAdmin table.
This is a many-to-many relationship between Event and User used to define the
event admins.
Parameters
----------
event : :class:`ramp_database.model.Event`
The event instance.
admin : :class:`ramp_database.model.User`
The user instance.
Attributes
----------
id : int
The ID of the table row.
event_id : int
The ID of the event.
event : :class:`ramp_database.model.Event`
The event instance.
admin_id : int
The ID of the user defined as an admin.
admin : :class:`ramp_database.model.User`
The user instance.
"""
__tablename__ = "event_admins"
id = Column(Integer, primary_key=True)
event_id = Column(Integer, ForeignKey("events.id"), nullable=False)
event = relationship(
"Event", backref=backref("event_admins", cascade="all, delete-orphan")
)
admin_id = Column(Integer, ForeignKey("users.id"), nullable=False)
admin = relationship(
"User", backref=backref("admined_events", cascade="all, delete-orphan")
)
class EventTeam(Model):
"""EventTeam table.
This is a many-to-many relationship between Event and Team.
Parameters
----------
event : :class:`ramp_database.model.Event`
The event instance.
team : :class:`ramp_database.model.Team`
The team instance.
Attributes
----------
id : int
The ID of a row in the table.
event_id : int
The ID of the event.
event : :class:`ramp_database.model.Event`
The event instance.
team_id : int
The ID of the team.
team : :class:`ramp_database.model.Team`
The team instance.
is_active : bool
Whether the team is active for the event.
last_submission_name : str
The name of the last submission to the event.
signup_timestamp : datetime
The date and time when the team signed up for the event.
approved : bool
Whether the team has been approved to participate in the event.
leaderboard_html : str
The leaderboard for the team for the specific event.
failed_leaderboard_html : str
The failed submission board for the team for the specific event.
new_leaderboard_html : str
The new submission board for the team for the specific event.
submissions : list of :class:`ramp_database.model.Submission`
A back-reference to the submissions associated with this event/team.
"""
__tablename__ = "event_teams"
id = Column(Integer, primary_key=True)
event_id = Column(Integer, ForeignKey("events.id"), nullable=False)
event = relationship(
"Event", backref=backref("event_teams", cascade="all, delete-orphan")
)
team_id = Column(Integer, ForeignKey("teams.id"), nullable=False)
team = relationship(
"Team", backref=backref("team_events", cascade="all, delete-orphan")
)
is_active = Column(Boolean, default=True)
last_submission_name = Column(String, default=None)
signup_timestamp = Column(DateTime, nullable=False)
approved = Column(Boolean, default=False)
leaderboard_html = Column(String, default=None)
failed_leaderboard_html = Column(String, default=None)
new_leaderboard_html = Column(String, default=None)
UniqueConstraint(event_id, team_id, name="et_constraint")
def __init__(self, event, team):
self.event = event
self.team = team
self.signup_timestamp = datetime.datetime.utcnow()
def __repr__(self):
return "{}/{}".format(self.event, self.team)
|
import os
import csv
from django.core.management.base import BaseCommand
from django.db import transaction
from api.models import Action, Country, DisasterType, SituationReportType
from api.logger import logger
class Command(BaseCommand):
help = 'Import translated strings from a CSV. Either use the --table and --field params \
or else the CSV has to be named like "tablename__fieldname.csv" (ex. api_country__name.csv). \
Delimiter should be ";". Field order: original, fr, es, ar (ex. name, name_fr, name_es, name_ar)'
missing_args_message = "Filename is missing."
def add_arguments(self, parser):
parser.add_argument('filename', nargs='+', type=str)
parser.add_argument(
'-t',
'--table',
type=str,
help='Database table name of the translated strings'
)
parser.add_argument(
'-f',
'--field',
type=str,
help='Database field name of the translated strings'
)
@transaction.atomic
def handle(self, *args, **kwargs):
''' Example CSV header: name; name_fr; name_es; name_ar '''
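# Hypothetical invocations (the command name depends on this file's location
# under management/commands/):
#   python manage.py <command_name> api_country__name.csv
#   python manage.py <command_name> strings.csv --table api_country --field name
# Example CSV row (original;fr;es;ar): Iceland;Islande;Islandia;آيسلندا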
filename = kwargs['filename'][0]
# os.path.split() [0] is the folder [1] is the filename
tablename = kwargs['table'] or os.path.split(filename)[1].split('__')[0]
# [:-4] is to remove '.csv' from the end
fieldname = kwargs['field'] or os.path.split(filename)[1].split('__')[1][:-4]
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=';')
fieldnames = next(reader)
translations = list(reader)
for tr in translations:
if tablename == 'api_country':
# fieldname = 'name'
# **{fieldname: tr[0]} = name=tr[0]
country = Country.objects.filter(**{fieldname: tr[0]})
if country:
country.update(**{
fieldnames[1]: tr[1],
fieldnames[2]: tr[2],
fieldnames[3]: tr[3]
})
else:
logger.info(f'No Country in GO DB with the string: {tr[0]}')
elif tablename == 'api_action':
action = Action.objects.filter(**{fieldname: tr[0]})
if action:
action.update(**{
fieldnames[1]: tr[1],
fieldnames[2]: tr[2],
fieldnames[3]: tr[3]
})
else:
logger.info(f'No Action in GO DB with the string: {tr[0]}')
elif tablename == 'api_disastertype':
distype = DisasterType.objects.filter(**{fieldname: tr[0]})
if distype:
distype.update(**{
fieldnames[1]: tr[1],
fieldnames[2]: tr[2],
fieldnames[3]: tr[3]
})
else:
logger.info(f'No DisasterType in GO DB with the string: {tr[0]}')
elif tablename == 'api_situationreporttype':
sitreptype = SituationReportType.objects.filter(**{fieldname: tr[0]})
if sitreptype:
sitreptype.update(**{
fieldnames[1]: tr[1],
fieldnames[2]: tr[2],
fieldnames[3]: tr[3]
})
else:
logger.info(f'No SituationReportType in GO DB with the string: {tr[0]}')
print('done!')
|
from ctre import WPI_TalonSRX
class Shooter:
motor: WPI_TalonSRX
def __init__(self):
self.ref_velocity = 0
def enable(self):
self.ref_velocity = 1
def disable(self):
self.ref_velocity = 0
def ready(self):
return self.motor.getQuadratureVelocity() > 4500
def execute(self):
self.motor.set(WPI_TalonSRX.ControlMode.PercentOutput, self.ref_velocity)
|
"""fasterRCNN训练的损失函数与数据生成器"""
from keras.applications.imagenet_utils import preprocess_input
from keras import backend as K
import keras
import tensorflow as tf
import numpy as np
from random import shuffle
import random
from PIL import Image
from keras.objectives import categorical_crossentropy
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
import sys
sys.path.append("..")
from net import RPN as RPN
def rand(a=0, b=1):
return np.random.rand() * (b - a) + a
def cls_loss(ratio=3):
def _cls_loss(y_true, y_pred):
# y_true [batch_size, num_anchor, num_classes+1]
# y_pred [batch_size, num_anchor, num_classes]
labels = y_true
anchor_state = y_true[:, :, -1] # -1: to be ignored, 0: background, 1: contains an object
classification = y_pred
# select the anchors that contain an object
indices_for_object = tf.where(keras.backend.equal(anchor_state, 1))
labels_for_object = tf.gather_nd(labels, indices_for_object)
classification_for_object = tf.gather_nd(classification, indices_for_object)
cls_loss_for_object = keras.backend.binary_crossentropy(labels_for_object, classification_for_object)
# select the anchors that are actually background
indices_for_back = tf.where(keras.backend.equal(anchor_state, 0))
labels_for_back = tf.gather_nd(labels, indices_for_back)
classification_for_back = tf.gather_nd(classification, indices_for_back)
# compute the weight each anchor should have
cls_loss_for_back = keras.backend.binary_crossentropy(labels_for_back, classification_for_back)
# normalization: effectively the number of positive samples
normalizer_pos = tf.where(keras.backend.equal(anchor_state, 1))
normalizer_pos = keras.backend.cast(keras.backend.shape(normalizer_pos)[0], keras.backend.floatx())
normalizer_pos = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer_pos)
normalizer_neg = tf.where(keras.backend.equal(anchor_state, 0))
normalizer_neg = keras.backend.cast(keras.backend.shape(normalizer_neg)[0], keras.backend.floatx())
normalizer_neg = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer_neg)
# divide the obtained losses by the number of positive/negative samples
cls_loss_for_object = keras.backend.sum(cls_loss_for_object) / normalizer_pos
cls_loss_for_back = ratio * keras.backend.sum(cls_loss_for_back) / normalizer_neg
# total loss
loss = cls_loss_for_object + cls_loss_for_back
return loss
return _cls_loss
def smooth_l1(sigma=1.0):
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
# y_true [batch_size, num_anchor, 4+1]
# y_pred [batch_size, num_anchor, 4]
regression = y_pred
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# find the positive samples
indices = tf.where(keras.backend.equal(anchor_state, 1))
regression = tf.gather_nd(regression, indices)
regression_target = tf.gather_nd(regression_target, indices)
# compute the smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
regression_loss = tf.where(
keras.backend.less(regression_diff, 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),
regression_diff - 0.5 / sigma_squared
)
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
loss = keras.backend.sum(regression_loss) / normalizer
return loss
return _smooth_l1
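# For example, with sigma=1 a residual of 0.5 contributes 0.5 * 0.5**2 = 0.125,
# while a residual of 2.0 contributes 2.0 - 0.5 = 1.5.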
def class_loss_regr(num_classes):
epsilon = 1e-4
def class_loss_regr_fixed_num(y_true, y_pred):
x = y_true[:, :, 4 * num_classes:] - y_pred
x_abs = K.abs(x)
x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
loss = 4 * K.sum(
y_true[:, :, :4 * num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(
epsilon + y_true[:, :, :4 * num_classes])
return loss
return class_loss_regr_fixed_num
def class_loss_cls(y_true, y_pred):
return K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
def get_new_img_size(width, height, img_min_side=600):
if width <= height:
f = float(img_min_side) / width
resized_height = int(f * height)
resized_width = int(img_min_side)
else:
f = float(img_min_side) / height
resized_width = int(f * width)
resized_height = int(img_min_side)
return resized_width, resized_height
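# For example, a 1000x750 image is resized to 800x600: the shorter side becomes
# img_min_side=600 and the aspect ratio is preserved.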
def get_img_output_length(width, height):
def get_output_length(input_length):
# input_length += 6
filter_sizes = [7, 3, 1, 1]
padding = [3, 1, 0, 0]
stride = 2
for i in range(4):
# input_length = (input_length - filter_size + stride) // stride
input_length = (input_length + 2 * padding[i] - filter_sizes[i]) // stride + 1
return input_length
return get_output_length(width), get_output_length(height)
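# For example, a 600x600 input maps to a 38x38 feature map:
# 600 -> (600 + 2*3 - 7)//2 + 1 = 300 -> 150 -> 75 -> 38,
# which matches the 38*38*9 anchors generated below.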
class Generator(object):
def __init__(self, bbox_util, train_lines, num_classes, solid, solid_shape=[600, 600]):
self.bbox_util = bbox_util
self.train_lines = train_lines
self.train_batches = len(train_lines)
self.num_classes = num_classes
self.solid = solid
# used to fix the size of the training images to (600, 600)
self.solid_shape = solid_shape
def get_random_data(self, annotation_line, jitter=.3, hue=.1, sat=1.5, val=1.5):
"""数据增强,提高模型鲁棒性"""
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
# if solid=True, the training images are forcibly resized
if self.solid:
w, h = self.solid_shape
else:
w, h = get_new_img_size(iw, ih)
box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])
# resize image
new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(1 - jitter, 1 + jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale * h)
nw = int(nh * new_ar)
else:
nw = int(scale * w)
nh = int(nw / new_ar)
image = image.resize((nw, nh), Image.BICUBIC)
# place image
dx = int(rand(0, w - nw))
dy = int(rand(0, h - nh))
new_image = Image.new('RGB', (w, h), (128, 128, 128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand() < .5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
x = rgb_to_hsv(np.array(image) / 255.)
x[..., 0] += hue
x[..., 0][x[..., 0] > 1] -= 1
x[..., 0][x[..., 0] < 0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x > 1] = 1
x[x < 0] = 0
image_data = hsv_to_rgb(x) * 255 # numpy array, 0 to 255
# correct boxes
box_data = np.zeros((len(box), 5))
if len(box) > 0:
np.random.shuffle(box)
box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
box[:, 0:2][box[:, 0:2] < 0] = 0
box[:, 2][box[:, 2] > w] = w
box[:, 3][box[:, 3] > h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w > 1, box_h > 1)] # discard invalid box
box_data = np.zeros((len(box), 5))
box_data[:len(box)] = box
if len(box) == 0:
return image_data, []
if (box_data[:, :4] > 0).any():
return image_data, box_data
else:
return image_data, []
def generate(self):
"""数据生成器"""
while True:
# 打乱2007_train.txt
shuffle(self.train_lines)
lines = self.train_lines
for annotation_line in lines:
# augment each line (i.e. each image): vary lighting, contrast, etc. to diversify the data and improve robustness
# img is the augmented image, y holds the target information
img, y = self.get_random_data(annotation_line)
height, width, _ = np.shape(img)
# skip images with no targets
if len(y) == 0:
continue
# normalize the target box coordinates
boxes = np.array(y[:, :4], dtype=np.float32)
boxes[:, 0] = boxes[:, 0] / width
boxes[:, 1] = boxes[:, 1] / height
boxes[:, 2] = boxes[:, 2] / width
boxes[:, 3] = boxes[:, 3] / height
box_heights = boxes[:, 3] - boxes[:, 1]
box_widths = boxes[:, 2] - boxes[:, 0]
# skip samples whose boxes were mislabeled with negative (or zero) extents
if (box_heights <= 0).any() or (box_widths <= 0).any():
continue
y[:, :4] = boxes[:, :4]
# generate the anchors (38*38*9 of them)
anchors = RPN.create_anchor(get_img_output_length(width, height), width, height)
# match ground-truth boxes to anchors; positives are anchors that match a ground-truth box, negatives are background
assignment = self.bbox_util.assign_boxes(y, anchors)
# training usually samples 128 positive and 128 negative anchors at random
num_regions = 256
classification = assignment[:, 4]
regression = assignment[:, :]
mask_pos = classification[:] > 0
num_pos = len(classification[mask_pos])
# if there are more than 128 positives, ignore the surplus
if num_pos > num_regions / 2:
val_locs = random.sample(range(num_pos), int(num_pos - num_regions / 2))
# use integer indices so the assignment modifies the arrays in place
pos_indices = np.where(mask_pos)[0]
classification[pos_indices[val_locs]] = -1
regression[pos_indices[val_locs], -1] = -1
mask_neg = classification[:] == 0
num_neg = len(classification[mask_neg])
# if there are too many negatives, ignore some as well to balance positives and negatives
if len(classification[mask_neg]) + num_pos > num_regions:
val_locs = random.sample(range(num_neg), int(num_neg - num_pos))
neg_indices = np.where(mask_neg)[0]
classification[neg_indices[val_locs]] = -1
classification = np.reshape(classification, [-1, 1])
regression = np.reshape(regression, [-1, 5])
tmp_inp = np.array(img)
tmp_targets = [np.expand_dims(np.array(classification, dtype=np.float32), 0),
np.expand_dims(np.array(regression, dtype=np.float32), 0)]
# 1. preprocess the image  2. return the targets used for training  3. return the ground-truth boxes
yield preprocess_input(np.expand_dims(tmp_inp, 0)), tmp_targets, np.expand_dims(y, 0)
|
import os
from setuptools import setup
#data_files = []
#directories = glob.glob('src/share')
setup(
name = "bmk",
packages=['bmk'],
package_dir = {'' : 'src'},
package_data = {'bmk' : ['share/*']},
author = "Matthew Ballance",
author_email = "matt.ballance@gmail.com",
description = ("Provides a core classes for use by memory-oriented BFMs"),
license = "Apache 2.0",
keywords = ["Python", "CocoTB", "embedded software"],
url = "https://github.com/mballance/bmk",
setup_requires=[
'setuptools_scm',
],
install_requires=[
"pyhvl-rpc",
],
)
|
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock, call
from signedjson.key import generate_signing_key
from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.events import room_version_to_event_format
from synapse.events.builder import EventBuilder
from synapse.handlers.presence import (
FEDERATION_PING_INTERVAL,
FEDERATION_TIMEOUT,
IDLE_TIMER,
LAST_ACTIVE_GRANULARITY,
SYNC_ONLINE_TIMEOUT,
handle_timeout,
handle_update,
)
from synapse.rest.client.v1 import room
from synapse.storage.presence import UserPresenceState
from synapse.types import UserID, get_domain_from_id
from tests import unittest
class PresenceUpdateTestCase(unittest.TestCase):
def test_offline_to_online(self):
wheel_timer = Mock()
user_id = "@foo:bar"
now = 5000000
prev_state = UserPresenceState.default(user_id)
new_state = prev_state.copy_and_replace(
state=PresenceState.ONLINE, last_active_ts=now
)
state, persist_and_notify, federation_ping = handle_update(
prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now
)
self.assertTrue(persist_and_notify)
self.assertTrue(state.currently_active)
self.assertEquals(new_state.state, state.state)
self.assertEquals(new_state.status_msg, state.status_msg)
self.assertEquals(state.last_federation_update_ts, now)
self.assertEquals(wheel_timer.insert.call_count, 3)
wheel_timer.insert.assert_has_calls(
[
call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER),
call(
now=now,
obj=user_id,
then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
),
call(
now=now,
obj=user_id,
then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY,
),
],
any_order=True,
)
def test_online_to_online(self):
wheel_timer = Mock()
user_id = "@foo:bar"
now = 5000000
prev_state = UserPresenceState.default(user_id)
prev_state = prev_state.copy_and_replace(
state=PresenceState.ONLINE, last_active_ts=now, currently_active=True
)
new_state = prev_state.copy_and_replace(
state=PresenceState.ONLINE, last_active_ts=now
)
state, persist_and_notify, federation_ping = handle_update(
prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now
)
self.assertFalse(persist_and_notify)
self.assertTrue(federation_ping)
self.assertTrue(state.currently_active)
self.assertEquals(new_state.state, state.state)
self.assertEquals(new_state.status_msg, state.status_msg)
self.assertEquals(state.last_federation_update_ts, now)
self.assertEquals(wheel_timer.insert.call_count, 3)
wheel_timer.insert.assert_has_calls(
[
call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER),
call(
now=now,
obj=user_id,
then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
),
call(
now=now,
obj=user_id,
then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY,
),
],
any_order=True,
)
def test_online_to_online_last_active_noop(self):
wheel_timer = Mock()
user_id = "@foo:bar"
now = 5000000
prev_state = UserPresenceState.default(user_id)
prev_state = prev_state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=now - LAST_ACTIVE_GRANULARITY - 10,
currently_active=True,
)
new_state = prev_state.copy_and_replace(
state=PresenceState.ONLINE, last_active_ts=now
)
state, persist_and_notify, federation_ping = handle_update(
prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now
)
self.assertFalse(persist_and_notify)
self.assertTrue(federation_ping)
self.assertTrue(state.currently_active)
self.assertEquals(new_state.state, state.state)
self.assertEquals(new_state.status_msg, state.status_msg)
self.assertEquals(state.last_federation_update_ts, now)
self.assertEquals(wheel_timer.insert.call_count, 3)
wheel_timer.insert.assert_has_calls(
[
call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER),
call(
now=now,
obj=user_id,
then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
),
call(
now=now,
obj=user_id,
then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY,
),
],
any_order=True,
)
def test_online_to_online_last_active(self):
wheel_timer = Mock()
user_id = "@foo:bar"
now = 5000000
prev_state = UserPresenceState.default(user_id)
prev_state = prev_state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=now - LAST_ACTIVE_GRANULARITY - 1,
currently_active=True,
)
new_state = prev_state.copy_and_replace(state=PresenceState.ONLINE)
state, persist_and_notify, federation_ping = handle_update(
prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now
)
self.assertTrue(persist_and_notify)
self.assertFalse(state.currently_active)
self.assertEquals(new_state.state, state.state)
self.assertEquals(new_state.status_msg, state.status_msg)
self.assertEquals(state.last_federation_update_ts, now)
self.assertEquals(wheel_timer.insert.call_count, 2)
wheel_timer.insert.assert_has_calls(
[
call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER),
call(
now=now,
obj=user_id,
then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
),
],
any_order=True,
)
def test_remote_ping_timer(self):
wheel_timer = Mock()
user_id = "@foo:bar"
now = 5000000
prev_state = UserPresenceState.default(user_id)
prev_state = prev_state.copy_and_replace(
state=PresenceState.ONLINE, last_active_ts=now
)
new_state = prev_state.copy_and_replace(state=PresenceState.ONLINE)
state, persist_and_notify, federation_ping = handle_update(
prev_state, new_state, is_mine=False, wheel_timer=wheel_timer, now=now
)
self.assertFalse(persist_and_notify)
self.assertFalse(federation_ping)
self.assertFalse(state.currently_active)
self.assertEquals(new_state.state, state.state)
self.assertEquals(new_state.status_msg, state.status_msg)
self.assertEquals(wheel_timer.insert.call_count, 1)
wheel_timer.insert.assert_has_calls(
[
call(
now=now,
obj=user_id,
then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT,
)
],
any_order=True,
)
def test_online_to_offline(self):
wheel_timer = Mock()
user_id = "@foo:bar"
now = 5000000
prev_state = UserPresenceState.default(user_id)
prev_state = prev_state.copy_and_replace(
state=PresenceState.ONLINE, last_active_ts=now, currently_active=True
)
new_state = prev_state.copy_and_replace(state=PresenceState.OFFLINE)
state, persist_and_notify, federation_ping = handle_update(
prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now
)
self.assertTrue(persist_and_notify)
self.assertEquals(new_state.state, state.state)
self.assertEquals(state.last_federation_update_ts, now)
self.assertEquals(wheel_timer.insert.call_count, 0)
def test_online_to_idle(self):
wheel_timer = Mock()
user_id = "@foo:bar"
now = 5000000
prev_state = UserPresenceState.default(user_id)
prev_state = prev_state.copy_and_replace(
state=PresenceState.ONLINE, last_active_ts=now, currently_active=True
)
new_state = prev_state.copy_and_replace(state=PresenceState.UNAVAILABLE)
state, persist_and_notify, federation_ping = handle_update(
prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now
)
self.assertTrue(persist_and_notify)
self.assertEquals(new_state.state, state.state)
self.assertEquals(state.last_federation_update_ts, now)
self.assertEquals(new_state.state, state.state)
self.assertEquals(new_state.status_msg, state.status_msg)
self.assertEquals(wheel_timer.insert.call_count, 1)
wheel_timer.insert.assert_has_calls(
[
call(
now=now,
obj=user_id,
then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
)
],
any_order=True,
)
class PresenceTimeoutTestCase(unittest.TestCase):
def test_idle_timer(self):
user_id = "@foo:bar"
now = 5000000
state = UserPresenceState.default(user_id)
state = state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=now - IDLE_TIMER - 1,
last_user_sync_ts=now,
)
new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
self.assertIsNotNone(new_state)
self.assertEquals(new_state.state, PresenceState.UNAVAILABLE)
def test_sync_timeout(self):
user_id = "@foo:bar"
now = 5000000
state = UserPresenceState.default(user_id)
state = state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=0,
last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1,
)
new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
self.assertIsNotNone(new_state)
self.assertEquals(new_state.state, PresenceState.OFFLINE)
def test_sync_online(self):
user_id = "@foo:bar"
now = 5000000
state = UserPresenceState.default(user_id)
state = state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=now - SYNC_ONLINE_TIMEOUT - 1,
last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1,
)
new_state = handle_timeout(
state, is_mine=True, syncing_user_ids=set([user_id]), now=now
)
self.assertIsNotNone(new_state)
self.assertEquals(new_state.state, PresenceState.ONLINE)
def test_federation_ping(self):
user_id = "@foo:bar"
now = 5000000
state = UserPresenceState.default(user_id)
state = state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=now,
last_user_sync_ts=now,
last_federation_update_ts=now - FEDERATION_PING_INTERVAL - 1,
)
new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
self.assertIsNotNone(new_state)
self.assertEquals(state, new_state)
def test_no_timeout(self):
user_id = "@foo:bar"
now = 5000000
state = UserPresenceState.default(user_id)
state = state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=now,
last_user_sync_ts=now,
last_federation_update_ts=now,
)
new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
self.assertIsNone(new_state)
def test_federation_timeout(self):
user_id = "@foo:bar"
now = 5000000
state = UserPresenceState.default(user_id)
state = state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=now,
last_user_sync_ts=now,
last_federation_update_ts=now - FEDERATION_TIMEOUT - 1,
)
new_state = handle_timeout(
state, is_mine=False, syncing_user_ids=set(), now=now
)
self.assertIsNotNone(new_state)
self.assertEquals(new_state.state, PresenceState.OFFLINE)
def test_last_active(self):
user_id = "@foo:bar"
now = 5000000
state = UserPresenceState.default(user_id)
state = state.copy_and_replace(
state=PresenceState.ONLINE,
last_active_ts=now - LAST_ACTIVE_GRANULARITY - 1,
last_user_sync_ts=now,
last_federation_update_ts=now,
)
new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now)
self.assertIsNotNone(new_state)
self.assertEquals(state, new_state)
class PresenceJoinTestCase(unittest.HomeserverTestCase):
"""Tests remote servers get told about presence of users in the room when
they join and when new local users join.
"""
user_id = "@test:server"
servlets = [room.register_servlets]
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(
"server", http_client=None, federation_sender=Mock()
)
return hs
def prepare(self, reactor, clock, hs):
self.federation_sender = hs.get_federation_sender()
self.event_builder_factory = hs.get_event_builder_factory()
self.federation_handler = hs.get_handlers().federation_handler
self.presence_handler = hs.get_presence_handler()
# self.event_builder_for_2 = EventBuilderFactory(hs)
# self.event_builder_for_2.hostname = "test2"
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self.auth = hs.get_auth()
# We don't actually check signatures in tests, so lets just create a
# random key to use.
self.random_signing_key = generate_signing_key("ver")
def test_remote_joins(self):
# We advance time to something that isn't 0, as we use 0 as a special
# value.
self.reactor.advance(1000000000000)
# Create a room with two local users
room_id = self.helper.create_room_as(self.user_id)
self.helper.join(room_id, "@test2:server")
# Mark test2 as online, test will be offline with a last_active of 0
self.presence_handler.set_state(
UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}
)
self.reactor.pump([0]) # Wait for presence updates to be handled
#
# Test that a new server gets told about existing presence
#
self.federation_sender.reset_mock()
# Add a new remote server to the room
self._add_new_user(room_id, "@alice:server2")
# We shouldn't have sent out any local presence *updates*
self.federation_sender.send_presence.assert_not_called()
# When new server is joined we send it the local users presence states.
# We expect to only see user @test2:server, as @test:server is offline
# and has a zero last_active_ts
expected_state = self.get_success(
self.presence_handler.current_state_for_user("@test2:server")
)
self.assertEqual(expected_state.state, PresenceState.ONLINE)
self.federation_sender.send_presence_to_destinations.assert_called_once_with(
destinations=["server2"], states=[expected_state]
)
#
# Test that only the new server gets sent presence and not existing servers
#
self.federation_sender.reset_mock()
self._add_new_user(room_id, "@bob:server3")
self.federation_sender.send_presence.assert_not_called()
self.federation_sender.send_presence_to_destinations.assert_called_once_with(
destinations=["server3"], states=[expected_state]
)
def test_remote_gets_presence_when_local_user_joins(self):
# We advance time to something that isn't 0, as we use 0 as a special
# value.
self.reactor.advance(1000000000000)
# Create a room with one local users
room_id = self.helper.create_room_as(self.user_id)
# Mark test as online
self.presence_handler.set_state(
UserID.from_string("@test:server"), {"presence": PresenceState.ONLINE}
)
# Mark test2 as online, test will be offline with a last_active of 0.
# Note we don't join them to the room yet
self.presence_handler.set_state(
UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}
)
# Add servers to the room
self._add_new_user(room_id, "@alice:server2")
self._add_new_user(room_id, "@bob:server3")
self.reactor.pump([0]) # Wait for presence updates to be handled
#
# Test that when a local join happens remote servers get told about it
#
self.federation_sender.reset_mock()
# Join local user to room
self.helper.join(room_id, "@test2:server")
self.reactor.pump([0]) # Wait for presence updates to be handled
# We shouldn't have sent out any local presence *updates*
self.federation_sender.send_presence.assert_not_called()
# We expect to only send test2 presence to server2 and server3
expected_state = self.get_success(
self.presence_handler.current_state_for_user("@test2:server")
)
self.assertEqual(expected_state.state, PresenceState.ONLINE)
self.federation_sender.send_presence_to_destinations.assert_called_once_with(
destinations=set(("server2", "server3")), states=[expected_state]
)
def _add_new_user(self, room_id, user_id):
"""Add new user to the room by creating an event and poking the federation API.
"""
hostname = get_domain_from_id(user_id)
room_version = self.get_success(self.store.get_room_version(room_id))
builder = EventBuilder(
state=self.state,
auth=self.auth,
store=self.store,
clock=self.clock,
hostname=hostname,
signing_key=self.random_signing_key,
format_version=room_version_to_event_format(room_version),
room_id=room_id,
type=EventTypes.Member,
sender=user_id,
state_key=user_id,
content={"membership": Membership.JOIN},
)
prev_event_ids = self.get_success(
self.store.get_latest_event_ids_in_room(room_id)
)
event = self.get_success(builder.build(prev_event_ids))
self.get_success(self.federation_handler.on_receive_pdu(hostname, event))
# Check that it was successfully persisted.
self.get_success(self.store.get_event(event.event_id))
self.get_success(self.store.get_event(event.event_id))
|
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to OpenHTF configuration files.
As a matter of convention, OpenHTF configuration files should contain values
which are specific to an individual station (not station type). This is
intended to provide a means to decouple deployment of test code from
station-specific configuration or calibration.
Examples of the types of values commonly found in the configuration are
physical port names, IP addresses, calibrated light/sound levels, etc.
Configuration values should not be used to determine test flow, or to control
debug output.
Config keys must be declared as in the following example, where default_value
and description are optional:
from openhtf.util import conf
conf.declare('antimatter_intermix_constant',
default_value=3.14159,
description='Intermix constant calibrated for our warp core.')
Declared keys can be accessed directly as attributes of the conf module. To
avoid naming conflicts, configuration keys must begin with a lowercase letter.
They may also be accessed by treating the conf module as a dictionary, but this
method is discouraged and should only be used in favor of getattr().
from openhtf.util import conf
warp_core.SetIntermixConstant(conf.antimatter_intermix_constant)
# An example of when you might use dict-like access.
for idx in range(5):
warp_core.SetDilithiumRatio(idx, conf['dilthium_ratio_%s' % idx])
Another common mechanism for obtaining configuration values is to use the
conf.inject_positional_args decorator:
from openhtf.util import conf
@conf.inject_positional_args
def ModifyThePhaseVariance(antimatter_intermix_constant, phase_variance):
return antimatter_intermix_constant * phase_variance
# antimatter_intermix_constant will be taken from the configuration value.
x = ModifyThePhaseVariance(phase_variance=2.71828)
Decorating a function with conf.inject_positional_args forces all other
arguments to be passed by keyword in order to avoid ambiguity in the values of
positional args. Values passed via keyword that also exist in the config will
override config values and log a warning message. Keyword args in the function
declaration will not be overridden (because it would be ambiguous which default
to use), and any overlap in keyword arg names and config keys will result in a
warning message.
If the configuration key is declared but no default_value is provided and no
value has been loaded, then no value will be passed, and a TypeError will be
raised unless the value is passed via keyword. Essentially, if `keyword_arg in
conf` evaluates to True, then that keyword arg will be provided from the
configuration unless overridden in the kwargs passed to the function. Otherwise
keyword_arg must be passed via kwargs at function invocation time.
The conf module supports 'in' checks, where `key in conf` will evaluate to True
if conf[key] would successfully provide a value. That is, if either a value
has been loaded or a default_value was declared.
Configuration values may be loaded directly or from a yaml or json file. If no
configuration is loaded, default values will still be accessible. Loading a
configuration always overrides default values, but only overrides previously
loaded values if _override=True (default) for the load* method used. Some
examples of how to load a configuration:
from openhtf.util import conf
conf.declare('antimatter_intermix_constant')
conf.declare('phase_variance')
conf.load(antimatter_intermix_constant=3.14,
phase_variance=2.718)
conf.load_from_dict({
'antimatter_intermix_constant': 3.14,
'phase_variance': 2.718,
})
conf.load_from_file('config.json')
conf.load_from_file('config.yaml')
Note that any of the load* methods here accept an _override keyword argument
that defaults to True, but may be set False to prevent overriding previously
loaded values. Regardless of whether _override is True or False, a message
will be logged indicating how the duplicate value was handled.
conf.load_from_file() attempts to parse the filename given as JSON and as YAML,
if neither succeeds, an exception will be raised. In either case, the value
parsed must be a dictionary mapping configuration key to value. Complex
configuration values are discouraged; they should be kept to single values or
lists of values when possible.
Lastly, configuration values may also be provided via the --config-value flag,
but this is discouraged, and should only be used for debugging purposes.
Configuration values loaded via commandline flags, either --config-file or
--config-value, are not checked against Declarations. This allows for using
configuration files that are supersets of required configuration. Declarations
are *always* checked upon configuration value access, however, so you still
must declare any keys you wish to use.
Loaded configuration values may be purged via the reset() method, but this
should only be used for testing purposes. This will reset the configuration
state to what it was before any load* methods were called (defaults loaded
and flag values used, either directly or from --config-file).
A recommended alternative to using reset() is the @save_and_restore decorator,
which allows you to decorate a function or method so that during execution
of the decorated callable, configuration values are altered (and restored
after execution of that callable). For example:
conf.load(foo='foo')
@conf.save_and_restore(foo='bar')
def do_stuff():
print 'foo has value: ', conf.foo
print 'foo before call: ', conf.foo
do_stuff()
print 'foo after call: ', conf.foo
This example prints:
foo before call: foo
foo has value: bar
foo after call: foo
This is useful primarily for unittest methods (see util/test.py for specific
examples of unittest usages). Note that config overrides may be specified at
decoration time, but do not have to be:
@conf.save_and_restore
def do_stuff():
conf.foo = 'bar'
This is also valid. The entire configuration is restored to the state it had
upon execution of the decorated callable, regardless of which keys are updated
in the decorator or in the decorated callable.
"""
import argparse
import functools
import inspect
import logging
import sys
import threading
import yaml
import mutablerecords
from . import argv
from . import threads
# If provided, --config-file will cause the given file to be load()ed when the
# conf module is initially imported.
ARG_PARSER = argv.ModuleParser()
ARG_PARSER.add_argument(
'--config-file', type=argparse.FileType('r'),
help='File from which to load configuration values.')
ARG_PARSER.add_argument(
'--config-value', action='append', default=[],
help='Allows specifying a configuration key=value on the command line. '
'The format should be --config-value=key=value. This value will override '
'any loaded value, and will be a string.')
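# A hypothetical command line using these flags (the script and file names are made up):
#   python my_station_test.py --config-file=station_config.yaml \
#       --config-value=antimatter_intermix_constant=2.5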
class Configuration(object): # pylint: disable=too-many-instance-attributes
"""A singleton class to replace the 'conf' module.
This class provides the configuration interface described in the module
docstring. All attributes/methods must not begin with a lowercase letter so
as to avoid naming conflicts with configuration keys.
"""
class ConfigurationInvalidError(Exception):
"""Indicates the configuration format was invalid or couldn't be read."""
class KeyAlreadyDeclaredError(Exception):
"""Indicates that a configuration key was already declared."""
class UndeclaredKeyError(Exception):
"""Indicates that a key was required but not predeclared."""
class InvalidKeyError(Exception):
"""Raised when an invalid key is declared or accessed."""
class UnsetKeyError(Exception):
"""Raised when a key value is requested but we have no value for it."""
# pylint: disable=invalid-name,bad-super-call
class Declaration(mutablerecords.Record(
'Declaration', ['name'], {
'description': None, 'default_value': None, 'has_default': False})):
"""Record type encapsulating information about a config declaration."""
def __init__(self, *args, **kwargs):
super(type(self), self).__init__(*args, **kwargs)
# Track this separately to allow None as a default value. This overrides any
# 'has_default' value that was passed in explicitly - don't pass one.
self.has_default = 'default_value' in kwargs
# pylint: enable=invalid-name,bad-super-call
__slots__ = ('_logger', '_lock', '_modules', '_declarations',
'_flag_values', '_flags', '_loaded_values', 'ARG_PARSER',
'__name__')
def __init__(self, logger, lock, parser, **kwargs):
"""Initializes the configuration state.
We have to pull everything we need from global scope into here because we
will be swapping out the module with this instance and will lose any global
references.
Args:
logger: Logger to use for logging messages within this class.
lock: Threading.lock to use for locking access to config values.
**kwargs: Modules we need to access within this class.
"""
self._logger = logger
self._lock = lock
self._modules = kwargs
self._declarations = {}
self.ARG_PARSER = parser
# Parse just the flags we care about, since this happens at import time.
self._flags, _ = parser.parse_known_args()
self._flag_values = {}
# Populate flag_values from flags now.
self.load_flag_values()
# Initialize self._loaded_values and load from --config-file if it's set.
self.reset()
def load_flag_values(self, flags=None):
"""Load flag values given from command line flags.
Args:
flags: An argparse Namespace containing the command line flags.
"""
if flags is None:
flags = self._flags
for keyval in flags.config_value:
k, v = keyval.split('=', 1)
v = self._modules['yaml'].load(v) if isinstance(v, str) else v
self._flag_values.setdefault(k, v)
@staticmethod
def _is_valid_key(key):
"""Return True if key is a valid configuration key."""
return key and key[0].islower()
def __setattr__(self, attr, value):
"""Provide a useful error when attempting to set a value via setattr()."""
if self._is_valid_key(attr):
raise AttributeError("Can't set conf values by attribute, use load()")
# __slots__ is defined above, so this will raise an AttributeError if the
# attribute isn't one we expect; this limits the number of ways to abuse the
# conf module singleton instance. Also note that we can't use super()
# normally here because of the sys.modules swap (Configuration is no longer
# defined, and evaluates to None if used here).
# pylint: disable=bad-super-call
super(type(self), self).__setattr__(attr, value)
# Don't use synchronized on this one, because __getitem__ handles it.
def __getattr__(self, attr): # pylint: disable=invalid-name
"""Get a config value via attribute access."""
if self._is_valid_key(attr):
return self[attr]
# Config keys all begin with a lowercase letter, so treat this normally.
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, attr))
@threads.synchronized
def __getitem__(self, item): # pylint: disable=invalid-name
"""Get a config value via item access.
Order of precedence is:
- Value provided via --config-value flag.
- Value loaded via load*() methods.
- Default value as declared with conf.declare()
Args:
item: Config key name to get.
"""
if item not in self._declarations:
raise self.UndeclaredKeyError('Configuration key not declared', item)
if item in self._flag_values:
if item in self._loaded_values:
self._logger.warning(
'Overriding loaded value for %s (%s) with flag value: %s',
item, self._loaded_values[item], self._flag_values[item])
return self._flag_values[item]
if item in self._loaded_values:
return self._loaded_values[item]
if self._declarations[item].has_default:
return self._declarations[item].default_value
raise self.UnsetKeyError(
'Configuration value not set and has no default', item)
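# Illustration of the precedence above, with hypothetical values: if 'port' is
# declared with default_value=8080, a config file provides port: 9090, and the
# process was started with --config-value=port=7070, then conf.port returns
# 7070; without the flag it returns 9090; with neither it returns 8080.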
@threads.synchronized
def __contains__(self, name): # pylint: disable=invalid-name
"""True if we have a value for name."""
return (name in self._declarations and
(self._declarations[name].has_default or
name in self._loaded_values or
name in self._flag_values))
@threads.synchronized
def declare(self, name, description=None, **kwargs):
"""Declare a configuration key with the given name.
Args:
name: Configuration key to declare, must not have been already declared.
description: If provided, use this as the description for this key.
**kwargs: Other kwargs to pass to the Declaration, only default_value
is currently supported.
"""
if not self._is_valid_key(name):
raise self.InvalidKeyError(
'Invalid key name, must begin with a lowercase letter', name)
if name in self._declarations:
raise self.KeyAlreadyDeclaredError(
'Configuration key already declared', name)
self._declarations[name] = self.Declaration(
name, description=description, **kwargs)
@threads.synchronized
def reset(self):
"""Reset the loaded state of the configuration to what it was at import.
Note that this does *not* reset values set by commandline flags or loaded
from --config-file (in fact, any values loaded from --config-file that have
been overridden are reset to their value from --config-file).
"""
# Populate loaded_values with values from --config-file, if it was given.
self._loaded_values = {}
if self._flags.config_file is not None:
self.load_from_file(self._flags.config_file, _allow_undeclared=True)
def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):
"""Loads the configuration from a file.
Parsed contents must be a single dict mapping config key to value.
Args:
yamlfile: The opened file object to load configuration from.
See load_from_dict() for other args' descriptions.
Raises:
ConfigurationInvalidError: If configuration file can't be read, or can't
be parsed as either YAML (or JSON, which is a subset of YAML).
"""
self._logger.info('Loading configuration from file: %s', yamlfile)
try:
parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read())
except self._modules['yaml'].YAMLError as exception:
raise self.ConfigurationInvalidError(
'Failed to load from %s as YAML' % yamlfile, exception)
if not isinstance(parsed_yaml, dict):
# Parsed YAML, but it's not a dict.
raise self.ConfigurationInvalidError(
'YAML parsed, but wrong type, should be dict', parsed_yaml)
self._logger.debug('Configuration loaded from file: %s', parsed_yaml)
self.load_from_dict(
parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared)
def load(self, _override=True, _allow_undeclared=False, **kwargs):
"""load configuration values from kwargs, see load_from_dict()."""
self.load_from_dict(
kwargs, _override=_override, _allow_undeclared=_allow_undeclared)
@threads.synchronized
def load_from_dict(self, dictionary, _override=True, _allow_undeclared=False):
"""Loads the config with values from a dictionary instead of a file.
This is meant for testing and bin purposes and shouldn't be used in most
applications.
Args:
dictionary: The dictionary containing config keys/values to update.
_override: If True, new values will override previous values.
_allow_undeclared: If True, silently load undeclared keys, otherwise
warn and ignore the value. Typically used for loading config
files before declarations have been evaluated.
"""
undeclared_keys = []
for key, value in dictionary.items():
# Warn in this case. We raise if you try to access a config key that
# hasn't been declared, but we don't raise here so that you can use
# configuration files that are supersets of required configuration for
# any particular test station.
if key not in self._declarations and not _allow_undeclared:
undeclared_keys.append(key)
continue
if key in self._loaded_values:
if _override:
self._logger.info(
'Overriding previously loaded value for %s (%s) with value: %s',
key, self._loaded_values[key], value)
else:
self._logger.info(
'Ignoring new value (%s), keeping previous value for %s: %s',
value, key, self._loaded_values[key])
continue
self._loaded_values[key] = value
if undeclared_keys:
self._logger.warning('Ignoring undeclared configuration keys: %s',
undeclared_keys)
@threads.synchronized
def _asdict(self):
"""Create a dictionary snapshot of the current config values."""
# Start with any default values we have, and override with loaded values,
# and then override with flag values.
retval = {key: self._declarations[key].default_value for
key in self._declarations if self._declarations[key].has_default}
retval.update(self._loaded_values)
# Only update keys that are declared so we don't allow injecting
# un-declared keys via commandline flags.
for key, value in self._flag_values.items():
if key in self._declarations:
retval[key] = value
return retval
@property
def help_text(self):
"""Return a string with all config keys and their descriptions."""
result = []
for name in sorted(self._declarations.keys()):
result.append(name)
result.append('-' * len(name))
decl = self._declarations[name]
if decl.description:
result.append(decl.description.strip())
else:
result.append('(no description found)')
if decl.has_default:
result.append('')
quotes = '"' if type(decl.default_value) is str else ''
result.append(' default_value={quotes}{val}{quotes}'.format(
quotes=quotes, val=decl.default_value))
result.append('')
result.append('')
return '\n'.join(result)
def save_and_restore(self, _func=None, **config_values):
"""Decorator for saving conf state and restoring it after a function.
This decorator is primarily for use in tests, where conf keys may be updated
for individual test cases, but those values need to be reverted after the
test case is done.
Examples:
conf.declare('my_conf_key')
@conf.save_and_restore
def MyTestFunc():
conf.load(my_conf_key='baz')
SomeFuncUnderTestThatUsesMyConfKey()
conf.load(my_conf_key='foo')
MyTestFunc()
print conf.my_conf_key # Prints 'foo', *NOT* 'baz'
# Without the save_and_restore decorator, MyTestFunc() would have had the
# side effect of altering the conf value of 'my_conf_key' to 'baz'.
# Config keys can also be initialized for the context inline at decoration
# time. This is the same as setting them at the beginning of the
# function, but is a little clearer syntax if you know ahead of time what
# config keys and values you need to set.
@conf.save_and_restore(my_conf_key='baz')
def MyOtherTestFunc():
print conf.my_conf_key # Prints 'baz'
MyOtherTestFunc()
print conf.my_conf_key # Prints 'foo' again, for the same reason.
Args:
_func: The function to wrap. The returned wrapper will invoke the
function and restore the config to the state it was in at invocation.
**config_values: Config keys can be set inline at decoration time, see
examples. Note that config keys can't begin with underscore, so
there can be no name collision with _func.
Returns:
Wrapper to replace _func, as per Python decorator semantics.
"""
functools = self._modules['functools'] # pylint: disable=redefined-outer-name
if not _func:
return functools.partial(self.save_and_restore, **config_values)
@functools.wraps(_func)
def _saving_wrapper(*args, **kwargs):
saved_config = dict(self._loaded_values)
try:
self.load_from_dict(config_values)
return _func(*args, **kwargs)
finally:
self._loaded_values = saved_config # pylint: disable=attribute-defined-outside-init
return _saving_wrapper
def inject_positional_args(self, method):
"""Decorator for injecting positional arguments from the configuration.
This decorator wraps the given method, so that any positional arguments are
passed with corresponding values from the configuration. The name of the
positional argument must match the configuration key.
Keyword arguments are *NEVER* modified, even if their names match
configuration keys. Avoid giving keyword args names that are also
configuration keys, to avoid confusion.
Additional positional arguments may be used that do not appear in the
configuration, but those arguments *MUST* be specified as keyword arguments
upon invocation of the method. This is to avoid ambiguity in which
positional arguments are getting which values.
Args:
method: The method to wrap.
Returns:
A wrapper that, when invoked, will call the wrapped method, passing in
configuration values for positional arguments.
"""
inspect = self._modules['inspect']
argspec = inspect.getargspec(method)
# Index in argspec.args of the first keyword argument. This index is a
# negative number if there are any kwargs, or 0 if there are no kwargs.
keyword_arg_index = -1 * len(argspec.defaults or [])
arg_names = argspec.args[:keyword_arg_index or None]
kwarg_names = argspec.args[len(arg_names):]
functools = self._modules['functools'] # pylint: disable=redefined-outer-name
# Create the actual method wrapper, all we do is update kwargs. Note we
# don't pass any *args through because there can't be any - we've filled
# them all in with values from the configuration. Any positional args that
# are missing from the configuration *must* be explicitly specified as
# kwargs.
@functools.wraps(method)
def method_wrapper(**kwargs):
"""Wrapper that pulls values from openhtf.util.conf."""
# Check for keyword args with names that are in the config so we can warn.
for kwarg in kwarg_names:
if kwarg in self:
self._logger.warning('Keyword arg %s not set from configuration, but '
'is a configuration key', kwarg)
# Set positional args from configuration values.
final_kwargs = {name: self[name] for name in arg_names if name in self}
for overridden in set(kwargs) & set(final_kwargs):
self._logger.warning('Overriding configuration value for kwarg %s (%s) '
'with provided kwarg value: %s', overridden,
self[overridden], kwargs[overridden])
final_kwargs.update(kwargs)
if inspect.ismethod(method):
name = '%s.%s' % (method.__self__.__class__.__name__, method.__name__)
else:
name = method.__name__
self._logger.debug('Invoking %s with %s', name, final_kwargs)
return method(**final_kwargs)
# We have to check for a 'self' parameter explicitly because Python doesn't
# pass it as a keyword arg, it passes it as the first positional arg.
if argspec.args[0] == 'self':
@functools.wraps(method)
def self_wrapper(self, **kwargs): # pylint: disable=invalid-name
"""Wrapper that pulls values from openhtf.util.conf."""
kwargs['self'] = self
return method_wrapper(**kwargs)
return self_wrapper
return method_wrapper
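# A minimal usage sketch for inject_positional_args (names are hypothetical):
#   conf.declare('station_id', default_value='station-1')
#   @conf.inject_positional_args
#   def start_station(station_id, retries=3):
#     ...
#   start_station()           # station_id is filled in from the configuration.
#   start_station(retries=5)  # Keyword args are passed through unmodified.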
# Swap out the module for a singleton instance of Configuration so we can
# provide __getattr__ and __getitem__ functionality at the module level.
sys.modules[__name__] = Configuration(
logging.getLogger(__name__), threading.RLock(), ARG_PARSER,
functools=functools, inspect=inspect, yaml=yaml)
|
#!/usr/bin/python
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import Adafruit_DHT
import httplib, urllib
import socket  # socket.error is caught in post_to_mcs() below
import json
deviceId = "DpQUAGQx"
deviceKey = "FSe2y0pELSD4qjzq"
def post_to_mcs(payload):
headers = {"Content-type": "application/json", "deviceKey": deviceKey}
not_connected = 1
while (not_connected):
try:
conn = httplib.HTTPConnection("api.mediatek.com:80")
conn.connect()
not_connected = 0
except (httplib.HTTPException, socket.error) as ex:
print "Error: %s" % ex
time.sleep(10)
# sleep 10 seconds
conn.request("POST", "/mcs/v2/devices/" + deviceId + "/datapoints", json.dumps(payload), headers)
response = conn.getresponse()
print( response.status, response.reason, json.dumps(payload), time.strftime("%c"))
data = response.read()
conn.close()
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(14, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Parse command line parameters.
sensor_args = { '11': Adafruit_DHT.DHT11,
'22': Adafruit_DHT.DHT22,
'2302': Adafruit_DHT.AM2302 }
if len(sys.argv) == 3 and sys.argv[1] in sensor_args:
sensor = sensor_args[sys.argv[1]]
pin = sys.argv[2]
else:
print('Usage: sudo ./Adafruit_DHT.py [11|22|2302] <GPIO pin number>')
print('Example: sudo ./Adafruit_DHT.py 2302 4 - Read from an AM2302 connected to GPIO pin #4')
sys.exit(1)
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
# Un-comment the line below to convert the temperature to Fahrenheit.
# temperature = temperature * 9/5.0 + 32
# Note that sometimes you won't get a reading and
# the results will be null (because Linux can't
# guarantee the timing of calls to read the sensor).
# If this happens try again!
while True:
SwitchStatus = GPIO.input(14)
h0, t0= Adafruit_DHT.read_retry(11, 4)
if h0 is not None and t0 is not None:
print('Temp={0:0.1f}* Humidity={1:0.1f}%'.format(t0, h0))
payload = {"datapoints":[{"dataChnId":"puddingH","values":{"value":h0}},
{"dataChnId":"puddingT","values":{"value":t0}},{"dataChnId":"SwitchStatus","values":{"value":SwitchStatus}}]}
post_to_mcs(payload)
time.sleep(10)
else:
print('Failed to get reading. Try again!')
sys.exit(1)
|
from django.db import models
class Person(models.Model):
age = models.IntegerField()
name = models.CharField(max_length=100)
class Document(models.Model):
myfile = models.FileField(upload_to="uploads")
|
from Simulation.calculation_status import CalculationStatus
from Simulation.sign_function import SignFunction
from Simulation.mcad import Mcad
from Simulation.market_snapshot import MarketSnapshot
from Simulation.stock_snapshot_helper import StockSnapshotHelper
from Simulation.visualization_data import VisualizationData
from Simulation.exponential_moving_average import ExponentialMovingAverage
__author__ = 'Raymond & Albert'
class McadSignalLine:
def __init__(self, total_capital, num_stocks):
self.transaction_amount = total_capital / num_stocks
self.mcads = []
self.old_mcads = []
self.mcad_EMA = []
self.old_mcad_EMA = []
self.visualization_data = VisualizationData()
for count in range(num_stocks):
self.mcads.append(Mcad())
for count in range(num_stocks):
self.old_mcads.append(CalculationStatus.Invalid)
# One 9-period EMA per stock, used as the MACD signal line in notify().
for count in range(num_stocks):
self.mcad_EMA.append(ExponentialMovingAverage(9))
for count in range(num_stocks):
self.old_mcad_EMA.append(CalculationStatus.Invalid)
def notify(self, market_snapshot: MarketSnapshot):
decisions = []
for i, stock_snapshot in enumerate(market_snapshot.stock_snapshots):
stock_snapshot_helper = StockSnapshotHelper(stock_snapshot)
mid_price = stock_snapshot_helper.get_mid_price()
curr_mcad = self.mcads[i].evaluate(mid_price)
self.visualization_data.add_price(stock_snapshot.ticker, mid_price)
del_mcad = 0
if curr_mcad == CalculationStatus.Invalid:
self.visualization_data.add_mcad(stock_snapshot.ticker, 0)
continue
else:
self.visualization_data.add_mcad(stock_snapshot.ticker, curr_mcad)
# (reuse the per-stock EMA created in __init__; see self.mcad_EMA[i] below)
if self.old_mcads[i] == CalculationStatus.Invalid:
self.old_mcads[i] = curr_mcad
continue
curr_mcad_EMA = self.mcad_EMA[i].evaluate(curr_mcad)
if curr_mcad_EMA == CalculationStatus.Invalid:
self.old_mcad_EMA[i] = curr_mcad_EMA
continue
else:
del_mcad = SignFunction.evaluate(curr_mcad-curr_mcad_EMA) - SignFunction.evaluate(self.old_mcads[i]-curr_mcad_EMA)
self.old_mcad_EMA[i] = curr_mcad_EMA
if del_mcad > 0:
decisions.append((stock_snapshot.ticker, -self.transaction_amount))
elif del_mcad < 0:
decisions.append((stock_snapshot.ticker, self.transaction_amount))
return decisions
def reset(self):
for mcad in self.mcads:
mcad.reset()
self.old_mcads = [CalculationStatus.Invalid for old_mcad in self.old_mcads]
visualization_data_holder = self.visualization_data
self.visualization_data = VisualizationData()
return visualization_data_holder
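# Rough usage sketch (construction of the market snapshot is hypothetical and
# depends on the rest of the Simulation package):
#   strategy = McadSignalLine(total_capital=100000, num_stocks=5)
#   decisions = strategy.notify(market_snapshot)
#   # Each decision is a (ticker, amount) pair sized by transaction_amount; the
#   # sign flips depending on which way the MACD crossed its 9-period signal
#   # line, as implemented in notify() above.
#   viz = strategy.reset()  # returns the accumulated VisualizationData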
|
_base_ = [
'../../_base_/models/tsm_r50.py', '../../_base_/schedules/sgd_tsm_100e.py',
'../../_base_/default_runtime.py'
]
# model settings
model = dict(backbone=dict(pretrained='weight/resnet50-19c8e357.pth'),cls_head=dict(num_classes=2))
log_config = dict(
interval=1,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
])
optimizer = dict(
type='SGD',
constructor='TSMOptimizerConstructor',
paramwise_cfg=dict(fc_lr5=True),
lr=0.0025,  # this lr is used for 1 GPU
momentum=0.9,
weight_decay=0.0001)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = '/home/jovyan/data-vol-1/fight_recognition/fight-detection-rawframes'
data_root_val = '/home/jovyan/data-vol-1/fight_recognition/fight-detection-rawframes'
ann_file_train = '/home/jovyan/data-vol-1/fight_recognition/train_videofolder.txt'
ann_file_val = '/home/jovyan/data-vol-1/fight_recognition/val_videofolder.txt'
ann_file_test = '/home/jovyan/data-vol-1/fight_recognition/val_videofolder.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
# clip_len is the length of a single sampled clip
dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(
type='MultiScaleCrop',
input_size=224,
scales=(1, 0.875, 0.75, 0.66),
random_crop=False,
max_wh_scale_gap=1,
num_fixed_crops=13),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=8,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=8,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# runtime settings
checkpoint_config = dict(interval=5)
work_dir = './work_dirs/tsm_r50_1x1x8_100e_customdataset_rgb/'
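# This config is assumed to be launched with the standard mmaction2 entry point,
# e.g. (the path to this file depends on the local checkout):
#   python tools/train.py <path/to/this_config.py>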
|
import matplotlib.pyplot as plt
import nnfs
from nnfs.datasets import vertical_data
nnfs.init()
X, y = vertical_data(samples=100, classes=3)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap='brg')
plt.show()
import numpy as np
import nnfs
import matplotlib.pyplot as plt
nnfs.init()
class Layer_Dense:
def __init__(self, n_inputs, n_neurons):
self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
self.biases = np.zeros((1,n_neurons))
def forward(self, inputs):
self.output = np.dot(inputs, self.weights) + self.biases
class Activation_ReLU:
# Forward Pass
def forward(self, inputs):
self.output = np.maximum(0,inputs)
class Activation_Softmax:
def forward(self, inputs):
exp_values= np.exp(inputs - np.max(inputs, axis=1, keepdims=True))
normalized = exp_values / np.sum(exp_values, axis=1, keepdims=True)
self.output = normalized
class Loss:
# Calculates the data and regularization losses
# given model output and ground truth values
def calculate(self, output, y):
# Calculate sample losses
sample_losses = self.forward(output, y)
# Calculate mean loss
data_loss = np.mean(sample_losses)
# Return loss
return data_loss
class Loss_CategoricalCrossEntropy(Loss):
def forward(self, y_pred, y_true):
# Number of Samples
samples = len(y_pred)
# Clip data to prevent division by 0
# Clip both sides to avoid dragging the mean towards any value
y_pred_clipped = np.clip(y_pred, 1e-7, 1-1e-7)
# Probabilities for target values -
# Only if categorical labels
if len(y_true.shape) == 1:
correct_confidences = y_pred_clipped[range(samples), y_true]
# Mask Values - only for one-hot encoded labels
elif len(y_true.shape) == 2:
correct_confidences = np.sum(y_pred_clipped * y_true, axis=1)
negative_log_likelihoods = -np.log(correct_confidences)
return negative_log_likelihoods
# Model
dense1 = Layer_Dense(2,3)
activation1 = Activation_ReLU()
dense2 = Layer_Dense(3, 3)
activation2 = Activation_Softmax()
loss_function = Loss_CategoricalCrossEntropy()
# Helper variables
lowest_loss = 9999999 # some initial value
best_dense1_weights = dense1.weights.copy()
best_dense1_biases = dense1.biases.copy()
best_dense2_weights = dense2.weights.copy()
best_dense2_biases = dense2.biases.copy()
for iteration in range(10000):
# Generate a new set of weights for iteration
dense1.weights += 0.05 * np.random.randn(2, 3)
dense1.biases += 0.05 * np.random.randn(1, 3)
dense2.weights += 0.05 * np.random.randn(3, 3)
dense2.biases += 0.05 * np.random.randn(1, 3)
# Perform a forward pass of the training data through this layer
dense1.forward(X)
activation1.forward(dense1.output)
dense2.forward(activation1.output)
activation2.forward(dense2.output)
# Perform a forward pass through activation function
# it takes the output of second dense layer here and returns loss
loss = loss_function.calculate(activation2.output, y)
# Calculate accuracy from output of activation2 and targets
# calculate values along first axis
predictions = np.argmax(activation2.output, axis=1)
accuracy = np.mean(predictions==y)
# If loss is smaller - print and save weights and biases aside
if loss < lowest_loss:
print('New set of weights found, iteration:', iteration,
'loss:', loss, 'acc:', accuracy)
best_dense1_weights = dense1.weights.copy()
best_dense1_biases = dense1.biases.copy()
best_dense2_weights = dense2.weights.copy()
best_dense2_biases = dense2.biases.copy()
lowest_loss = loss
# Revert weights and biases
else:
dense1.weights = best_dense1_weights.copy()
dense1.biases = best_dense1_biases.copy()
dense2.weights = best_dense2_weights.copy()
dense2.biases = best_dense2_biases.copy()
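# Follow-up sketch (not part of the original random search): restore the best
# weights found and report the final loss/accuracy on the same data.
dense1.weights = best_dense1_weights.copy()
dense1.biases = best_dense1_biases.copy()
dense2.weights = best_dense2_weights.copy()
dense2.biases = best_dense2_biases.copy()
dense1.forward(X)
activation1.forward(dense1.output)
dense2.forward(activation1.output)
activation2.forward(dense2.output)
print('Best loss:', loss_function.calculate(activation2.output, y),
'accuracy:', np.mean(np.argmax(activation2.output, axis=1) == y))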
|
import ldb
# console based do not edit
while True:
cmd = str(input("LDB > "))
cmd = cmd.split(" ")
if cmd[0].lower() == "exit":
break
elif cmd[0].lower() == "init":
ldb.init()
elif cmd[0].lower() == "create":
ldb.create(list(cmd[1:]))
elif cmd[0].lower() == "view":
ldb.view()
elif cmd[0].lower() == "add_c":
ldb.add_c(list(cmd[1:]))
elif cmd[0].lower() == "add_r":
ldb.add_r(list(cmd[1:]))
elif cmd[0].lower() == "clear_r":
ldb.clear_r(cmd[1:])
elif cmd[0].lower() == "clear_c":
ldb.clear_c(cmd[1:])
elif cmd[0].lower() == "clearall":
ldb.clearall()
elif cmd[0].lower() == "store":
ldb.store()
elif cmd[0].lower() == "retrieve":
ldb.retrieve()
elif cmd[0].lower() == "genid":
ldb.genid()
elif cmd[0].lower() == "return_r":
print(ldb.return_r(cmd[1:]))
elif cmd[0].lower() == "return_c":
print(ldb.return_c(int(cmd[1])))
elif cmd[0].lower() == "update_r":
ldb.update_r(int(cmd[1]), cmd[2:])
elif cmd[0].lower() == "update_c":
ldb.update_c(int(cmd[1]), cmd[2:])
elif cmd[0].lower() == "update_ri":
ldb.update_ri(int(cmd[1]), int(cmd[2]), cmd[3:])
elif cmd[0].lower() == "sort_col":
ldb.sort_col(int(cmd[1]), len(cmd) == 2)
elif cmd[0].lower() == "find":
ldb.find(cmd[1])
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for JAX primitive coverage."""
import unittest
from absl.testing import absltest
from absl.testing import parameterized
from functools import partial
import jax
from jax import dtypes
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax.config import config
from jax.experimental import jax2tf
from jax.experimental.jax2tf.tests import tf_test_util
from jax.interpreters import xla
import numpy as np
import tensorflow as tf # type: ignore[import]
config.parse_flags_with_absl()
# Import after parsing flags
from jax.experimental.jax2tf.tests import primitive_harness
REDUCE = (
jnp.all,
jnp.any,
jnp.max,
jnp.min,
jnp.prod,
jnp.sum,
)
INDEX = (
jax.ops.index_add,
jax.ops.index_max,
jax.ops.index_min,
jax.ops.index_mul,
jax.ops.index_update,
)
class JaxPrimitiveTest(tf_test_util.JaxToTfTestCase):
def test_primitive_coverage(self):
"""Fail if there are JAX primitives that are not implemented."""
# Harvest primitives from XLA translation tables
all_primitives = (set(xla.translations)
| set(xla.backend_specific_translations['cpu'])
| set(xla.backend_specific_translations['gpu'])
| set(xla.backend_specific_translations['tpu'])
| set(xla.initial_style_translations)
| set(xla.parallel_translations))
tf_impl = set(jax.experimental.jax2tf.jax2tf.tf_impl)
tf_not_yet_impl = set(jax.experimental.jax2tf.jax2tf.tf_not_yet_impl)
all_primitives = tuple(sorted(all_primitives, key=str))
for p in all_primitives:
# TODO: remove tie_in once omnistaging is on by default
if p.name == "axis_index" or p.name == "tie_in":
continue
if p in tf_not_yet_impl:
self.assertNotIn(p, tf_impl) # Should not be in both tf_impl and tf_not_yet_impl
else:
self.assertIn(p, tf_impl)
@parameterized.named_parameters(
dict(testcase_name=f"_{f_jax.__name__}",
f_jax=f_jax)
for f_jax in [jnp.add, jnp.subtract, jnp.multiply, jnp.divide,
jnp.less, jnp.less_equal, jnp.equal, jnp.greater,
jnp.greater_equal, jnp.not_equal, jnp.maximum,
jnp.minimum])
def test_type_promotion(self, f_jax=jnp.add):
# We only test a few types here, as tensorflow does not support many
# types like uint* or bool in binary ops.
types = [dtypes.bfloat16, np.int32, np.int64, np.float32]
for x_dtype in types:
for y_dtype in types:
x = np.array([1, 2], dtype=x_dtype)
y = np.array([3, 4], dtype=y_dtype)
self.ConvertAndCompare(f_jax, x, y)
def test_concat(self):
values = [np.array([1, 2], dtype=np.float32),
np.array([1, 2], dtype=np.int32),
np.array([1, 2], dtype=np.int8)]
f_jax = jax.jit(lambda x: jnp.concatenate(x, axis=0))
self.ConvertAndCompare(f_jax, values)
@primitive_harness.parameterized(primitive_harness.lax_pad)
def test_pad(self, harness: primitive_harness.Harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_top_k)
def test_top_k(self, harness: primitive_harness.Harness):
if (harness.params["k"] > harness.params["shape"][-1] or
harness.params["k"] < 0):
with self.assertRaisesRegex(ValueError, "k argument to top_k must be"):
harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
elif harness.params["dtype"] in jtu.dtypes.complex:
# TODO(necula): fix top_k complex bug on TPU
if jtu.device_under_test() == "tpu":
raise unittest.SkipTest("top_k complex on TPU raises different error")
with self.assertRaisesRegex(RuntimeError, "Unimplemented: complex comparison"):
harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
# TODO: TF and JAX sort [inf, nan] differently.
elif harness.name.startswith("nan_"):
raise unittest.SkipTest("inconsistent [nan, inf] sorting")
else:
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_sort)
def test_sort(self, harness: primitive_harness.Harness):
if (jtu.device_under_test() == "gpu" and
len(harness.arg_descriptors) == 4 and
not harness.params["is_stable"]):
# TODO: fix the TF GPU test
raise unittest.SkipTest("GPU tests are running TF on CPU")
if jtu.device_under_test() == "tpu" and harness.params["dtype"] in jtu.dtypes.complex:
raise unittest.SkipTest("JAX sort is not implemented on TPU for complex")
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_fft)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_fft(self, harness: primitive_harness.Harness):
if len(harness.params["fft_lengths"]) > 3:
with self.assertRaisesRegex(RuntimeError, "FFT only supports ranks 1-3"):
harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
elif (jtu.device_under_test() == "tpu" and
len(harness.params["fft_lengths"]) > 1):
# TODO(b/140351181): FFT is mostly unimplemented on TPU, even for JAX
with self.assertRaisesRegex(RuntimeError,
"only 1D FFT is currently supported."):
harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
else:
tol = None
if jtu.device_under_test() == "gpu":
if harness.params["dtype"] in jtu.dtypes.boolean:
tol = 0.01
else:
tol = 1e-3
self.ConvertAndCompare(harness.dyn_fun,
*harness.dyn_args_maker(self.rng()),
atol=tol, rtol=tol)
@primitive_harness.parameterized(primitive_harness.lax_linalg_qr)
def test_qr(self, harness: primitive_harness.Harness):
# See jax.lib.lapack.geqrf for the list of compatible types
dtype = harness.params["dtype"]
dut = jtu.device_under_test()
# These cases are not implemented in JAX
if dtype in (jtu.dtypes.all_integer + [jnp.bfloat16]):
unimplemented_jax = True
elif dtype is np.complex64 and dut == "tpu":
unimplemented_jax = True
elif dtype is np.float16 and dut in ("cpu", "gpu"):
unimplemented_jax = True
else:
unimplemented_jax = False
if unimplemented_jax:
raise unittest.SkipTest(f"QR not implemented in JAX for {dtype} on {dut}")
# TODO: see https://github.com/google/jax/pull/3775#issuecomment-659407824.
# - for now, the performance of the HLO QR implementation called when
# compiling with TF is expected to have worse performance than the
# custom calls made in JAX.
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),
atol=1e-5, rtol=1e-5)
@primitive_harness.parameterized(primitive_harness.lax_linalg_svd)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_svd(self, harness: primitive_harness.Harness):
if harness.params["dtype"] in [np.float16, dtypes.bfloat16]:
if jtu.device_under_test() != "tpu":
# Does not work in JAX
with self.assertRaisesRegex(NotImplementedError, "Unsupported dtype"):
harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
return
if harness.params["dtype"] in [np.complex64, np.complex128]:
if jtu.device_under_test() == "tpu":
# TODO: on JAX on TPU there is no SVD implementation for complex
with self.assertRaisesRegex(RuntimeError,
"Binary op compare with different element types"):
harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
return
def _custom_assert(r_jax, r_tf, atol=1e-6, rtol=1e-6):
def _reconstruct_operand(result, is_tf: bool):
# Reconstructing operand as documented in numpy.linalg.svd (see
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.svd.html)
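# i.e. the operand A is recovered (up to numerical error) as U @ diag(s) @ V,
# where the returned v already plays the role of the (conjugate-)transposed
# factor; the broadcasting matmul below computes this as (U * S) @ V.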
s, u, v = result
if is_tf:
s = s.numpy()
u = u.numpy()
v = v.numpy()
U = u[..., :s.shape[-1]]
V = v[..., :s.shape[-1], :]
S = s[..., None, :]
return jnp.matmul(U * S, V), s.shape, u.shape, v.shape
if harness.params["compute_uv"]:
r_jax_reconstructed = _reconstruct_operand(r_jax, False)
r_tf_reconstructed = _reconstruct_operand(r_tf, True)
self.assertAllClose(r_jax_reconstructed, r_tf_reconstructed,
atol=atol, rtol=rtol)
else:
self.assertAllClose(r_jax, r_tf, atol=atol, rtol=rtol)
tol = 1e-4
custom_assert = partial(_custom_assert, atol=tol, rtol=tol)
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),
atol=tol, rtol=tol,
custom_assert=custom_assert,
always_custom_assert=True)
@primitive_harness.parameterized(primitive_harness.lax_select_and_gather_add)
@jtu.ignore_warning(category=UserWarning,
message="Using reduced precision for gradient.*")
def test_select_and_gather_add(self, harness: primitive_harness.Harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_reduce_window)
def test_reduce_window(self, harness: primitive_harness.Harness):
dtype = harness.params['dtype']
if (jtu.device_under_test() == 'tpu' and dtype is np.complex64):
raise unittest.SkipTest(
'TODO: JAX reduce_window on TPU does not handle complex64'
)
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_unary_elementwise)
def test_unary_elementwise(self, harness: primitive_harness.Harness):
dtype = harness.params["dtype"]
lax_name = harness.params["lax_name"]
arg, = harness.dyn_args_maker(self.rng())
custom_assert = None
if lax_name == "digamma":
# TODO(necula): fix bug with digamma/(f32|f16) on TPU
if dtype in [np.float16, np.float32] and jtu.device_under_test() == "tpu":
raise unittest.SkipTest("TODO: fix bug: nan vs not-nan")
# In the bfloat16 case, TF and lax both return NaN in undefined cases.
if dtype is not dtypes.bfloat16:
# digamma is not defined at 0 and -1
def custom_assert(result_jax, result_tf):
# lax.digamma returns NaN and tf.math.digamma returns inf
special_cases = (arg == 0.) | (arg == -1.)
nr_special_cases = np.count_nonzero(special_cases)
self.assertAllClose(np.full((nr_special_cases,), dtype(np.nan)),
result_jax[special_cases])
self.assertAllClose(np.full((nr_special_cases,), dtype(np.inf)),
result_tf[special_cases])
# non-special cases are equal
self.assertAllClose(result_jax[~ special_cases],
result_tf[~ special_cases])
if lax_name == "erf_inv":
# TODO(necula): fix erf_inv bug on TPU
if jtu.device_under_test() == "tpu":
raise unittest.SkipTest("erf_inv bug on TPU: nan vs non-nan")
# TODO: investigate: in the (b)float16 cases, TF and lax both return the
# same result in undefined cases.
if dtype not in [np.float16, dtypes.bfloat16]:
# erf_inv is not defined for arg <= -1 or arg >= 1
def custom_assert(result_jax, result_tf): # noqa: F811
# for arg < -1 or arg > 1
# lax.erf_inv returns NaN; tf.math.erf_inv return +/- inf
special_cases = (arg < -1.) | (arg > 1.)
nr_special_cases = np.count_nonzero(special_cases)
self.assertAllClose(np.full((nr_special_cases,), dtype(np.nan),
dtype=dtype),
result_jax[special_cases])
signs = np.where(arg[special_cases] < 0., -1., 1.)
self.assertAllClose(np.full((nr_special_cases,),
signs * dtype(np.inf), dtype=dtype),
result_tf[special_cases])
# non-special cases are equal
self.assertAllClose(result_jax[~ special_cases],
result_tf[~ special_cases])
atol = None
if jtu.device_under_test() == "gpu":
# TODO(necula): revisit once we fix the GPU tests
atol = 1e-3
self.ConvertAndCompare(harness.dyn_fun, arg, custom_assert=custom_assert,
atol=atol)
@primitive_harness.parameterized(primitive_harness.lax_bitwise_not)
def test_bitwise_not(self, harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_population_count)
def test_population_count(self, harness: primitive_harness.Harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_add_mul)
def test_add_mul(self, harness: primitive_harness.Harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_min_max)
def test_min_max(self, harness: primitive_harness.Harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_binary_elementwise)
def test_binary_elementwise(self, harness):
tol = None
lax_name, dtype = harness.params["lax_name"], harness.params["dtype"]
if lax_name in ("igamma", "igammac"):
# TODO(necula): fix bug with igamma/f16
if dtype in [np.float16, dtypes.bfloat16]:
raise unittest.SkipTest("TODO: igamma(c) unsupported with (b)float16 in JAX")
# TODO(necula): fix bug with igamma/f32 on TPU
if dtype is np.float32 and jtu.device_under_test() == "tpu":
raise unittest.SkipTest("TODO: fix bug: nan vs not-nan")
arg1, arg2 = harness.dyn_args_maker(self.rng())
custom_assert = None
if lax_name == "igamma":
# igamma is not defined when the first argument is <=0
def custom_assert(result_jax, result_tf):
# lax.igamma returns NaN when arg1 == arg2 == 0; tf.math.igamma returns 0
special_cases = (arg1 == 0.) & (arg2 == 0.)
nr_special_cases = np.count_nonzero(special_cases)
self.assertAllClose(np.full((nr_special_cases,), np.nan, dtype=dtype),
result_jax[special_cases])
self.assertAllClose(np.full((nr_special_cases,), 0., dtype=dtype),
result_tf[special_cases])
# non-special cases are equal
self.assertAllClose(result_jax[~ special_cases],
result_tf[~ special_cases])
if lax_name == "igammac":
# On GPU, tolerance also needs to be adjusted in compiled mode
if dtype == np.float64 and jtu.device_under_test() == 'gpu':
tol = 1e-14
# igammac is not defined when the first argument is <=0
def custom_assert(result_jax, result_tf): # noqa: F811
# lax.igammac returns 1. when arg1 <= 0; tf.math.igammac returns NaN
special_cases = (arg1 <= 0.) | (arg2 <= 0)
nr_special_cases = np.count_nonzero(special_cases)
self.assertAllClose(np.full((nr_special_cases,), 1., dtype=dtype),
result_jax[special_cases])
self.assertAllClose(np.full((nr_special_cases,), np.nan, dtype=dtype),
result_tf[special_cases])
# On CPU, tolerance only needs to be adjusted in eager & graph modes
tol = None
if dtype == np.float64:
tol = 1e-14
# non-special cases are equal
self.assertAllClose(result_jax[~ special_cases],
result_tf[~ special_cases], atol=tol, rtol=tol)
self.ConvertAndCompare(harness.dyn_fun, arg1, arg2,
custom_assert=custom_assert, atol=tol, rtol=tol)
@primitive_harness.parameterized(primitive_harness.lax_binary_elementwise_logical)
def test_binary_elementwise_logical(self, harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_betainc)
def test_betainc(self, harness: primitive_harness.Harness):
dtype = harness.params["dtype"]
# TODO: https://www.tensorflow.org/api_docs/python/tf/math/betainc only
# supports float32/64 tests.
# TODO(bchetioui): investigate why the test actually fails in JAX.
if dtype in [np.float16, dtypes.bfloat16]:
raise unittest.SkipTest("(b)float16 not implemented in TF")
tol = None
if dtype is np.float64:
tol = 1e-14
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),
atol=tol, rtol=tol)
# TODO(necula): combine tests that are identical except for the harness
# wait until we get more experience with using harnesses.
@primitive_harness.parameterized(primitive_harness.lax_shift_left)
def test_shift_left(self, harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_shift_right_logical)
def test_shift_right_logical(self, harness):
if jtu.device_under_test() == "tpu" and harness.params["dtype"] in [np.int8, np.int16]:
raise unittest.SkipTest("TODO: silent error for negative inputs")
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_shift_right_arithmetic)
def test_shift_right_arithmetic(self, harness):
if jtu.device_under_test() == "tpu" and harness.params["dtype"] in [np.uint8, np.uint16]:
raise unittest.SkipTest("TODO: silent error for negative inputs")
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_slice)
def test_slice(self, harness):
# JAX.slice rejects negative indices; check, and skip jax2tf
if any(si < 0 or si >= sh or li < 0 or li > sh
for sh, si, li in zip(harness.params["shape"],
harness.params["start_indices"],
harness.params["limit_indices"])):
with self.assertRaisesRegex(TypeError, ""):
harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
else:
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_dynamic_slice)
def test_dynamic_slice(self, harness):
# JAX.dynamic_slice rejects slice sizes too big; check this, and skip jax2tf
args = harness.dyn_args_maker(self.rng())
if any(li - si < 0 or li - si >= sh
for sh, si, li in zip(harness.params["shape"],
harness.params["start_indices"],
harness.params["limit_indices"])):
with self.assertRaisesRegex(TypeError, ""):
harness.dyn_fun(*args)
return
self.ConvertAndCompare(harness.dyn_fun, *args)
@primitive_harness.parameterized(primitive_harness.lax_dynamic_update_slice)
def test_dynamic_update_slice(self, harness):
# JAX.dynamic_update_slice rejects update slices too big; check, and skip jax2tf
if any(ush > sh
for sh, ush in zip(harness.params["shape"],
harness.params["update_shape"])):
with self.assertRaisesRegex(TypeError, ""):
harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
else:
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_squeeze)
def test_squeeze(self, harness: primitive_harness.Harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_conv_general_dilated)
def test_conv_general_dilated(self, harness: primitive_harness.Harness):
if jtu.device_under_test() == "gpu":
raise unittest.SkipTest("TODO: test failures on GPU")
tol = None
# TODO(bchetioui): significant discrepancies in some float16 cases.
if harness.params["dtype"] is np.float16:
tol = 1.
# TODO(bchetioui): slight occasional discrepancy in float32 cases.
elif harness.params["dtype"] is np.float32:
tol = 1e-5
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),
atol=tol, rtol=tol)
@primitive_harness.parameterized(primitive_harness.lax_gather)
def test_gather(self, harness: primitive_harness.Harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
@primitive_harness.parameterized(primitive_harness.lax_scatter)
def test_scatter(self, harness: primitive_harness.Harness):
f_name = harness.params['f_lax'].__name__
dtype = harness.params['dtype']
if jtu.device_under_test() == 'tpu':
if dtype is np.complex64 and f_name in ['scatter_min', 'scatter_max']:
raise unittest.SkipTest(f"TODO: complex {f_name} on TPU fails in JAX")
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
def test_boolean_gather(self):
values = np.array([[True, True], [False, True], [False, False]],
dtype=np.bool_)
indices = np.array([0, 1], dtype=np.int32)
for axis in [0, 1]:
f_jax = jax.jit(lambda v, i: jnp.take(v, i, axis=axis)) # pylint: disable=cell-var-from-loop
self.ConvertAndCompare(f_jax, values, indices)
def test_gather_rank_change(self):
params = jnp.array([[1.0, 1.5, 2.0], [2.0, 2.5, 3.0], [3.0, 3.5, 4.0]])
indices = jnp.array([[1, 1, 2], [0, 1, 0]])
f_jax = jax.jit(lambda i: params[i])
self.ConvertAndCompare(f_jax, indices)
@parameterized.named_parameters(jtu.cases_from_list(
dict(testcase_name=f"_{f_jax.__name__}",
f_jax=f_jax)
for f_jax in REDUCE))
def test_reduce_ops_with_numerical_input(self, f_jax):
values = np.array([1, 2, 3], dtype=np.float32)
self.ConvertAndCompare(f_jax, values)
@parameterized.named_parameters(jtu.cases_from_list(
dict(testcase_name=f"_{f_jax.__name__}",
f_jax=f_jax)
for f_jax in (jnp.cumsum, jnp.cumprod)))
def test_cumulated_ops(self, f_jax):
values = np.array([1, 2, 3], dtype=np.float32)
self.ConvertAndCompare(f_jax, values)
@parameterized.named_parameters(jtu.cases_from_list(
dict(testcase_name=f"_{op.__name__}",
op=op)
for op in INDEX))
def test_scatter_static(self, op):
values = np.ones((5, 6), dtype=np.float32)
update = np.float32(6.)
f_jax = jax.jit(lambda v, u: op(v, jax.ops.index[::2, 3:], u))
self.ConvertAndCompare(f_jax, values, update)
@parameterized.named_parameters(jtu.cases_from_list(
dict(testcase_name=f"_{f_jax.__name__}",
f_jax=f_jax)
for f_jax in REDUCE))
def test_reduce_ops_with_boolean_input(self, f_jax):
values = np.array([True, False, True], dtype=np.bool_)
self.ConvertAndCompare(f_jax, values)
@primitive_harness.parameterized(primitive_harness.random_gamma)
def test_random_gamma(self, harness: primitive_harness.Harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),
rtol=1e-5)
@primitive_harness.parameterized(primitive_harness.random_split)
def test_random_split(self, harness: primitive_harness.Harness):
self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))
def test_zeros_like(self):
v = np.float32(2.)
f_jax = jax.ad_util.zeros_like_jaxval
self.ConvertAndCompare(f_jax, v)
def test_stop_gradient(self):
f = jax2tf.convert(lax.stop_gradient)
self.assertEqual(f(tf.ones([])), 1.)
# test_bfloat16_constant checks that https://github.com/google/jax/issues/3942 is
# fixed
def test_bfloat16_constant(self):
def jax_fn_scalar(x):
x = x.astype(jnp.bfloat16)
x *= 2.
return x
def jax_fn_array(x):
x = x.astype(jnp.bfloat16)
x *= np.array([1.5, 2.5, 3.5], jnp.bfloat16)
return x
tf_fn_scalar = jax2tf.convert(jax_fn_scalar)
self.assertAllClose(tf_fn_scalar(1.375).numpy(), jnp.bfloat16(2.750))
tf_fn_array = jax2tf.convert(jax_fn_array)
self.assertAllClose(tf_fn_array(np.array([3, 4, 5])),
np.array([4.5, 10, 17.5], jnp.bfloat16))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for operations in eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
class OpsTest(test_util.TensorFlowTestCase):
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
def testMatMulGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = math_ops.matmul(three, five)
self.assertEqual([[15.0]], product.numpy())
def testExecuteStringAttr(self):
three = constant_op.constant(3.0)
checked_three = array_ops.check_numerics(three,
message='just checking')
self.assertEqual([[3]], checked_three.numpy())
def testExecuteFloatAttr(self):
three = constant_op.constant(3.0)
almost_three = constant_op.constant(2.8)
almost_equal = math_ops.approximate_equal(
three, almost_three, tolerance=0.3)
self.assertTrue(almost_equal)
def testExecuteIntAttr(self):
three = constant_op.constant(3)
four = constant_op.constant(4)
total = math_ops.add_n([three, four])
self.assertAllEqual(7, total)
def testExecuteBoolAttr(self):
three = constant_op.constant([[3]])
five = constant_op.constant([[5]])
product = math_ops.matmul(three, five, transpose_a=True)
self.assertAllEqual([[15]], product)
def testExecuteOneListOutput(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
x1, x2, x3 = array_ops.split(value, 3, axis=split_dim)
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testGraphMode(self):
graph = ops.Graph()
with graph.as_default(), context.graph_mode():
array_ops.placeholder(dtypes.int32)
self.assertEqual(1, len(graph.get_operations()))
# See comments on handling of int32 tensors on GPU in
# EagerTensor.__init__.
def testInt32CPUDefault(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.device('/gpu:0'):
r = constant_op.constant(1) + constant_op.constant(2)
self.assertAllEqual(r, 3)
def testExecuteListOutputLen1(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen0(self):
empty = constant_op.constant([], dtype=dtypes.int32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteMultipleNonListOutput(self):
x = constant_op.constant([1, 2, 3, 4, 5, 6])
y = constant_op.constant([1, 3, 5])
result = array_ops.listdiff(x, y)
out, idx = result
self.assertTrue(out is result.out)
self.assertTrue(idx is result.idx)
self.assertAllEqual([2, 4, 6], out)
self.assertAllEqual([1, 3, 5], idx)
def testExecuteMultipleListOutput(self):
split_dim = constant_op.constant(1, dtype=dtypes.int64)
indices = constant_op.constant([[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
dtype=dtypes.int64)
values = constant_op.constant([2, 3, 5, 7, 11])
shape = constant_op.constant([2, 7], dtype=dtypes.int64)
result = sparse_ops.gen_sparse_ops.sparse_split(
split_dim,
indices,
values,
shape,
num_split=2)
output_indices, output_values, output_shape = result
self.assertEqual(2, len(output_indices))
self.assertEqual(2, len(output_values))
self.assertEqual(2, len(output_shape))
self.assertEqual(output_indices, result.output_indices)
self.assertEqual(output_values, result.output_values)
self.assertEqual(output_shape, result.output_shape)
self.assertAllEqual([[0, 2], [1, 0], [1, 1]], output_indices[0])
self.assertAllEqual([[0, 0], [0, 1]], output_indices[1])
self.assertAllEqual([2, 7, 11], output_values[0])
self.assertAllEqual([3, 5], output_values[1])
self.assertAllEqual([2, 4], output_shape[0])
self.assertAllEqual([2, 3], output_shape[1])
# TODO(josh11b): Test an op that has multiple outputs, some but not
# all of which are lists. Examples: barrier_take_many (currently
# unsupported since it uses a type list) or sdca_optimizer (I don't
# have an example of legal inputs & outputs).
def testComposition(self):
x = constant_op.constant(1, dtype=dtypes.int32)
three_x = x + x + x
self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
def testOperatorOverrides(self):
# TODO(henrytan): test with negative number.
a = constant_op.constant([1])
b = constant_op.constant([2])
self.assertAllEqual((-a), [-1])
self.assertAllEqual(abs(b), [2])
self.assertAllEqual((a + b), [3])
self.assertAllEqual((a - b), [-1])
self.assertAllEqual((a * b), [2])
self.assertAllEqual((a * a), [1])
self.assertAllEqual((a**b), [1])
self.assertAllEqual((a / b), [1 / 2])
self.assertAllEqual((a / a), [1])
self.assertAllEqual((a % b), [1])
self.assertAllEqual((a < b), [True])
self.assertAllEqual((a <= b), [True])
self.assertAllEqual((a > b), [False])
self.assertAllEqual((a >= b), [False])
self.assertAllEqual((a == b), False)
self.assertAllEqual((a != b), True)
self.assertAllEqual(1, a[constant_op.constant(0)])
def test_basic_slice(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :], t[:, :, :])
self.assertAllEqual(npt[::, ::, ::], t[::, ::, ::])
self.assertAllEqual(npt[::1, ::1, ::1], t[::1, ::1, ::1])
self.assertAllEqual(npt[::1, ::5, ::2], t[::1, ::5, ::2])
self.assertAllEqual(npt[::-1, :, :], t[::-1, :, :])
self.assertAllEqual(npt[:, ::-1, :], t[:, ::-1, :])
self.assertAllEqual(npt[:, :, ::-1], t[:, :, ::-1])
self.assertAllEqual(npt[-2::-1, :, ::1], t[-2::-1, :, ::1])
self.assertAllEqual(npt[-2::-1, :, ::2], t[-2::-1, :, ::2])
def testDegenerateSlices(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
# degenerate by offering a forward interval with a negative stride
self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
# degenerate with a reverse interval with a positive stride
self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
# empty interval in every dimension
self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
def testEllipsis(self):
npt = np.array(
[[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[0:], t[0:])
# implicit ellipsis
self.assertAllEqual(npt[0:, ...], t[0:, ...])
# ellipsis alone
self.assertAllEqual(npt[...], t[...])
# ellipsis at end
self.assertAllEqual(npt[0:1, ...], t[0:1, ...])
# ellipsis at begin
self.assertAllEqual(npt[..., 0:1], t[..., 0:1])
# ellipsis at middle
self.assertAllEqual(npt[0:1, ..., 0:1], t[0:1, ..., 0:1])
def testShrink(self):
npt = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :, :, 3], t[:, :, :, :, 3])
self.assertAllEqual(npt[..., 3], t[..., 3])
self.assertAllEqual(npt[:, 0], t[:, 0])
self.assertAllEqual(npt[:, :, 0], t[:, :, 0])
def testOpWithInputsOnDifferentDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The GPU kernel for the Reshape op requires that the
# shape input be on CPU.
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = array_ops.reshape(value, shape)
self.assertAllEqual([[1], [2]], reshaped.cpu())
def testInt64(self):
# Fill requires the first input to be an int32 tensor.
self.assertAllEqual(
[1.0, 1.0],
array_ops.fill(constant_op.constant([2], dtype=dtypes.int64),
constant_op.constant(1)))
def testOutputOnHostMemory(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# The Shape op kernel on GPU places the output in host memory.
value = constant_op.constant([1.]).gpu()
shape = array_ops.shape(value)
self.assertEqual([1], shape.numpy())
def testSilentCopy(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# Temporarily replace the context
# pylint: disable=protected-access
del context._context
try:
context._context = context.Context(
device_policy=context.DEVICE_PLACEMENT_SILENT)
cpu_tensor = constant_op.constant(1.0)
gpu_tensor = cpu_tensor.gpu()
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
finally:
del context._context
context._context = context.Context()
# pylint: enable=protected-access
def testSoftPlacement(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# Temporarily replace the context
# pylint: disable=protected-access
del context._context
try:
context._context = context.Context(
device_policy=context.DEVICE_PLACEMENT_SILENT,
config=config_pb2.ConfigProto(allow_soft_placement=True))
cpu_tensor = constant_op.constant(1.0)
result = cpu_tensor + cpu_tensor
self.assertEqual(result.device,
'/job:localhost/replica:0/task:0/device:GPU:0')
finally:
del context._context
context._context = context.Context()
# pylint: enable=protected-access
def testRandomUniform(self):
scalar_shape = constant_op.constant([], dtype=dtypes.int32)
x = random_ops.random_uniform(scalar_shape)
    self.assertEqual(0, x.shape.ndims)
    self.assertEqual(dtypes.float32, x.dtype)
x = random_ops.random_uniform(
scalar_shape, minval=constant_op.constant(5.),
maxval=constant_op.constant(6.))
self.assertLess(x, 6)
self.assertGreaterEqual(x, 5)
def testArgsToMatchingEagerDefault(self):
# Uses default
ctx = context.context()
t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int32)
    self.assertEqual(t, dtypes.int32)
    self.assertEqual(r[0].dtype, dtypes.int32)
    t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int64)
    self.assertEqual(t, dtypes.int64)
    self.assertEqual(r[0].dtype, dtypes.int64)
    # Doesn't use default
    t, r = execute.args_to_matching_eager(
        [['string', 'arg']], ctx, dtypes.int32)
    self.assertEqual(t, dtypes.string)
    self.assertEqual(r[0].dtype, dtypes.string)
def testFlattenLayer(self):
flatten_layer = core.Flatten()
x = constant_op.constant([[[-10, -20], [-30, -40]], [[10, 20], [30, 40]]])
y = flatten_layer(x)
self.assertAllEqual([[-10, -20, -30, -40], [10, 20, 30, 40]], y)
def testIdentity(self):
self.assertAllEqual(2, array_ops.identity(2))
def testIdentityOnVariable(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.device('/gpu:0'):
v = resource_variable_ops.ResourceVariable(True)
self.assertAllEqual(True, array_ops.identity(v))
def testIncompatibleSetShape(self):
x = constant_op.constant(1)
with self.assertRaises(ValueError):
x.set_shape((1, 2))
def testCompatibleSetShape(self):
x = constant_op.constant([[1, 2]])
x.set_shape(tensor_shape.TensorShape([None, 2]))
self.assertEqual(x.get_shape(), (1, 2))
def testCastScalarToPrimitiveTypes(self):
x = constant_op.constant(1.3)
self.assertIsInstance(int(x), int)
self.assertEqual(int(x), 1)
self.assertIsInstance(float(x), float)
self.assertAllClose(float(x), 1.3)
def testCastNonScalarToPrimitiveTypesFails(self):
x = constant_op.constant([1.3, 2])
with self.assertRaises(TypeError):
int(x)
with self.assertRaises(TypeError):
float(x)
def testFormatString(self):
x = constant_op.constant(3.1415)
self.assertEqual('3.14', '{:.2f}'.format(x))
def testNoOpIsNone(self):
self.assertTrue(control_flow_ops.no_op() is None)
if __name__ == '__main__':
test.main()
|
# -*- coding: utf-8 -*-
#
# dataflake.fakeldap documentation build configuration file, created by
# sphinx-quickstart on Sat May 27 10:35:35 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import datetime
import os
import sys
parent = os.path.dirname(os.path.dirname(__file__))
parent_dir = os.path.abspath(parent)
with open(os.path.join(parent_dir, 'version.txt'), 'r') as version_file:
pkg_version = version_file.read().strip()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
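# As an illustration only (not part of this project's configuration), built-in
# extensions would be enabled by listing their module names, e.g.:
# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']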
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dataflake.fakeldap'
copyright = u'2010-%i, Jens Vagelpohl' % datetime.datetime.now().year
author = u'Jens Vagelpohl'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pkg_version.replace(u'.dev0', u'')
# The full version, including alpha/beta/rc tags.
release = pkg_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'dataflakefakeldapdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'dataflakefakeldap.tex', u'dataflake.fakeldap Documentation',
u'Jens Vagelpohl', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dataflakefakeldap', u'dataflake.fakeldap Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'dataflakefakeldap', u'dataflake.fakeldap Documentation',
author, 'dataflakefakeldap', 'One line description of project.',
'Miscellaneous'),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 10:14:35 2018
@author: alexissoto
"""
'''
Music Production Kit
'''
import librosa as lb
song = "Fantasia_Impromptu.m4a" #Input file
def TempoChange():
    y, sr = lb.load(song, duration=30)
    tempo, beat_frames = lb.beat.beat_track(y=y, sr=sr)  # currently unused
    stretch = lb.effects.time_stretch(y, 2.0)  # 2.0 = double speed, 0.5 = half speed
    lb.output.write_wav('Double fast.wav', stretch, sr=sr)  # Output file
def Harmonics():
    y, sr = lb.load(song, duration=30)
    y_harmonic = lb.effects.harmonic(y)
    lb.output.write_wav('Just harmonic content.wav', y_harmonic, sr=sr)
def Percussive():
    y, sr = lb.load(song, duration=30)
    y_percussive = lb.effects.percussive(y)
    lb.output.write_wav('Just percussive content.wav', y_percussive, sr=sr)
def Both():
    y, sr = lb.load(song, duration=30)
    y_harmonic, y_percussive = lb.effects.hpss(y, margin=(1.0, 5.0))
    lb.output.write_wav('Harmonic.wav', y_harmonic, sr=sr)  # also write the previously unused harmonic part
    lb.output.write_wav('Percussive.wav', y_percussive, sr=sr)
def Steps():
    y, sr = lb.load(song, duration=30)
    y_shifted = lb.effects.pitch_shift(y, sr, n_steps=2)  # up a major second (2 semitones)
    lb.output.write_wav('Major second.wav', y_shifted, sr=sr)
def Tempo():
    y, sr = lb.load(song, duration=30)
    envelope = lb.onset.onset_strength(y, sr=sr)
    tempo = lb.beat.tempo(onset_envelope=envelope, sr=sr)
    print('Tempo is %d BPM' % tempo[0])  # lb.beat.tempo returns a one-element array
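# Minimal usage sketch (an assumed entry point, not part of the original script):
# running the module directly renders each transformation of the input file.
if __name__ == '__main__':
    TempoChange()
    Harmonics()
    Percussive()
    Both()
    Steps()
    Tempo()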
|
import scipy.signal as signal
import torch
import torch.nn as nn
import numpy as np
import models
import gym
import wandb
def create_feedforward(sizes, activation=nn.ReLU):
layers = []
for i in range(len(sizes) - 1):
layers.append(nn.Linear(sizes[i], sizes[i+1]))
if i < len(sizes) - 2:
layers.append(activation())
return nn.Sequential(*layers)
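# Illustrative example (assumed sizes): create_feedforward([4, 64, 2]) builds
# nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 2)) -- no activation
# after the final layer.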
def get_shape(shape):
if shape is None:
return ()
return shape
def discounted_cumsum(rewards, reward_decay):
"""Taken from https://stackoverflow.com/questions/47970683/vectorize-a-numpy-discount-calculation"""
return signal.lfilter([1], [1, -reward_decay], x=rewards[::-1])[::-1]
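# Sanity-check example (illustrative values): with reward_decay=0.5,
# discounted_cumsum([1.0, 1.0, 1.0], 0.5) -> [1.75, 1.5, 1.0], i.e. each entry
# equals r_t + reward_decay * G_{t+1}.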
class TrajectoryBuffer:
def __init__(self, observation_shape, action_shape, size, reward_decay=0.99):
self.max_size = size
self.trajectory_start = 0
self.pos = 0
self.reward_decay = reward_decay
self.observations = np.empty((size, *observation_shape), dtype=np.float32)
self.actions = np.empty((size, *get_shape(action_shape)), dtype=np.float32)
self.rewards = np.empty((size,), dtype=np.float32)
self.returns = np.empty((size,), dtype=np.float32)
self.dones = np.empty((size,), dtype=np.float32)
def store(self, observation, action, reward, done):
assert self.pos < self.max_size, "Buffer Overflow"
self.observations[self.pos] = observation
self.actions[self.pos] = action
self.rewards[self.pos] = reward
self.dones[self.pos] = done
self.pos += 1
def end_trajectory(self, value=0):
# Compute return
sl = slice(self.trajectory_start, self.pos)
rewards = self.rewards[sl]
rewards = np.append(rewards, value)
self.returns[sl] = discounted_cumsum(rewards, self.reward_decay)[:-1]
self.trajectory_start = self.pos
def get_data(self):
sl = slice(0, self.pos)
data = dict(
observations=self.observations[sl],
actions=self.actions[sl],
rewards=self.rewards[sl],
returns=self.returns[sl],
dones=self.dones[sl]
)
return {key : torch.from_numpy(value) for key, value in data.items()}
def clear(self):
self.pos = 0
self.trajectory_start = 0
class VecTrajectoryBuffer:
def __init__(self, observation_shape, action_shape, num_envs, size, reward_decay=0.99):
self.max_size = size
self.pos = 0
self.reward_decay = reward_decay
self.traj_starts = np.zeros((num_envs,), dtype=int)
self.observations = np.empty((size, num_envs, *observation_shape), dtype=np.float32)
self.actions = np.empty((size, num_envs, *get_shape(action_shape)), dtype=np.float32)
self.rewards = np.empty((size, num_envs), dtype=np.float32)
self.returns = np.empty((size, num_envs), dtype=np.float32)
self.dones = np.empty((size, num_envs), dtype=np.float32)
def store(self, observations, actions, rewards, dones):
assert self.pos < self.max_size, "Buffer Overflow"
self.observations[self.pos] = observations
self.actions[self.pos] = actions
self.rewards[self.pos] = rewards
self.dones[self.pos] = dones
self.pos += 1
# Compute returns
for env_index, done in enumerate(dones):
if done:
self._end_trajectory(env_index)
def end_trajectory(self, values):
for env_index, value in enumerate(values):
self._end_trajectory(env_index, value)
def _end_trajectory(self, env_index, value=0):
# Compute return
sl = slice(self.traj_starts[env_index], self.pos)
rewards = self.rewards[sl, env_index]
rewards = np.append(rewards, value)
self.returns[sl, env_index] = discounted_cumsum(rewards, self.reward_decay)[:-1]
# Update trajectory start
self.traj_starts[env_index] = self.pos
def get_data(self, device=torch.device('cpu')):
sl = slice(0, self.pos)
data = dict(
observations=self._remove_env_axis(self.observations[sl]),
actions=self._remove_env_axis(self.actions[sl]),
rewards=self._remove_env_axis(self.rewards[sl]),
returns=self._remove_env_axis(self.returns[sl]),
dones=self._remove_env_axis(self.dones[sl])
)
return {key : torch.from_numpy(value).to(device) for key, value in data.items()}
def clear(self):
self.pos = 0
self.traj_starts.fill(0)
def _remove_env_axis(self, array):
# array.shape = (size, num_envs, ???)
shape = array.shape
# Swap size with num_envs to ensure reshaping won't mix trajectories
array = array.swapaxes(0, 1)
# Flatten
new_shape = (shape[0] * shape[1], *shape[2:])
array = array.reshape(new_shape)
return array
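# Shape illustration (assumed sizes): with size=3 and num_envs=2, an array of
# shape (3, 2) is swapped to (2, 3) and flattened to (6,), so each environment's
# trajectory stays contiguous instead of interleaving timesteps across envs.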
def play(model: models.Policy, env: gym.Env, repeats=10, device=torch.device('cpu')):
for _ in range(repeats):
state = env.reset()
done = False
while not done:
inp = torch.FloatTensor([state]).to(device)
action = model.get_actions(inp)[0]
state, reward, done, _ = env.step(action)
env.render()
env.close()
def capture_video(model: models.Policy, env: gym.Env, fps=30, device=torch.device('cpu')):
frames = []
reward_sum = 0
step_count = 0
state = env.reset()
done = False
while not done:
inp = torch.FloatTensor([state]).to(device)
action = model.get_actions(inp)[0]
state, reward, done, _ = env.step(action)
frames.append(np.array(env.render("rgb_array")))
reward_sum += reward
step_count += 1
    frames = np.array(frames)  # (Time, Height, Width, Channels) from env.render("rgb_array")
    frames = np.moveaxis(frames, 3, 1)  # (Time, Channels, Height, Width) as expected by wandb.Video
return wandb.Video(frames, caption=f"RewardSum={reward_sum}; EpisodeLength={step_count}", fps=fps)
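# Minimal self-check sketch (illustrative only; the toy shapes and values below
# are assumptions, not part of the original module).
if __name__ == '__main__':
    buf = TrajectoryBuffer(observation_shape=(3,), action_shape=(), size=8, reward_decay=0.5)
    for step in range(4):
        buf.store(np.zeros(3, dtype=np.float32), 0.0, 1.0, step == 3)
    buf.end_trajectory()
    data = buf.get_data()
    # Four unit rewards with decay 0.5 give returns [1.875, 1.75, 1.5, 1.0].
    print(data['returns'])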
|