"""TODO(wikitext): Add a description here.""" import os import datasets _CITATION = """\ @misc{merity2016pointer, title={Pointer Sentinel Mixture Models}, author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher}, year={2016}, eprint={1609.07843}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ _DESCRIPTION = """\ The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License. """ _HOMEPAGE = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/" _LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)" _DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext" class WikitextConfig(datasets.BuilderConfig): """BuilderConfig for GLUE.""" def __init__(self, data_url, **kwargs): """BuilderConfig for Wikitext Args: data_url: `string`, url to the dataset (word or raw level) **kwargs: keyword arguments forwarded to super. """ super(WikitextConfig, self).__init__( version=datasets.Version( "1.0.0", ), **kwargs, ) self.data_url = data_url class Wikitext(datasets.GeneratorBasedBuilder): """TODO(wikitext_103): Short description of my dataset.""" # TODO(wikitext_103): Set up version. VERSION = datasets.Version("0.1.0") BUILDER_CONFIGS = [ WikitextConfig( name="wikitext-103-v1", data_url=_DATA_URL + "/" + "wikitext-103-v1.zip", description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.", ), WikitextConfig( name="wikitext-2-v1", data_url=_DATA_URL + "/" + "wikitext-2-v1.zip", description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.", ), WikitextConfig( name="wikitext-103-raw-v1", data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip", description="Raw level dataset: the raw tokens before the addition of <unk> tokens. " "They should only be used for character level work or for creating newly derived datasets.", ), WikitextConfig( name="wikitext-2-raw-v1", data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip", description="Raw level dataset: the raw tokens before the addition of <unk> tokens. " "They should only be used for character level work or for creating newly derived datasets.", ), ] def _info(self): # TODO(wikitext): Specifies the datasets.DatasetInfo object return datasets.DatasetInfo( # This is the description that will appear on the datasets page. description=_DESCRIPTION, # datasets.features.FeatureConnectors features=datasets.Features( { "text": datasets.Value("string") # These are the features of your dataset like images, labels ... } ), # If there's a common (input, target) tuple from the features, # specify them here. They'll be used if as_supervised=True in # builder.as_dataset. 
supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" # TODO(wikitext): Downloads the data and defines the splits # dl_manager is a datasets.download.DownloadManager that can be used to # download and extract URLs if self.config.name == "wikitext-103-v1": data_file = dl_manager.download_and_extract(self.config.data_url) data_dir = os.path.join(data_file, "wikitext-103") return [ datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"}, ), datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train"}, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid"}, ), ] else: if self.config.name == "wikitext-103-raw-v1": data_file = dl_manager.download_and_extract(self.config.data_url) data_dir = os.path.join(data_file, "wikitext-103-raw") return [ datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"}, ), datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"}, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"}, ), ] else: if self.config.name == "wikitext-2-raw-v1": data_file = dl_manager.download_and_extract(self.config.data_url) data_dir = os.path.join(data_file, "wikitext-2-raw") return [ datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"}, ), datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"}, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"}, ), ] else: if self.config.name == "wikitext-2-v1": data_file = dl_manager.download_and_extract(self.config.data_url) data_dir = os.path.join(data_file, "wikitext-2") return [ datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"}, ), datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={ "data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train", }, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={ "data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid", }, ), ] def _generate_examples(self, data_file, split): """Yields examples.""" # TODO(wikitext): Yields (key, example) tuples from the dataset with open(data_file, encoding="utf-8") as f: for idx, row in enumerate(f): if row.strip(): yield idx, {"text": row} else: yield idx, {"text": ""}
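# ---------------------------------------------------------------------------
# Usage sketch (not part of the loading script above): how a builder like this
# is typically consumed through the `datasets` library. The dataset name
# "wikitext" and the config name are assumptions; substitute the path or Hub
# name under which this script is actually registered.
from datasets import load_dataset

wikitext = load_dataset("wikitext", "wikitext-2-raw-v1")  # any of the four configs above
print(wikitext)                                           # DatasetDict with train/validation/test splits
print(wikitext["train"][0]["text"])                       # a single raw line of WikiText
# ---------------------------------------------------------------------------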
""" PixelVAE: A Latent Variable Model for Natural Images Ishaan Gulrajani, Kundan Kumar, Faruk Ahmed, Adrien Ali Taiga, Francesco Visin, David Vazquez, Aaron Courville """ import os, sys sys.path.append(os.getcwd()) N_GPUS = 2 import random import tflib as lib import tflib.sampling_loop_cifar_filter_3 import tflib.ops.kl_unit_gaussian import tflib.ops.kl_gaussian_gaussian import tflib.ops.conv2d import tflib.ops.linear import tflib.ops.batchnorm import tflib.ops.embedding import tflib.cifar import tflib.cifar_256 import numpy as np import tensorflow as tf import imageio from imageio import imsave import keras import time import functools import sklearn from sklearn.model_selection import train_test_split DATASET = 'cifar10' # mnist_256 SETTINGS = '32px_cifar' # mnist_256, 32px_small, 32px_big, 64px_small, 64px_big OUT_DIR = DATASET + '_interpolation1_final_filter_3_mean_beta_largesample' if not os.path.isdir(OUT_DIR): os.makedirs(OUT_DIR) print "Created directory {}".format(OUT_DIR) if SETTINGS == 'mnist_256': from keras.datasets import mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() # two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level # one_level uses EncFull/DecFull for the bottom (and only) level MODE = 'one_level' # Whether to treat pixel inputs to the model as real-valued (as in the # original PixelCNN) or discrete (gets better likelihoods). EMBED_INPUTS = True # Turn on/off the bottom-level PixelCNN in Dec1/DecFull PIXEL_LEVEL_PIXCNN = True HIGHER_LEVEL_PIXCNN = True DIM_EMBED = 16 DIM_PIX_1 = 32 DIM_1 = 16 DIM_2 = 32 DIM_3 = 32 DIM_4 = 64 LATENT_DIM_2 = 128 NUM_CLASSES = 10 ALPHA1_ITERS = 5000 ALPHA2_ITERS = 5000 KL_PENALTY = 1.0 BETA_ITERS = 1000 # In Dec2, we break each spatial location into N blocks (analogous to channels # in the original PixelCNN) and model each spatial location autoregressively # as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1 # actually hurt performance. Unsure why; might be a bug. PIX_2_N_BLOCKS = 1 TIMES = { 'test_every': 2*500, 'stop_after': 500*500, 'callback_every': 10*500 } LR = 1e-3 LR_DECAY_AFTER = TIMES['stop_after'] LR_DECAY_FACTOR = 1. BATCH_SIZE = 100 N_CHANNELS = 1 HEIGHT = 28 WIDTH = 28 # These aren't actually used for one-level models but some parts # of the code still depend on them being defined. 
LATENT_DIM_1 = 64 LATENTS1_HEIGHT = 7 LATENTS1_WIDTH = 7 elif SETTINGS == '32px_small': MODE = 'two_level' EMBED_INPUTS = True PIXEL_LEVEL_PIXCNN = True HIGHER_LEVEL_PIXCNN = True DIM_EMBED = 16 DIM_PIX_1 = 128 DIM_1 = 64 DIM_2 = 128 DIM_3 = 256 LATENT_DIM_1 = 64 DIM_PIX_2 = 512 DIM_4 = 512 LATENT_DIM_2 = 512 ALPHA1_ITERS = 2000 ALPHA2_ITERS = 5000 KL_PENALTY = 1.00 BETA_ITERS = 1000 PIX_2_N_BLOCKS = 1 TIMES = { 'test_every': 1000, 'stop_after': 200000, 'callback_every': 20000 } LR = 1e-3 LR_DECAY_AFTER = 180000 LR_DECAY_FACTOR = 1e-1 BATCH_SIZE = 64 N_CHANNELS = 3 HEIGHT = 32 WIDTH = 32 LATENTS1_HEIGHT = 8 LATENTS1_WIDTH = 8 elif SETTINGS == '32px_big': MODE = 'two_level' EMBED_INPUTS = False PIXEL_LEVEL_PIXCNN = True HIGHER_LEVEL_PIXCNN = True DIM_EMBED = 16 DIM_PIX_1 = 256 DIM_1 = 128 DIM_2 = 256 DIM_3 = 512 LATENT_DIM_1 = 128 DIM_PIX_2 = 512 DIM_4 = 512 LATENT_DIM_2 = 512 ALPHA1_ITERS = 2000 ALPHA2_ITERS = 5000 KL_PENALTY = 1.00 BETA_ITERS = 1000 PIX_2_N_BLOCKS = 1 TIMES = { 'test_every': 1000, 'stop_after': 300000, 'callback_every': 20000 } VANILLA = False LR = 1e-3 LR_DECAY_AFTER = 300000 LR_DECAY_FACTOR = 1e-1 BATCH_SIZE = 64 N_CHANNELS = 3 HEIGHT = 32 WIDTH = 32 LATENTS1_HEIGHT = 8 LATENTS1_WIDTH = 8 elif SETTINGS == '64px_small': MODE = 'two_level' EMBED_INPUTS = True PIXEL_LEVEL_PIXCNN = True HIGHER_LEVEL_PIXCNN = True DIM_EMBED = 16 DIM_PIX_1 = 128 DIM_0 = 64 DIM_1 = 64 DIM_2 = 128 LATENT_DIM_1 = 64 DIM_PIX_2 = 256 DIM_3 = 256 DIM_4 = 512 LATENT_DIM_2 = 512 PIX_2_N_BLOCKS = 1 TIMES = { 'test_every': 10000, 'stop_after': 200000, 'callback_every': 50000 } VANILLA = False LR = 1e-3 LR_DECAY_AFTER = 180000 LR_DECAY_FACTOR = .1 ALPHA1_ITERS = 2000 ALPHA2_ITERS = 10000 KL_PENALTY = 1.0 BETA_ITERS = 1000 BATCH_SIZE = 64 N_CHANNELS = 3 HEIGHT = 64 WIDTH = 64 LATENTS1_WIDTH = 16 LATENTS1_HEIGHT = 16 elif SETTINGS == '64px_big': MODE = 'two_level' EMBED_INPUTS = True PIXEL_LEVEL_PIXCNN = True HIGHER_LEVEL_PIXCNN = True DIM_EMBED = 16 DIM_PIX_1 = 384 DIM_0 = 192 DIM_1 = 256 DIM_2 = 512 LATENT_DIM_1 = 64 DIM_PIX_2 = 512 DIM_3 = 512 DIM_4 = 512 LATENT_DIM_2 = 512 PIX_2_N_BLOCKS = 1 TIMES = { 'test_every': 10000, 'stop_after': 400000, 'callback_every': 50000 } VANILLA = False LR = 1e-3 LR_DECAY_AFTER = 180000 LR_DECAY_FACTOR = .5 ALPHA1_ITERS = 1000 ALPHA2_ITERS = 10000 KL_PENALTY = 1.00 BETA_ITERS = 500 BATCH_SIZE = 48 N_CHANNELS = 3 HEIGHT = 64 WIDTH = 64 LATENTS1_WIDTH = 16 LATENTS1_HEIGHT = 16 elif SETTINGS=='64px_big_onelevel': # two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level # one_level uses EncFull/DecFull for the bottom (and only) level MODE = 'one_level' # Whether to treat pixel inputs to the model as real-valued (as in the # original PixelCNN) or discrete (gets better likelihoods). EMBED_INPUTS = True # Turn on/off the bottom-level PixelCNN in Dec1/DecFull PIXEL_LEVEL_PIXCNN = True HIGHER_LEVEL_PIXCNN = True DIM_EMBED = 16 DIM_PIX_1 = 384 DIM_0 = 192 DIM_1 = 256 DIM_2 = 512 DIM_3 = 512 DIM_4 = 512 LATENT_DIM_2 = 512 ALPHA1_ITERS = 50000 ALPHA2_ITERS = 50000 KL_PENALTY = 1.0 BETA_ITERS = 1000 # In Dec2, we break each spatial location into N blocks (analogous to channels # in the original PixelCNN) and model each spatial location autoregressively # as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1 # actually hurt performance. Unsure why; might be a bug. 
PIX_2_N_BLOCKS = 1 TIMES = { 'test_every': 10000, 'stop_after': 400000, 'callback_every': 50000 } LR = 1e-3 LR_DECAY_AFTER = 180000 LR_DECAY_FACTOR = 0.5 BATCH_SIZE = 48 N_CHANNELS = 3 HEIGHT = 64 WIDTH = 64 # These aren't actually used for one-level models but some parts # of the code still depend on them being defined. LATENT_DIM_1 = 64 LATENTS1_HEIGHT = 7 LATENTS1_WIDTH = 7 elif SETTINGS=='32px_cifar': from keras.datasets import cifar10 (x_train_set, y_train_set), (x_test_set, y_test_set) = cifar10.load_data() x_train_set = x_train_set.transpose(0,3,1,2) x_test_set = x_test_set.transpose(0,3,1,2) seed = 333 x_train_set, x_dev_set, y_train_set, y_dev_set = train_test_split(x_train_set, y_train_set, test_size=0.1, random_state=seed) # two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level # one_level uses EncFull/DecFull for the bottom (and only) level MODE = 'one_level' # Whether to treat pixel inputs to the model as real-valued (as in the # original PixelCNN) or discrete (gets better likelihoods). EMBED_INPUTS = True # Turn on/off the bottom-level PixelCNN in Dec1/DecFull PIXEL_LEVEL_PIXCNN = True HIGHER_LEVEL_PIXCNN = True DIM_EMBED = 16 DIM_PIX_1 = 192 #LEILA EDIT: was previously 384 DIM_0 = 96 #LEILA EDIT: was previously 192 DIM_1 = 128 #LEILA EDIT: was previously 256 DIM_2 = 256 #LEILA EDIT: was previously 512 DIM_3 = 256 #LEILA EDIT: was previously 512 DIM_4 = 256 #LEILA EDIT: was previously 512 LATENT_DIM_2 = 256 #LEILA EDIT: was previously 512 ALPHA1_ITERS = 50000 ALPHA2_ITERS = 50000 KL_PENALTY = 1.0 BETA_ITERS = 1000 # In Dec2, we break each spatial location into N blocks (analogous to channels # in the original PixelCNN) and model each spatial location autoregressively # as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1 # actually hurt performance. Unsure why; might be a bug. PIX_2_N_BLOCKS = 1 TIMES = { 'test_every': 10000, 'stop_after': 400000, 'callback_every': 50000 } LR = 1e-3 LR_DECAY_AFTER = 180000 LR_DECAY_FACTOR = 0.5 BATCH_SIZE = 50 # 48 N_CHANNELS = 3 HEIGHT = 32 #64 WIDTH = 32 #64 NUM_CLASSES = 10 # These aren't actually used for one-level models but some parts # of the code still depend on them being defined. 
LATENT_DIM_1 = 32 #LEILAEDIT: was previously 64 LATENTS1_HEIGHT = 7 LATENTS1_WIDTH = 7 if DATASET == 'mnist_256': train_data, dev_data, test_data = lib.mnist_256.load(BATCH_SIZE, BATCH_SIZE) # TODO: define new data-loader so I don't load batches elif DATASET == 'lsun_32': train_data, dev_data = lib.lsun_bedrooms.load(BATCH_SIZE, downsample=True) elif DATASET == 'lsun_64': train_data, dev_data = lib.lsun_bedrooms.load(BATCH_SIZE, downsample=False) elif DATASET == 'imagenet_64': train_data, dev_data = lib.small_imagenet.load(BATCH_SIZE) elif DATASET == 'cifar10': train_data, dev_data, test_data = lib.cifar_256.load(BATCH_SIZE) #LEILAEDIT lib.print_model_settings(locals().copy()) DEVICES = ['/gpu:{}'.format(i) for i in xrange(N_GPUS)] lib.ops.conv2d.enable_default_weightnorm() lib.ops.linear.enable_default_weightnorm() with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session: bn_is_training = tf.placeholder(tf.bool, shape=None, name='bn_is_training') bn_stats_iter = tf.placeholder(tf.int32, shape=None, name='bn_stats_iter') total_iters = tf.placeholder(tf.int32, shape=None, name='total_iters') all_images = tf.placeholder(tf.int32, shape=[None, N_CHANNELS, HEIGHT, WIDTH], name='all_images') all_latents1 = tf.placeholder(tf.float32, shape=[None, LATENT_DIM_1, LATENTS1_HEIGHT, LATENTS1_WIDTH], name='all_latents1') split_images = tf.split(all_images, len(DEVICES), axis=0) split_latents1 = tf.split(all_images, len(DEVICES), axis=0) tower_cost = [] tower_outputs1_sample = [] for device_index, (device, images, latents1_sample) in enumerate(zip(DEVICES, split_images, split_latents1)): with tf.device(device): def nonlinearity(x): return tf.nn.elu(x) def pixcnn_gated_nonlinearity(a, b): return tf.sigmoid(a) * tf.tanh(b) def SubpixelConv2D(*args, **kwargs): kwargs['output_dim'] = 4*kwargs['output_dim'] output = lib.ops.conv2d.Conv2D(*args, **kwargs) output = tf.transpose(output, [0,2,3,1]) output = tf.depth_to_space(output, 2) output = tf.transpose(output, [0,3,1,2]) return output def ResidualBlock(name, input_dim, output_dim, inputs, filter_size, mask_type=None, resample=None, he_init=True): """ resample: None, 'down', or 'up' """ if mask_type != None and resample != None: raise Exception('Unsupported configuration') if resample=='down': conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2) conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim) conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim, stride=2) elif resample=='up': conv_shortcut = SubpixelConv2D conv_1 = functools.partial(SubpixelConv2D, input_dim=input_dim, output_dim=output_dim) conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim) elif resample==None: conv_shortcut = lib.ops.conv2d.Conv2D conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim) conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim) else: raise Exception('invalid resample value') if output_dim==input_dim and resample==None: shortcut = inputs # Identity skip-connection else: shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1, mask_type=mask_type, he_init=False, biases=True, inputs=inputs) output = inputs if mask_type == None: output = nonlinearity(output) output = conv_1(name+'.Conv1', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init, weightnorm=False) output = 
nonlinearity(output) output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init, weightnorm=False, biases=False) if device_index == 0: output = lib.ops.batchnorm.Batchnorm(name+'.BN', [0,2,3], output, bn_is_training, bn_stats_iter) else: output = lib.ops.batchnorm.Batchnorm(name+'.BN', [0,2,3], output, bn_is_training, bn_stats_iter, update_moving_stats=False) else: output = nonlinearity(output) output_a = conv_1(name+'.Conv1A', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init) output_b = conv_1(name+'.Conv1B', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init) output = pixcnn_gated_nonlinearity(output_a, output_b) output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init) return shortcut + output def Enc1(images): output = images if WIDTH == 64: if EMBED_INPUTS: output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_0, filter_size=1, inputs=output, he_init=False) output = ResidualBlock('Enc1.InputRes0', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Enc1.InputRes', input_dim=DIM_0, output_dim=DIM_1, filter_size=3, resample='down', inputs=output) else: output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False) output = ResidualBlock('Enc1.InputRes', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample='down', inputs=output) else: if EMBED_INPUTS: output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False) else: output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False) output = ResidualBlock('Enc1.Res1Pre', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Enc1.Res1Pre2', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Enc1.Res1', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs=output) if LATENTS1_WIDTH == 16: output = ResidualBlock('Enc1.Res4Pre', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Enc1.Res4', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Enc1.Res4Post', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) mu_and_sigma = lib.ops.conv2d.Conv2D('Enc1.Out', input_dim=DIM_2, output_dim=2*LATENT_DIM_1, filter_size=1, inputs=output, he_init=False) else: output = ResidualBlock('Enc1.Res2Pre', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Enc1.Res2Pre2', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Enc1.Res2', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs=output) output = ResidualBlock('Enc1.Res3Pre', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Enc1.Res3Pre2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Enc1.Res3Pre3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) mu_and_sigma = lib.ops.conv2d.Conv2D('Enc1.Out', input_dim=DIM_3, 
output_dim=2*LATENT_DIM_1, filter_size=1, inputs=output, he_init=False) return mu_and_sigma, output def Dec1(latents, images): output = tf.clip_by_value(latents, -50., 50.) if LATENTS1_WIDTH == 16: output = lib.ops.conv2d.Conv2D('Dec1.Input', input_dim=LATENT_DIM_1, output_dim=DIM_2, filter_size=1, inputs=output, he_init=False) output = ResidualBlock('Dec1.Res1A', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Dec1.Res1B', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Dec1.Res1C', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) else: output = lib.ops.conv2d.Conv2D('Dec1.Input', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=1, inputs=output, he_init=False) output = ResidualBlock('Dec1.Res1', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Dec1.Res1Post', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Dec1.Res1Post2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Dec1.Res2', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', inputs=output) output = ResidualBlock('Dec1.Res2Post', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Dec1.Res2Post2', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Dec1.Res3', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', inputs=output) output = ResidualBlock('Dec1.Res3Post', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output) output = ResidualBlock('Dec1.Res3Post2', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output) if WIDTH == 64: output = ResidualBlock('Dec1.Res4', input_dim=DIM_1, output_dim=DIM_0, filter_size=3, resample='up', inputs=output) output = ResidualBlock('Dec1.Res4Post', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, inputs=output) if PIXEL_LEVEL_PIXCNN: if WIDTH == 64: if EMBED_INPUTS: masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_0, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False) else: masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS, output_dim=DIM_0, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False) else: if EMBED_INPUTS: masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False) else: masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False) # Make the variance of output and masked_images (roughly) match output /= 2 # Warning! 
Because of the masked convolutions it's very important that masked_images comes first in this concat output = tf.concat([masked_images, output], axis=1) if WIDTH == 64: output = ResidualBlock('Dec1.Pix2Res', input_dim=2*DIM_0, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output) output = ResidualBlock('Dec1.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output) output = ResidualBlock('Dec1.Pix4Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output) else: output = ResidualBlock('Dec1.Pix2Res', input_dim=2*DIM_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output) output = ResidualBlock('Dec1.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output) output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output) else: if WIDTH == 64: output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_0, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output) else: output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_1, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output) return tf.transpose( tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]), [0,2,3,4,1] ) def Enc2(h1): output = h1 if LATENTS1_WIDTH == 16: output = ResidualBlock('Enc2.Res0', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Enc2.Res1Pre', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Enc2.Res1Pre2', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Enc2.Res1', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', he_init=True, inputs=output) output = ResidualBlock('Enc2.Res2Pre', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Enc2.Res2Pre2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Enc2.Res2Pre3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Enc2.Res1A', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, resample='down', he_init=True, inputs=output) output = ResidualBlock('Enc2.Res2PreA', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Enc2.Res2', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Enc2.Res2Post', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output) output = tf.reshape(output, [-1, 4*4*DIM_4]) output = lib.ops.linear.Linear('Enc2.Output', input_dim=4*4*DIM_4, output_dim=2*LATENT_DIM_2, inputs=output) return output def Dec2(latents, targets): output = tf.clip_by_value(latents, -50., 50.) 
output = lib.ops.linear.Linear('Dec2.Input', input_dim=LATENT_DIM_2, output_dim=4*4*DIM_4, inputs=output) output = tf.reshape(output, [-1, DIM_4, 4, 4]) output = ResidualBlock('Dec2.Res1Pre', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Dec2.Res1', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Dec2.Res1Post', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Dec2.Res3', input_dim=DIM_4, output_dim=DIM_3, filter_size=3, resample='up', he_init=True, inputs=output) output = ResidualBlock('Dec2.Res3Post', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Dec2.Res3Post2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Dec2.Res3Post3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) if LATENTS1_WIDTH == 16: output = ResidualBlock('Dec2.Res3Post5', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', he_init=True, inputs=output) output = ResidualBlock('Dec2.Res3Post6', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Dec2.Res3Post7', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('Dec2.Res3Post8', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output) if HIGHER_LEVEL_PIXCNN: if LATENTS1_WIDTH == 16: masked_targets = lib.ops.conv2d.Conv2D('Dec2.Pix1', input_dim=LATENT_DIM_1, output_dim=DIM_2, filter_size=5, mask_type=('a', PIX_2_N_BLOCKS), he_init=False, inputs=targets) else: masked_targets = lib.ops.conv2d.Conv2D('Dec2.Pix1', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=5, mask_type=('a', PIX_2_N_BLOCKS), he_init=False, inputs=targets) # Make the variance of output and masked_targets roughly match output /= 2 output = tf.concat([masked_targets, output], axis=1) if LATENTS1_WIDTH == 16: output = ResidualBlock('Dec2.Pix2Res', input_dim=2*DIM_2, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output) else: output = ResidualBlock('Dec2.Pix2Res', input_dim=2*DIM_3, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output) output = ResidualBlock('Dec2.Pix3Res', input_dim=DIM_PIX_2, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output) output = ResidualBlock('Dec2.Pix4Res', input_dim=DIM_PIX_2, output_dim=DIM_PIX_2, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output) output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_PIX_2, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output) else: if LATENTS1_WIDTH == 16: output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_2, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output) else: output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_3, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output) return output # Only for 32px_cifar, 64px_big_onelevel, and MNIST. Needs modification for others. 
def EncFull(images): output = images if WIDTH == 32: #64 if EMBED_INPUTS: output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_0, filter_size=1, inputs=output, he_init=False) else: output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS, output_dim=DIM_0, filter_size=1, inputs=output, he_init=False) output = ResidualBlock('EncFull.Res1', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res2', input_dim=DIM_0, output_dim=DIM_1, filter_size=3, resample='down', inputs=output) output = ResidualBlock('EncFull.Res3', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res4', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res5', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs=output) output = ResidualBlock('EncFull.Res6', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res7', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res8', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs=output) output = ResidualBlock('EncFull.Res9', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res10', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res11', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, resample='down', inputs=output) output = ResidualBlock('EncFull.Res12', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res13', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs=output) output = tf.reshape(output, [-1, 2*2*DIM_4]) output = lib.ops.linear.Linear('EncFull.Output', input_dim=2*2*DIM_4, output_dim=2*LATENT_DIM_2, initialization='glorot', inputs=output) else: if EMBED_INPUTS: output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False) else: output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False) output = ResidualBlock('EncFull.Res1', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res2', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs=output) output = ResidualBlock('EncFull.Res3', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res4', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs=output) output = ResidualBlock('EncFull.Res5', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) output = ResidualBlock('EncFull.Res6', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output) output = tf.reduce_mean(output, reduction_indices=[2,3]) output = lib.ops.linear.Linear('EncFull.Output', input_dim=DIM_3, output_dim=2*LATENT_DIM_2, initialization='glorot', inputs=output) return output # Only for 32px_CIFAR, 64px_big_onelevel and MNIST. Needs modification for others. def DecFull(latents, images): output = tf.clip_by_value(latents, -50., 50.) if WIDTH == 32: # 64:LEILAEDIT. 
Also changed 4*4 to 2*2 and 4,4 to 2,2 in the two lines below output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=2*2*DIM_4, initialization='glorot', inputs=output) output = tf.reshape(output, [-1, DIM_4, 2, 2]) output = ResidualBlock('DecFull.Res2', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res3', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res4', input_dim=DIM_4, output_dim=DIM_3, filter_size=3, resample='up', he_init=True, inputs=output) output = ResidualBlock('DecFull.Res5', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res6', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res7', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', he_init=True, inputs=output) output = ResidualBlock('DecFull.Res8', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res9', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res10', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', he_init=True, inputs=output) output = ResidualBlock('DecFull.Res11', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res12', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res13', input_dim=DIM_1, output_dim=DIM_0, filter_size=3, resample='up', he_init=True, inputs=output) output = ResidualBlock('DecFull.Res14', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, he_init=True, inputs=output) else: output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=DIM_3, initialization='glorot', inputs=output) output = tf.reshape(tf.tile(tf.reshape(output, [-1, DIM_3, 1]), [1, 1, 49]), [-1, DIM_3, 7, 7]) output = ResidualBlock('DecFull.Res2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res4', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', he_init=True, inputs=output) output = ResidualBlock('DecFull.Res5', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output) output = ResidualBlock('DecFull.Res6', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', he_init=True, inputs=output) output = ResidualBlock('DecFull.Res7', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, he_init=True, inputs=output) if WIDTH == 32: #64: dim = DIM_0 else: dim = DIM_1 if PIXEL_LEVEL_PIXCNN: if EMBED_INPUTS: masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=dim, filter_size=3, inputs=images, mask_type=('a', N_CHANNELS), he_init=False) else: masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS, output_dim=dim, filter_size=3, inputs=images, mask_type=('a', N_CHANNELS), he_init=False) # Warning! 
Because of the masked convolutions it's very important that masked_images comes first in this concat output = tf.concat([masked_images, output], axis=1) output = ResidualBlock('DecFull.Pix2Res', input_dim=2*dim, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output) output = ResidualBlock('DecFull.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output) output = ResidualBlock('DecFull.Pix4Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output) if WIDTH != 32: #64: LEILAEDIT output = ResidualBlock('DecFull.Pix5Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output) output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output) else: output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=dim, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output) return tf.transpose( tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]), [0,2,3,4,1] ) def split(mu_and_logsig): mu, logsig = tf.split(mu_and_logsig, 2, axis=1) sig = 0.5 * (tf.nn.softsign(logsig)+1) logsig = tf.log(sig) return mu, logsig, sig def clamp_logsig_and_sig(logsig, sig): # Early during training (see BETA_ITERS), stop sigma from going too low floor = 1. - tf.minimum(1., tf.cast(total_iters, 'float32') / BETA_ITERS) log_floor = tf.log(floor) return tf.maximum(logsig, log_floor), tf.maximum(sig, floor) scaled_images = (tf.cast(images, 'float32') - 128.) / 64. if EMBED_INPUTS: embedded_images = lib.ops.embedding.Embedding('Embedding', 256, DIM_EMBED, images) embedded_images = tf.transpose(embedded_images, [0,4,1,2,3]) embedded_images = tf.reshape(embedded_images, [-1, DIM_EMBED*N_CHANNELS, HEIGHT, WIDTH]) if MODE == 'one_level': # Layer 1 if EMBED_INPUTS: mu_and_logsig1 = EncFull(embedded_images) else: mu_and_logsig1 = EncFull(scaled_images) mu1, logsig1, sig1 = split(mu_and_logsig1) eps = tf.random_normal(tf.shape(mu1)) latents1 = mu1 # LEILAEDIT if EMBED_INPUTS: outputs1 = DecFull(latents1, embedded_images) else: outputs1 = DecFull(latents1, scaled_images) reconst_cost = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( logits=tf.reshape(outputs1, [-1, 256]), labels=tf.reshape(images, [-1]) ) ) # Assembly # An alpha of exactly 0 can sometimes cause inf/nan values, so we're # careful to avoid it. 
alpha = tf.minimum(1., tf.cast(total_iters+1, 'float32') / ALPHA1_ITERS) * KL_PENALTY kl_cost_1 = tf.reduce_mean( lib.ops.kl_unit_gaussian.kl_unit_gaussian( mu1, logsig1, sig1 ) ) kl_cost_1 *= float(LATENT_DIM_2) / (N_CHANNELS * WIDTH * HEIGHT) cost = reconst_cost + (alpha * kl_cost_1) elif MODE == 'two_level': # Layer 1 if EMBED_INPUTS: mu_and_logsig1, h1 = Enc1(embedded_images) else: mu_and_logsig1, h1 = Enc1(scaled_images) mu1, logsig1, sig1 = split(mu_and_logsig1) if mu1.get_shape().as_list()[2] != LATENTS1_HEIGHT: raise Exception("LATENTS1_HEIGHT doesn't match mu1 shape!") if mu1.get_shape().as_list()[3] != LATENTS1_WIDTH: raise Exception("LATENTS1_WIDTH doesn't match mu1 shape!") eps = tf.random_normal(tf.shape(mu1)) latents1 = mu1 + (eps * sig1) if EMBED_INPUTS: outputs1 = Dec1(latents1, embedded_images) outputs1_sample = Dec1(latents1_sample, embedded_images) else: outputs1 = Dec1(latents1, scaled_images) outputs1_sample = Dec1(latents1_sample, scaled_images) reconst_cost = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( logits=tf.reshape(outputs1, [-1, 256]), labels=tf.reshape(images, [-1]) ) ) # Layer 2 mu_and_logsig2 = Enc2(h1) mu2, logsig2, sig2 = split(mu_and_logsig2) eps = tf.random_normal(tf.shape(mu2)) latents2 = mu2 + (eps * sig2) outputs2 = Dec2(latents2, latents1) mu1_prior, logsig1_prior, sig1_prior = split(outputs2) logsig1_prior, sig1_prior = clamp_logsig_and_sig(logsig1_prior, sig1_prior) mu1_prior = 2. * tf.nn.softsign(mu1_prior / 2.) # Assembly # An alpha of exactly 0 can sometimes cause inf/nan values, so we're # careful to avoid it. alpha1 = tf.minimum(1., tf.cast(total_iters+1, 'float32') / ALPHA1_ITERS) * KL_PENALTY alpha2 = tf.minimum(1., tf.cast(total_iters+1, 'float32') / ALPHA2_ITERS) * alpha1# * KL_PENALTY kl_cost_1 = tf.reduce_mean( lib.ops.kl_gaussian_gaussian.kl_gaussian_gaussian( mu1, logsig1, sig1, mu1_prior, logsig1_prior, sig1_prior ) ) kl_cost_2 = tf.reduce_mean( lib.ops.kl_unit_gaussian.kl_unit_gaussian( mu2, logsig2, sig2 ) ) kl_cost_1 *= float(LATENT_DIM_1 * LATENTS1_WIDTH * LATENTS1_HEIGHT) / (N_CHANNELS * WIDTH * HEIGHT) kl_cost_2 *= float(LATENT_DIM_2) / (N_CHANNELS * WIDTH * HEIGHT) cost = reconst_cost + (alpha1 * kl_cost_1) + (alpha2 * kl_cost_2) tower_cost.append(cost) if MODE == 'two_level': tower_outputs1_sample.append(outputs1_sample) full_cost = tf.reduce_mean( tf.concat([tf.expand_dims(x, 0) for x in tower_cost], axis=0), 0 ) if MODE == 'two_level': full_outputs1_sample = tf.concat(tower_outputs1_sample, axis=0) # Sampling if MODE == 'one_level': ch_sym = tf.placeholder(tf.int32, shape=None) y_sym = tf.placeholder(tf.int32, shape=None) x_sym = tf.placeholder(tf.int32, shape=None) logits = tf.reshape(tf.slice(outputs1, tf.stack([0, ch_sym, y_sym, x_sym, 0]), tf.stack([-1, 1, 1, 1, -1])), [-1, 256]) dec1_fn_out = tf.multinomial(logits, 1)[:, 0] def dec1_fn(_latents, _targets, _ch, _y, _x): return session.run(dec1_fn_out, feed_dict={latents1: _latents, images: _targets, ch_sym: _ch, y_sym: _y, x_sym: _x, total_iters: 99999, bn_is_training: False, bn_stats_iter:0}) def enc_fn(_images): return session.run(latents1, feed_dict={images: _images, total_iters: 99999, bn_is_training: False, bn_stats_iter:0}) sample_fn_latents1 = np.random.normal(size=(1, LATENT_DIM_2)).astype('float32') def generate_and_save_samples(tag): from keras.utils import np_utils x_augmentation_set = np.zeros((1, N_CHANNELS, HEIGHT, WIDTH)) #LEILEDIT: to enable .npy image saving y_augmentation_set = np.zeros((1, 1, NUM_CLASSES)) #LEILEDIT: to enable .npy 
image saving. # Function to translate numeric images into plots def color_grid_vis(X, nh, nw, save_path): # from github.com/Newmu X = X.transpose(0,2,3,1) h, w = X[0].shape[:2] img = np.zeros((h*nh, w*nw, 3)) for n, x in enumerate(X): j = n/nw i = n%nw img[j*h:j*h+h, i*w:i*w+w, :] = x imsave(OUT_DIR + '/' + save_path, img) numsamples = 1125 #pvals = np.linspace(0.2, 0.8, num=4) #pvals = np.linspace(0.2, 0.8, num=1) x_train_set_array = np.array(x_train_set) y_train_set_array = np.array(y_train_set) for imagenum in range(numsamples): pvals = np.random.beta(0.2, 0.2, 1) imageindices = random.sample(range(x_train_set.shape[0]),2) imageindex1 = imageindices[0] imageindex2 = imageindices[1] # Draw the corresponding images and labels from the training data image1 = x_train_set[imageindex1,:] image2 = x_train_set[imageindex2,:] label1 = y_train_set[imageindex1,:] label2 = y_train_set[imageindex2,:] # Reshape image1 = image1.reshape(1, N_CHANNELS, HEIGHT, WIDTH) image2 = image2.reshape(1, N_CHANNELS, HEIGHT, WIDTH) label1 = label1.reshape(1, 1) label2 = label2.reshape(1, 1) # Save the original images #print "Saving original samples" #color_grid_vis( # image1, # 1, # 1, # 'original_1_classes{}and{}_num{}.png'.format(label1,label2,imagenum) #) #color_grid_vis( # image2, # 1, # 1, # 'original_2_classes{}and{}_num{}.png'.format(label1,label2,imagenum) #) # Encode the images image_code1 = enc_fn(image1) image_code2 = enc_fn(image2) # Change labels to matrix form before performing interpolations label1 = np_utils.to_categorical(label1, NUM_CLASSES) label2 = np_utils.to_categorical(label2, NUM_CLASSES) # Combine the latent codes for p in pvals: new_code = np.multiply(p,image_code1) + np.multiply((1-p),image_code2) new_label = np.multiply(p,label1) + np.multiply((1-p),label2) new_label = new_label.reshape(1,1,NUM_CLASSES) samples = np.zeros( (1, N_CHANNELS, HEIGHT, WIDTH), dtype='int32') print "Generating samples" for y in xrange(HEIGHT): for x in xrange(WIDTH): for ch in xrange(N_CHANNELS): next_sample = dec1_fn(new_code, samples, ch, y, x) samples[:,ch,y,x] = next_sample x_augmentation_set = np.concatenate((x_augmentation_set, samples), axis=0)#LEILAEDIT for .npy saving y_augmentation_set = np.concatenate((y_augmentation_set, new_label), axis=0)#LEILAEDIT for .npy saving color_grid_vis( samples, 1, 1, 'interpolation1_classes{}and{}_pval{}_num{}.png'.format(label1,label2,p,imagenum) ) x_augmentation_array = np.delete(x_augmentation_set, (0), axis=0) y_augmentation_array = np.delete(y_augmentation_set, (0), axis=0) x_augmentation_array = x_augmentation_array.astype(np.uint8) np.save(OUT_DIR + '/' + 'x_augmentation_array_mean_beta_largesample', x_augmentation_array) #LEILAEDIT for .npy saving np.save(OUT_DIR + '/' + 'y_augmentation_array_mean_beta_largesample', y_augmentation_array) #LEILAEDIT for .npy saving # Run if MODE == 'one_level': prints=[ ('alpha', alpha), ('reconst', reconst_cost), ('kl1', kl_cost_1) ] decayed_lr = tf.train.exponential_decay( LR, total_iters, LR_DECAY_AFTER, LR_DECAY_FACTOR, staircase=True ) lib.sampling_loop_cifar_filter_3.sampling_loop( #LEIlAEDIT. 
TODO: update to remove uncessary arguments session=session, inputs=[total_iters, all_images], inject_iteration=True, bn_vars=(bn_is_training, bn_stats_iter), cost=full_cost, stop_after=TIMES['stop_after'], prints=prints, optimizer=tf.train.AdamOptimizer(decayed_lr), train_data=train_data, test_data=dev_data, callback=generate_and_save_samples, callback_every=TIMES['callback_every'], test_every=TIMES['test_every'], save_checkpoints=True )
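# ---------------------------------------------------------------------------
# Standalone illustration (a sketch, not invoked by the script above) of the
# latent-mixing step inside generate_and_save_samples: two encoded training
# images are combined with a Beta(0.2, 0.2) weight, which is strongly bimodal,
# so most augmented samples stay close to one of the two originals. The arrays
# below are stand-ins for enc_fn() outputs and one-hot labels.
import numpy as np

rng = np.random.RandomState(0)
code1, code2 = rng.randn(1, 256), rng.randn(1, 256)   # stand-ins for enc_fn(image) latents (LATENT_DIM_2 = 256)
label1, label2 = np.eye(10)[[3]], np.eye(10)[[7]]     # stand-ins for one-hot class labels (NUM_CLASSES = 10)

p = rng.beta(0.2, 0.2)                                # mixing weight, concentrated near 0 and 1
new_code = p * code1 + (1 - p) * code2                # interpolated latent, decoded pixel-by-pixel via dec1_fn()
new_label = p * label1 + (1 - p) * label2             # matching soft label stored alongside the generated image
# ---------------------------------------------------------------------------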
import collections import json import mock from django.test import TestCase from django.test.utils import override_settings from django.urls import reverse from wagtail.api.v2 import signal_handlers from wagtail.core.models import Page, Site from wagtail.tests.demosite import models from wagtail.tests.testapp.models import StreamPage def get_total_page_count(): # Need to take away 1 as the root page is invisible over the API return Page.objects.live().public().count() - 1 class TestPageListing(TestCase): fixtures = ['demosite.json'] def get_response(self, **params): return self.client.get(reverse('wagtailapi_v2:pages:listing'), params) def get_page_id_list(self, content): return [page['id'] for page in content['items']] # BASIC TESTS def test_basic(self): response = self.get_response() self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-type'], 'application/json') # Will crash if the JSON is invalid content = json.loads(response.content.decode('UTF-8')) # Check that the meta section is there self.assertIn('meta', content) self.assertIsInstance(content['meta'], dict) # Check that the total count is there and correct self.assertIn('total_count', content['meta']) self.assertIsInstance(content['meta']['total_count'], int) self.assertEqual(content['meta']['total_count'], get_total_page_count()) # Check that the items section is there self.assertIn('items', content) self.assertIsInstance(content['items'], list) # Check that each page has a meta section with type, detail_url, html_url, slug and first_published_at attributes for page in content['items']: self.assertIn('meta', page) self.assertIsInstance(page['meta'], dict) self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'}) def test_unpublished_pages_dont_appear_in_list(self): total_count = get_total_page_count() page = models.BlogEntryPage.objects.get(id=16) page.unpublish() response = self.get_response() content = json.loads(response.content.decode('UTF-8')) self.assertEqual(content['meta']['total_count'], total_count - 1) def test_private_pages_dont_appear_in_list(self): total_count = get_total_page_count() page = models.BlogIndexPage.objects.get(id=5) page.view_restrictions.create(password='test') new_total_count = get_total_page_count() self.assertNotEqual(total_count, new_total_count) response = self.get_response() content = json.loads(response.content.decode('UTF-8')) self.assertEqual(content['meta']['total_count'], new_total_count) # TYPE FILTER def test_type_filter_items_are_all_blog_entries(self): response = self.get_response(type='demosite.BlogEntryPage') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(page['meta']['type'], 'demosite.BlogEntryPage') # No specific fields available by default self.assertEqual(set(page.keys()), {'id', 'meta', 'title'}) def test_type_filter_total_count(self): response = self.get_response(type='demosite.BlogEntryPage') content = json.loads(response.content.decode('UTF-8')) # Total count must be reduced as this filters the results self.assertEqual(content['meta']['total_count'], 3) def test_type_filter_multiple(self): response = self.get_response(type='demosite.BlogEntryPage,demosite.EventPage') content = json.loads(response.content.decode('UTF-8')) blog_page_seen = False event_page_seen = False for page in content['items']: self.assertIn(page['meta']['type'], ['demosite.BlogEntryPage', 'demosite.EventPage']) if page['meta']['type'] == 'demosite.BlogEntryPage': 
blog_page_seen = True elif page['meta']['type'] == 'demosite.EventPage': event_page_seen = True # Only generic fields available self.assertEqual(set(page.keys()), {'id', 'meta', 'title'}) self.assertTrue(blog_page_seen, "No blog pages were found in the items") self.assertTrue(event_page_seen, "No event pages were found in the items") def test_non_existant_type_gives_error(self): response = self.get_response(type='demosite.IDontExist') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "type doesn't exist"}) def test_non_page_type_gives_error(self): response = self.get_response(type='auth.User') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "type doesn't exist"}) # FIELDS def test_fields_default(self): response = self.get_response(type='demosite.BlogEntryPage') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'meta', 'title'}) self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'}) def test_fields(self): response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'feed_image'}) def test_remove_fields(self): response = self.get_response(fields='-title') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'meta'}) def test_remove_meta_fields(self): response = self.get_response(fields='-html_url') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'meta', 'title'}) self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'slug', 'first_published_at'}) def test_remove_all_meta_fields(self): response = self.get_response(fields='-type,-detail_url,-slug,-first_published_at,-html_url') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'title'}) def test_remove_id_field(self): response = self.get_response(fields='-id') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'meta', 'title'}) def test_all_fields(self): response = self.get_response(type='demosite.BlogEntryPage', fields='*') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'}) self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'seo_title', 'slug', 'html_url', 'search_description'}) def test_all_fields_then_remove_something(self): response = self.get_response(type='demosite.BlogEntryPage', fields='*,-title,-date,-seo_title') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'meta', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'}) self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'slug', 'html_url', 'search_description'}) def 
test_remove_all_fields(self): response = self.get_response(type='demosite.BlogEntryPage', fields='_,id,type') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'meta'}) self.assertEqual(set(page['meta'].keys()), {'type'}) def test_nested_fields(self): response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(width,height)') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'}) def test_remove_nested_fields(self): response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(-title)') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta'}) def test_all_nested_fields(self): response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(*)') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'}) def test_remove_all_nested_fields(self): response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(_,id)') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page['feed_image'].keys()), {'id'}) def test_nested_nested_fields(self): response = self.get_response(type='demosite.BlogEntryPage', fields='carousel_items(image(width,height))') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: for carousel_item in page['carousel_items']: # Note: inline objects default to displaying all fields self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'image', 'embed_url', 'caption', 'link'}) self.assertEqual(set(carousel_item['image'].keys()), {'id', 'meta', 'title', 'width', 'height'}) def test_fields_child_relation(self): response = self.get_response(type='demosite.BlogEntryPage', fields='title,related_links') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'related_links'}) self.assertIsInstance(page['related_links'], list) def test_fields_foreign_key(self): response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: feed_image = page['feed_image'] if feed_image is not None: self.assertIsInstance(feed_image, dict) self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'}) self.assertIsInstance(feed_image['id'], int) self.assertIsInstance(feed_image['meta'], dict) self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'}) self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image') self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/api/v2beta/images/%d/' % feed_image['id']) def test_fields_tags(self): response = self.get_response(type='demosite.BlogEntryPage', fields='tags') content = json.loads(response.content.decode('UTF-8')) for page in content['items']: self.assertEqual(set(page.keys()), {'id', 'meta', 'tags', 'title'}) self.assertIsInstance(page['tags'], list) def test_fields_ordering(self): response = self.get_response(type='demosite.BlogEntryPage', fields='date,title,feed_image,related_links') # Will crash if the JSON is invalid content = 
json.loads(response.content.decode('UTF-8')) # Test field order content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8')) field_order = [ 'id', 'meta', 'title', 'date', 'feed_image', 'related_links', ] self.assertEqual(list(content['items'][0].keys()), field_order) def test_star_in_wrong_position_gives_error(self): response = self.get_response(fields='title,*') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "fields error: '*' must be in the first position"}) def test_unknown_nested_fields_give_error(self): response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(123,title,abc)') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: 123, abc"}) def test_parent_field_gives_error(self): # parent field isn't allowed in listings response = self.get_response(fields='parent') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: parent"}) def test_fields_without_type_gives_error(self): response = self.get_response(fields='title,related_links') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: related_links"}) def test_fields_which_are_not_in_api_fields_gives_error(self): response = self.get_response(fields='path') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: path"}) def test_fields_unknown_field_gives_error(self): response = self.get_response(fields='123,title,abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: 123, abc"}) def test_fields_remove_unknown_field_gives_error(self): response = self.get_response(fields='-123,-title,-abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: 123, abc"}) def test_nested_fields_on_non_relational_field_gives_error(self): response = self.get_response(type='demosite.BlogEntryPage', fields='title(foo,bar)') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "'title' does not support nested fields"}) # FILTERING def test_filtering_exact_filter(self): response = self.get_response(title='Home page') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [2]) def test_filtering_exact_filter_on_specific_field(self): response = self.get_response(type='demosite.BlogEntryPage', date='2013-12-02') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [16]) def test_filtering_on_id(self): response = self.get_response(id=16) content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [16]) def test_filtering_on_boolean(self): response = self.get_response(show_in_menus='false') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) 
self.assertEqual(page_id_list, [8, 9, 16, 18, 19, 17]) def test_filtering_doesnt_work_on_specific_fields_without_type(self): response = self.get_response(date='2013-12-02') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: date"}) def test_filtering_tags(self): response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [16, 18]) def test_filtering_multiple_tags(self): response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail,bird') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [16]) def test_filtering_unknown_field_gives_error(self): response = self.get_response(not_a_field='abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: not_a_field"}) def test_filtering_int_validation(self): response = self.get_response(id='abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "field filter error. 'abc' is not a valid value for id (invalid literal for int() with base 10: 'abc')"}) def test_filtering_boolean_validation(self): response = self.get_response(show_in_menus='abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "field filter error. 'abc' is not a valid value for show_in_menus (expected 'true' or 'false', got 'abc')"}) # CHILD OF FILTER def test_child_of_filter(self): response = self.get_response(child_of=5) content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [16, 18, 19]) def test_child_of_root(self): # "root" gets children of the homepage of the current site response = self.get_response(child_of='root') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [4, 5, 6, 20, 12]) def test_child_of_with_type(self): response = self.get_response(type='demosite.EventPage', child_of=5) content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, []) def test_child_of_unknown_page_gives_error(self): response = self.get_response(child_of=1000) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "parent page doesn't exist"}) def test_child_of_not_integer_gives_error(self): response = self.get_response(child_of='abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "child_of must be a positive integer"}) def test_child_of_page_thats_not_in_same_site_gives_error(self): # Root page is not in any site, so pretend it doesn't exist response = self.get_response(child_of=1) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "parent page doesn't exist"}) # DESCENDANT OF FILTER def test_descendant_of_filter(self): 
response = self.get_response(descendant_of=6) content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [10, 15, 17, 21, 22, 23]) def test_descendant_of_root(self): # "root" gets decendants of the homepage of the current site # Basically returns every page except the homepage response = self.get_response(descendant_of='root') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12]) def test_descendant_of_with_type(self): response = self.get_response(type='tests.EventPage', descendant_of=6) content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, []) def test_descendant_of_unknown_page_gives_error(self): response = self.get_response(descendant_of=1000) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "ancestor page doesn't exist"}) def test_descendant_of_not_integer_gives_error(self): response = self.get_response(descendant_of='abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "descendant_of must be a positive integer"}) def test_descendant_of_page_thats_not_in_same_site_gives_error(self): # Root page is not in any site, so pretend it doesn't exist response = self.get_response(descendant_of=1) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "ancestor page doesn't exist"}) def test_descendant_of_when_filtering_by_child_of_gives_error(self): response = self.get_response(descendant_of=6, child_of=5) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "filtering by descendant_of with child_of is not supported"}) # ORDERING def test_ordering_default(self): response = self.get_response() content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [2, 4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12]) def test_ordering_by_title(self): response = self.get_response(order='title') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [21, 22, 19, 23, 5, 16, 18, 12, 14, 8, 9, 4, 2, 13, 20, 17, 6, 10, 15]) def test_ordering_by_title_backwards(self): response = self.get_response(order='-title') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [15, 10, 6, 17, 20, 13, 2, 4, 9, 8, 14, 12, 18, 16, 5, 23, 19, 22, 21]) def test_ordering_by_random(self): response_1 = self.get_response(order='random') content_1 = json.loads(response_1.content.decode('UTF-8')) page_id_list_1 = self.get_page_id_list(content_1) response_2 = self.get_response(order='random') content_2 = json.loads(response_2.content.decode('UTF-8')) page_id_list_2 = self.get_page_id_list(content_2) self.assertNotEqual(page_id_list_1, page_id_list_2) def test_ordering_by_random_backwards_gives_error(self): response = self.get_response(order='-random') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) 
self.assertEqual(content, {'message': "cannot order by 'random' (unknown field)"}) def test_ordering_by_random_with_offset_gives_error(self): response = self.get_response(order='random', offset=10) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "random ordering with offset is not supported"}) def test_ordering_default_with_type(self): response = self.get_response(type='demosite.BlogEntryPage') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [16, 18, 19]) def test_ordering_by_title_with_type(self): response = self.get_response(type='demosite.BlogEntryPage', order='title') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [19, 16, 18]) def test_ordering_by_specific_field_with_type(self): response = self.get_response(type='demosite.BlogEntryPage', order='date') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [16, 18, 19]) def test_ordering_by_unknown_field_gives_error(self): response = self.get_response(order='not_a_field') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "cannot order by 'not_a_field' (unknown field)"}) # LIMIT def test_limit_only_two_items_returned(self): response = self.get_response(limit=2) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(len(content['items']), 2) def test_limit_total_count(self): response = self.get_response(limit=2) content = json.loads(response.content.decode('UTF-8')) # The total count must not be affected by "limit" self.assertEqual(content['meta']['total_count'], get_total_page_count()) def test_limit_not_integer_gives_error(self): response = self.get_response(limit='abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "limit must be a positive integer"}) def test_limit_too_high_gives_error(self): response = self.get_response(limit=1000) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "limit cannot be higher than 20"}) @override_settings(WAGTAILAPI_LIMIT_MAX=None) def test_limit_max_none_gives_no_errors(self): response = self.get_response(limit=1000000) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 200) self.assertEqual(len(content['items']), get_total_page_count()) @override_settings(WAGTAILAPI_LIMIT_MAX=10) def test_limit_maximum_can_be_changed(self): response = self.get_response(limit=20) content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "limit cannot be higher than 10"}) @override_settings(WAGTAILAPI_LIMIT_MAX=2) def test_limit_default_changes_with_max(self): # The default limit is 20. If WAGTAILAPI_LIMIT_MAX is less than that, # the default should change accordingly. 
response = self.get_response() content = json.loads(response.content.decode('UTF-8')) self.assertEqual(len(content['items']), 2) # OFFSET def test_offset_5_usually_appears_5th_in_list(self): response = self.get_response() content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list.index(5), 4) def test_offset_5_moves_after_offset(self): response = self.get_response(offset=4) content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list.index(5), 0) def test_offset_total_count(self): response = self.get_response(offset=10) content = json.loads(response.content.decode('UTF-8')) # The total count must not be affected by "offset" self.assertEqual(content['meta']['total_count'], get_total_page_count()) def test_offset_not_integer_gives_error(self): response = self.get_response(offset='abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "offset must be a positive integer"}) # SEARCH def test_search_for_blog(self): response = self.get_response(search='blog') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) # Check that the items are the blog index and three blog pages self.assertEqual(set(page_id_list), set([5, 16, 18, 19])) def test_search_with_type(self): response = self.get_response(type='demosite.BlogEntryPage', search='blog') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(set(page_id_list), set([16, 18, 19])) def test_search_with_filter(self): response = self.get_response(title="Another blog post", search='blog', order='title') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [19]) def test_search_with_filter_on_non_filterable_field(self): response = self.get_response(type='demosite.BlogEntryPage', body="foo", search='blog', order='title') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, { 'message': "cannot filter by 'body' while searching (field is not indexed)" }) def test_search_with_order(self): response = self.get_response(search='blog', order='title') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(page_id_list, [19, 5, 16, 18]) def test_search_with_order_on_non_filterable_field(self): response = self.get_response(type='demosite.BlogEntryPage', search='blog', order='body') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, { 'message': "cannot order by 'body' while searching (field is not indexed)" }) @override_settings(WAGTAILAPI_SEARCH_ENABLED=False) def test_search_when_disabled_gives_error(self): response = self.get_response(search='blog') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "search is disabled"}) def test_search_when_filtering_by_tag_gives_error(self): response = self.get_response(type='demosite.BlogEntryPage', search='blog', tags='wagtail') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "filtering by tag with a search query is not supported"}) def 
test_search_operator_and(self): response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='and') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(set(page_id_list), set([18])) def test_search_operator_or(self): response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='or') content = json.loads(response.content.decode('UTF-8')) page_id_list = self.get_page_id_list(content) self.assertEqual(set(page_id_list), set([16, 18, 19])) def test_empty_searches_work(self): response = self.get_response(search='') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-type'], 'application/json') self.assertEqual(content['meta']['total_count'], 0) # REGRESSION TESTS def test_issue_3967(self): # The API crashed whenever the listing view was called without a site configured Site.objects.all().delete() response = self.get_response() self.assertEqual(response.status_code, 200) class TestPageDetail(TestCase): fixtures = ['demosite.json'] def get_response(self, page_id, **params): return self.client.get(reverse('wagtailapi_v2:pages:detail', args=(page_id, )), params) def test_basic(self): response = self.get_response(16) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-type'], 'application/json') # Will crash if the JSON is invalid content = json.loads(response.content.decode('UTF-8')) # Check the id field self.assertIn('id', content) self.assertEqual(content['id'], 16) # Check that the meta section is there self.assertIn('meta', content) self.assertIsInstance(content['meta'], dict) # Check the meta type self.assertIn('type', content['meta']) self.assertEqual(content['meta']['type'], 'demosite.BlogEntryPage') # Check the meta detail_url self.assertIn('detail_url', content['meta']) self.assertEqual(content['meta']['detail_url'], 'http://localhost/api/v2beta/pages/16/') # Check the meta html_url self.assertIn('html_url', content['meta']) self.assertEqual(content['meta']['html_url'], 'http://localhost/blog-index/blog-post/') # Check the parent field self.assertIn('parent', content['meta']) self.assertIsInstance(content['meta']['parent'], dict) self.assertEqual(set(content['meta']['parent'].keys()), {'id', 'meta', 'title'}) self.assertEqual(content['meta']['parent']['id'], 5) self.assertIsInstance(content['meta']['parent']['meta'], dict) self.assertEqual(set(content['meta']['parent']['meta'].keys()), {'type', 'detail_url', 'html_url'}) self.assertEqual(content['meta']['parent']['meta']['type'], 'demosite.BlogIndexPage') self.assertEqual(content['meta']['parent']['meta']['detail_url'], 'http://localhost/api/v2beta/pages/5/') self.assertEqual(content['meta']['parent']['meta']['html_url'], 'http://localhost/blog-index/') # Check that the custom fields are included self.assertIn('date', content) self.assertIn('body', content) self.assertIn('tags', content) self.assertIn('feed_image', content) self.assertIn('related_links', content) self.assertIn('carousel_items', content) # Check that the date was serialised properly self.assertEqual(content['date'], '2013-12-02') # Check that the tags were serialised properly self.assertEqual(content['tags'], ['bird', 'wagtail']) # Check that the feed image was serialised properly self.assertIsInstance(content['feed_image'], dict) self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title'}) 
self.assertEqual(content['feed_image']['id'], 7) self.assertIsInstance(content['feed_image']['meta'], dict) self.assertEqual(set(content['feed_image']['meta'].keys()), {'type', 'detail_url'}) self.assertEqual(content['feed_image']['meta']['type'], 'wagtailimages.Image') self.assertEqual(content['feed_image']['meta']['detail_url'], 'http://localhost/api/v2beta/images/7/') # Check that the feed images' thumbnail was serialised properly self.assertEqual(content['feed_image_thumbnail'], { # This is OK because it tells us it used ImageRenditionField to generate the output 'error': 'SourceImageIOError' }) # Check that the child relations were serialised properly self.assertEqual(content['related_links'], []) for carousel_item in content['carousel_items']: self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'embed_url', 'link', 'caption', 'image'}) self.assertEqual(set(carousel_item['meta'].keys()), {'type'}) def test_meta_parent_id_doesnt_show_root_page(self): # Root page isn't in the site so don't show it if the user is looking at the home page response = self.get_response(2) content = json.loads(response.content.decode('UTF-8')) self.assertIsNone(content['meta']['parent']) def test_field_ordering(self): response = self.get_response(16) # Will crash if the JSON is invalid content = json.loads(response.content.decode('UTF-8')) # Test field order content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8')) field_order = [ 'id', 'meta', 'title', 'body', 'tags', 'date', 'feed_image', 'feed_image_thumbnail', 'carousel_items', 'related_links', ] self.assertEqual(list(content.keys()), field_order) def test_null_foreign_key(self): models.BlogEntryPage.objects.filter(id=16).update(feed_image_id=None) response = self.get_response(16) content = json.loads(response.content.decode('UTF-8')) self.assertIn('related_links', content) self.assertEqual(content['feed_image'], None) # FIELDS def test_remove_fields(self): response = self.get_response(16, fields='-title') content = json.loads(response.content.decode('UTF-8')) self.assertIn('id', set(content.keys())) self.assertNotIn('title', set(content.keys())) def test_remove_meta_fields(self): response = self.get_response(16, fields='-html_url') content = json.loads(response.content.decode('UTF-8')) self.assertIn('detail_url', set(content['meta'].keys())) self.assertNotIn('html_url', set(content['meta'].keys())) def test_remove_all_meta_fields(self): response = self.get_response(16, fields='-type,-detail_url,-slug,-first_published_at,-html_url,-search_description,-show_in_menus,-parent,-seo_title') content = json.loads(response.content.decode('UTF-8')) self.assertIn('id', set(content.keys())) self.assertNotIn('meta', set(content.keys())) def test_remove_id_field(self): response = self.get_response(16, fields='-id') content = json.loads(response.content.decode('UTF-8')) self.assertIn('title', set(content.keys())) self.assertNotIn('id', set(content.keys())) def test_remove_all_fields(self): response = self.get_response(16, fields='_,id,type') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(set(content.keys()), {'id', 'meta'}) self.assertEqual(set(content['meta'].keys()), {'type'}) def test_nested_fields(self): response = self.get_response(16, fields='feed_image(width,height)') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'}) def test_remove_nested_fields(self): response = 
self.get_response(16, fields='feed_image(-title)') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta'}) def test_all_nested_fields(self): response = self.get_response(16, fields='feed_image(*)') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'}) def test_remove_all_nested_fields(self): response = self.get_response(16, fields='feed_image(_,id)') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(set(content['feed_image'].keys()), {'id'}) def test_nested_nested_fields(self): response = self.get_response(16, fields='carousel_items(image(width,height))') content = json.loads(response.content.decode('UTF-8')) for carousel_item in content['carousel_items']: # Note: inline objects default to displaying all fields self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'image', 'embed_url', 'caption', 'link'}) self.assertEqual(set(carousel_item['image'].keys()), {'id', 'meta', 'title', 'width', 'height'}) def test_fields_child_relation_is_list(self): response = self.get_response(16) content = json.loads(response.content.decode('UTF-8')) self.assertIsInstance(content['related_links'], list) def test_fields_foreign_key(self): response = self.get_response(16) content = json.loads(response.content.decode('UTF-8')) feed_image = content['feed_image'] self.assertIsInstance(feed_image, dict) self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'}) self.assertIsInstance(feed_image['id'], int) self.assertIsInstance(feed_image['meta'], dict) self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'}) self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image') self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/api/v2beta/images/%d/' % feed_image['id']) def test_star_in_wrong_position_gives_error(self): response = self.get_response(16, fields='title,*') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "fields error: '*' must be in the first position"}) def test_unknown_nested_fields_give_error(self): response = self.get_response(16, fields='feed_image(123,title,abc)') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: 123, abc"}) def test_fields_which_are_not_in_api_fields_gives_error(self): response = self.get_response(16, fields='path') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: path"}) def test_fields_unknown_field_gives_error(self): response = self.get_response(16, fields='123,title,abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: 123, abc"}) def test_fields_remove_unknown_field_gives_error(self): response = self.get_response(16, fields='-123,-title,-abc') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, {'message': "unknown fields: 123, abc"}) def test_nested_fields_on_non_relational_field_gives_error(self): response = self.get_response(16, fields='title(foo,bar)') content = json.loads(response.content.decode('UTF-8')) self.assertEqual(response.status_code, 400) self.assertEqual(content, 
{'message': "'title' does not support nested fields"}) class TestPageDetailWithStreamField(TestCase): fixtures = ['test.json'] def setUp(self): self.homepage = Page.objects.get(url_path='/home/') def make_stream_page(self, body): stream_page = StreamPage( title='stream page', slug='stream-page', body=body ) return self.homepage.add_child(instance=stream_page) def test_can_fetch_streamfield_content(self): stream_page = self.make_stream_page('[{"type": "text", "value": "foo"}]') response_url = reverse('wagtailapi_v2:pages:detail', args=(stream_page.id, )) response = self.client.get(response_url) self.assertEqual(response.status_code, 200) self.assertEqual(response['content-type'], 'application/json') content = json.loads(response.content.decode('utf-8')) self.assertIn('id', content) self.assertEqual(content['id'], stream_page.id) self.assertIn('body', content) self.assertEqual(len(content['body']), 1) self.assertEqual(content['body'][0]['type'], 'text') self.assertEqual(content['body'][0]['value'], 'foo') self.assertTrue(content['body'][0]['id']) def test_image_block(self): stream_page = self.make_stream_page('[{"type": "image", "value": 1}]') response_url = reverse('wagtailapi_v2:pages:detail', args=(stream_page.id, )) response = self.client.get(response_url) content = json.loads(response.content.decode('utf-8')) # ForeignKeys in a StreamField shouldn't be translated into dictionary representation self.assertEqual(content['body'][0]['type'], 'image') self.assertEqual(content['body'][0]['value'], 1) def test_image_block_with_custom_get_api_representation(self): stream_page = self.make_stream_page('[{"type": "image", "value": 1}]') response_url = '{}?extended=1'.format( reverse('wagtailapi_v2:pages:detail', args=(stream_page.id, )) ) response = self.client.get(response_url) content = json.loads(response.content.decode('utf-8')) # the custom get_api_representation returns a dict of id and title for the image self.assertEqual(content['body'][0]['type'], 'image') self.assertEqual(content['body'][0]['value'], {'id': 1, 'title': 'A missing image'}) @override_settings( WAGTAILFRONTENDCACHE={ 'varnish': { 'BACKEND': 'wagtail.contrib.frontend_cache.backends.HTTPBackend', 'LOCATION': 'http://localhost:8000', }, }, WAGTAILAPI_BASE_URL='http://api.example.com', ) @mock.patch('wagtail.contrib.frontend_cache.backends.HTTPBackend.purge') class TestPageCacheInvalidation(TestCase): fixtures = ['demosite.json'] @classmethod def setUpClass(cls): super(TestPageCacheInvalidation, cls).setUpClass() signal_handlers.register_signal_handlers() @classmethod def tearDownClass(cls): super(TestPageCacheInvalidation, cls).tearDownClass() signal_handlers.unregister_signal_handlers() def test_republish_page_purges(self, purge): Page.objects.get(id=2).save_revision().publish() purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/') def test_unpublish_page_purges(self, purge): Page.objects.get(id=2).unpublish() purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/') def test_delete_page_purges(self, purge): Page.objects.get(id=16).delete() purge.assert_any_call('http://api.example.com/api/v2beta/pages/16/') def test_save_draft_doesnt_purge(self, purge): Page.objects.get(id=2).save_revision() purge.assert_not_called()
# Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple coloring problem (MIP approach) in OR-tools CP-SAT Solver.

Inspired by the GLPK model color.mod:
'''
COLOR, Graph Coloring Problem

Written in GNU MathProg by Andrew Makhorin <mao@mai2.rcnet.ru>

Given an undirected loopless graph G = (V, E), where V is a set of nodes,
E <= V x V is a set of arcs, the Graph Coloring Problem is to find a
mapping (coloring) F: V -> C, where C = {1, 2, ... } is a set of colors
whose cardinality is as small as possible, such that F(i) != F(j) for
every arc (i,j) in E, that is adjacent nodes must be assigned different
colors.
'''

This is a port of my old OR-tools CP solver coloring_ip.py

This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *


def main():

  model = cp.CpModel()

  # max number of colors
  # [we know that 4 suffices for normal maps]
  nc = 5

  # number of nodes
  n = 11
  # set of nodes
  V = list(range(n))

  num_edges = 20

  #
  # Neighbours
  #
  # This data corresponds to the instance myciel3.col from:
  # http://mat.gsia.cmu.edu/COLOR/instances.html
  #
  # Note: 1-based (adjusted below)
  E = [[1, 2], [1, 4], [1, 7], [1, 9], [2, 3], [2, 6], [2, 8], [3, 5],
       [3, 7], [3, 10], [4, 5], [4, 6], [4, 10], [5, 8], [5, 9], [6, 11],
       [7, 11], [8, 11], [9, 11], [10, 11]]

  #
  # declare variables
  #

  # x[i,c] = 1 means that node i is assigned color c
  x = {}
  for v in V:
    for j in range(nc):
      x[v, j] = model.NewIntVar(0, 1, 'v[%i,%i]' % (v, j))

  # u[c] = 1 means that color c is used, i.e. assigned to some node
  u = [model.NewIntVar(0, 1, 'u[%i]' % i) for i in range(nc)]

  # number of colors used, to minimize
  num_colors = model.NewIntVar(0, nc, "num_colors")
  model.Add(num_colors == sum(u))

  #
  # constraints
  #

  # each node must be assigned exactly one color
  for i in V:
    model.Add(sum([x[i, c] for c in range(nc)]) == 1)

  # adjacent nodes cannot be assigned the same color
  # (and adjust to 0-based)
  for i in range(num_edges):
    for c in range(nc):
      model.Add(x[E[i][0] - 1, c] + x[E[i][1] - 1, c] <= u[c])

  # objective
  model.Minimize(num_colors)

  #
  # solution
  #
  solver = cp.CpSolver()
  status = solver.Solve(model)

  if status == cp.OPTIMAL:
    print()
    print('number of colors:', solver.Value(num_colors))
    print('colors used:', [solver.Value(u[i]) for i in range(nc)])
    print()

    for v in V:
      print('v%i' % v, ' color ', end=' ')
      for c in range(nc):
        if solver.Value(x[v, c]) == 1:
          print(c)

  print()
  print('NumConflicts:', solver.NumConflicts())
  print('NumBranches:', solver.NumBranches())
  print('WallTime:', solver.WallTime())


if __name__ == '__main__':
  main()
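# A hedged alternative sketch of the same myciel3 instance using a more CP-native
# formulation: one integer colour variable per node and a '!=' constraint per edge,
# minimizing the largest colour index used. This is not part of the model above,
# just an illustration of an equivalent encoding; the edge list is copied from it.

from ortools.sat.python import cp_model


def color_with_int_vars():
    model = cp_model.CpModel()
    n, nc = 11, 5
    edges = [(1, 2), (1, 4), (1, 7), (1, 9), (2, 3), (2, 6), (2, 8), (3, 5),
             (3, 7), (3, 10), (4, 5), (4, 6), (4, 10), (5, 8), (5, 9),
             (6, 11), (7, 11), (8, 11), (9, 11), (10, 11)]

    # colour[v] in 0..nc-1 for each node (nodes are 0-based here)
    colour = [model.NewIntVar(0, nc - 1, 'colour[%i]' % v) for v in range(n)]

    # adjacent nodes get different colours (edge list above is 1-based)
    for a, b in edges:
        model.Add(colour[a - 1] != colour[b - 1])

    # minimise the highest colour index used (number of colours - 1)
    max_colour = model.NewIntVar(0, nc - 1, 'max_colour')
    model.AddMaxEquality(max_colour, colour)
    model.Minimize(max_colour)

    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    if status == cp_model.OPTIMAL:
        print('number of colours:', solver.Value(max_colour) + 1)
        print('assignment:', [solver.Value(c) for c in colour])


if __name__ == '__main__':
    color_with_int_vars()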
import os
import torch
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from dataloaders.utils import decode_seg_map_sequence


class TensorboardSummary(object):
    def __init__(self, directory):
        self.directory = directory

    def create_summary(self):
        writer = SummaryWriter(log_dir=os.path.join(self.directory))
        return writer

    def visualize_image(self, writer, dataset, image, target, output, global_step):
        grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
        writer.add_image('Image', grid_image, global_step)
        grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),
                                                       dataset=dataset), 3, normalize=False, range=(0, 255))
        writer.add_image('Predicted label', grid_image, global_step)
        grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
                                                       dataset=dataset), 3, normalize=False, range=(0, 255))
        writer.add_image('Groundtruth label', grid_image, global_step)
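# A hedged, self-contained usage sketch (assuming torch, torchvision and tensorboardX
# are installed): how the writer returned by TensorboardSummary.create_summary() is
# typically used in a training loop, with scalars every step and an image grid every
# so often. The random tensors stand in for a real batch; visualize_image above also
# needs the project's decode_seg_map_sequence, so it is not called here.

import torch
from torchvision.utils import make_grid


def demo_summary_usage():
    summary = TensorboardSummary(directory='runs/demo')
    writer = summary.create_summary()
    for step in range(3):
        fake_loss = 1.0 / (step + 1)
        writer.add_scalar('train/total_loss_iter', fake_loss, step)
        # a fake batch of 3 RGB images, logged the same way visualize_image logs 'Image'
        fake_images = torch.rand(3, 3, 64, 64)
        writer.add_image('Image', make_grid(fake_images, 3, normalize=True), step)
    writer.close()


if __name__ == '__main__':
    demo_summary_usage()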
import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
from flask import Flask, request, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_babel import Babel, lazy_gettext as _l
from elasticsearch import Elasticsearch
from redis import Redis
import rq
from config import Config

db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = 'auth.login'
login.login_message = _l('Please log in to access this page.')
mail = Mail()
bootstrap = Bootstrap()
moment = Moment()
babel = Babel()


def create_app(config_class=Config):
    app = Flask(__name__)
    app.config.from_object(config_class)

    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)
    app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
        if app.config['ELASTICSEARCH_URL'] else None
    app.redis = Redis.from_url(app.config['REDIS_URL'])
    app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    from app.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix='/api')

    @app.route("/hello")
    def hello():
        return "Hello, World!"

    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'], subject='Microblog Failure',
                credentials=auth, secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        else:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler('logs/microblog.log',
                                               maxBytes=10240, backupCount=10)
            file_handler.setFormatter(logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s '
                '[in %(pathname)s:%(lineno)d]'))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')

    return app


@babel.localeselector
def get_locale():
    return request.accept_languages.best_match(current_app.config['LANGUAGES'])


from app import models
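# A hedged sketch (assumptions, not part of the application above) of how the
# create_app() factory is typically exercised from a unit test. It assumes the
# package is importable as `app`, that an in-memory SQLite database is acceptable
# for tests, and that 'redis://' is enough because rq's connection is lazy; adjust
# the TestConfig overrides to whatever the real Config class requires.

import unittest
from config import Config
from app import create_app, db


class TestConfig(Config):
    TESTING = True
    SQLALCHEMY_DATABASE_URI = 'sqlite://'   # in-memory database
    ELASTICSEARCH_URL = None                # skip the search backend
    REDIS_URL = 'redis://'                  # rq connects lazily


class HelloRouteCase(unittest.TestCase):
    def setUp(self):
        self.app = create_app(TestConfig)
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_hello(self):
        response = self.app.test_client().get('/hello')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, b'Hello, World!')


if __name__ == '__main__':
    unittest.main()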
""" Given two sorted integer arrays A and B, merge B into A as one sorted array. Note: You may assume that A has enough space (size that is greater or equal to m + n) to hold additional elements from B. The number of elements initialized in A and B are m and n respectively. """ class Solution: # @param A a list of integers # @param m an integer, length of A # @param B a list of integers # @param n an integer, length of B # @return nothing def merge(self, A, m, B, n): i = m - 1 j = n - 1 x = m + n - 1 while i>=0 and j>=0: if A[i] > B[j]: A[x] = A[i] i -= 1 else: A[x] = B[j] j -= 1 x -= 1 while j>=0: A[x] = B[j] x -= 1 j -= 1 # Focus on detail!!!
import json import os import shutil import subprocess from six.moves.urllib import parse from . import utils from .exceptions import GitCommandError def set_repo_version(repo, version): with open(repo.get_metadata_path('version'), 'w') as out: out.write(str(version)) repo.run_git_command( 'add', '-f', repo.get_metadata_path('version'), failure_ok=True, ) repo.run_git_command( 'commit', '-m', 'Upgraded Repository to v%s' % version, failure_ok=True ) def migration_0002(repo, **kwargs): """ Creates shadow repository used for storing remote values """ os.mkdir( repo.get_metadata_path('shadow') ) subprocess.check_call( ( 'git', 'clone', '-q', '../git', '.' ), cwd=repo.get_metadata_path('shadow'), stdout=subprocess.PIPE, ) try: repo.run_git_command('checkout', '-b', 'jira', shadow=True) except GitCommandError: repo.run_git_command('checkout', 'jira', shadow=True) repo.run_git_command( 'commit', '--allow-empty', '-m', 'Shadow Created', shadow=True ) repo.run_git_command('push', 'origin', 'jira', shadow=True) set_repo_version(repo, 2) def migration_0003(repo, init=False, **kwargs): """ Creates a shadow copy of the issue. .. note:: Early versions of this migration improperly created the shadow copy using an absolute path. """ try: os.mkdir(repo.get_shadow_path('.jirafs')) except OSError: pass storable = { 'options': repo.issue._options, 'raw': repo.issue.raw } with open(repo.get_shadow_path('.jirafs/issue.json'), 'w') as out: out.write(json.dumps(storable)) issue_pickle_path = repo.get_shadow_path('.jirafs/issue.json') repo.run_git_command('add', '-f', issue_pickle_path, shadow=True) repo.run_git_command( 'commit', '-m', 'Completing migration_0003', shadow=True ) repo.run_git_command('push', 'origin', 'jira', shadow=True) repo.run_git_command('merge', 'jira') set_repo_version(repo, 3) def migration_0004(repo, **kwargs): """ Moves remote_files.json into version control. """ local_remote_files_path = repo.get_metadata_path('remote_files.json') jira_remote_files_path = repo.get_shadow_path('.jirafs/remote_files.json') try: os.rename(local_remote_files_path, jira_remote_files_path) except (IOError, OSError): with open(jira_remote_files_path, 'w') as out: out.write('{}') repo.run_git_command('add', '-f', jira_remote_files_path, shadow=True) repo.run_git_command( 'commit', '-m', 'Completing migration_0004', shadow=True ) repo.run_git_command('push', 'origin', 'jira', shadow=True) repo.run_git_command('merge', 'jira') set_repo_version(repo, 4) def migration_0005(repo, init=False, **kwargs): """ Dummy migration for RST->Jira format change. Note: TicketFolders older than version 5 cannot be upgraded past version 5; although I had written a migration for this originally, there were a few hard-to-work-around bugs that I decided were not quite important enough. """ if init: set_repo_version(repo, 5) return repo_path = repo.path temp_path = os.path.normpath( os.path.join( repo_path, '../', repo.path.split('/')[-1] + '.tmp' ) ) repo.clone( repo.issue_url, repo.get_jira, temp_path, ) temp_dir = os.listdir(temp_path) for filename in os.listdir(repo_path): if filename not in temp_dir and not filename.endswith('.jira.rst'): shutil.copyfile( os.path.join(repo_path, filename), os.path.join(temp_path, filename), ) shutil.rmtree(repo_path) os.rename(temp_path, repo_path) set_repo_version(repo, 5) def migration_0006(repo, init=False, **kwargs): """ Fix a glitch preventing folders from being completely portable. 
Early versions of Jirafs would write an absolute path to the ignore file to the local git configuration, but that's not very desirable because if you move the folder, the @stash_local_changes decorator would then wipe out the git repository itself (among other things) after stashing. Whoops; that's embarrassing. """ if init: set_repo_version(repo, 6) return repo.run_git_command( 'config', '--file=%s' % repo.get_metadata_path( 'git', 'config', ), 'core.excludesfile', '.jirafs/gitignore', ) set_repo_version(repo, 6) def migration_0007(repo, init=False, **kwargs): """ Create the plugin metadata directory.""" try: os.mkdir( repo.get_metadata_path( 'plugin_meta', ) ) except OSError: pass with open(repo.get_metadata_path('plugin_meta', '.empty'), 'w') as out: out.write('') repo.run_git_command( 'add', '-f', repo.get_metadata_path('plugin_meta', '.empty',) ) repo.run_git_command( 'commit', '-m', 'Completing migration_0007', failure_ok=True ) set_repo_version(repo, 7) def migration_0008(repo, init=False, **kwargs): """ Commit most of .jirafs folder to git so we can back up. """ if init: set_repo_version(repo, 8) return with open(repo.get_metadata_path('gitignore'), 'w') as out: out.write( '\n'.join( [ '.jirafs/git', '.jirafs/shadow', '.jirafs/operation.log' ] ) ) repo.run_git_command( 'add', '.jirafs/gitignore', ) repo.run_git_command( 'commit', '-m', 'Updating gitignore', failure_ok=True ) files_to_add = [ 'config', 'gitignore', 'issue_url', 'plugin_meta', 'version', ] for filename in files_to_add: repo.run_git_command( 'add', repo.get_metadata_path(filename), failure_ok=True ) set_repo_version(repo, 8) def migration_0009(repo, init=False, **kwargs): """ Re-clone shadow copy so it does not reference an absolute path.""" if init: set_repo_version(repo, 9) shutil.rmtree(repo.get_metadata_path('shadow')) os.mkdir( repo.get_metadata_path('shadow') ) subprocess.check_call( ( 'git', 'clone', '-q', '../git', '.' ), cwd=repo.get_metadata_path('shadow'), stdout=subprocess.PIPE, ) try: repo.run_git_command('checkout', '-b', 'jira', shadow=True) except GitCommandError: repo.run_git_command('checkout', 'jira', shadow=True) repo.run_git_command( 'commit', '--allow-empty', '-m', 'Shadow Created', shadow=True ) repo.run_git_command('push', 'origin', 'jira', shadow=True) set_repo_version(repo, 9) def migration_0010(repo, init=False, **kwargs): """ Make sure that the operation.log and plugin_meta are untracked/tracked. * ``operation.log`` *cannot* be tracked, since if we make a change, followed by a stash pop, operation.log may have encountered changes since then. * ``plugin_meta`` *must* be tracked, or when we pop stash, """ if init: set_repo_version(repo, 10) return with open(repo.get_metadata_path('gitignore'), 'w') as out: out.write( '\n'.join( [ '.jirafs/git', '.jirafs/shadow', '.jirafs/operation.log' ] ) ) repo.run_git_command( 'add', '-f', '.jirafs/gitignore', ) try: os.mkdir( repo.get_metadata_path( 'plugin_meta', ) ) except OSError: # Already exists pass with open(repo.get_metadata_path('plugin_meta', '.empty'), 'w') as out: out.write('') repo.run_git_command( 'add', '-f', repo.get_metadata_path( 'plugin_meta', '.empty' ) ) repo.run_git_command( 'rm', '-f', '--cached', '.jirafs/operation.log', failure_ok=True, ) repo.run_git_command( 'commit', '-m', 'Completing migration_0010', failure_ok=True ) set_repo_version(repo, 10) def migration_0011(repo, init=False, **kwargs): """ Re-clone shadow copy so it does not reference an absolute path. .. 
note:: The amount of stumbling I've engaged in in managing this shadow copy has been terribly embarassing. Who knew it was so complicated. The TLDR is that you *cannot* use `shared` if you ever want the folder to be portable, since it'll write an absolute path to the repository in your `.jirafs/shadow/.git/objects/info/alternates` file. """ if init: set_repo_version(repo, 11) return shutil.rmtree(repo.get_metadata_path('shadow')) os.mkdir( repo.get_metadata_path('shadow') ) subprocess.check_call( ( 'git', 'clone', '-q', '../git', '.' ), cwd=repo.get_metadata_path('shadow'), stdout=subprocess.PIPE, ) try: repo.run_git_command('checkout', '-b', 'jira', shadow=True) except GitCommandError: repo.run_git_command('checkout', 'jira', shadow=True) repo.run_git_command( 'commit', '--allow-empty', '-m', 'Shadow Created', shadow=True ) repo.run_git_command('push', '-f', 'origin', 'jira', shadow=True) repo.run_git_command('merge', 'jira') set_repo_version(repo, 11) def migration_0012(repo, init=False, **kwargs): """ Force the shadow repository to use a relative URL.""" subprocess.check_call( ( 'git', 'remote', 'set-url', 'origin', '../git' ), cwd=repo.get_metadata_path('shadow'), stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) set_repo_version(repo, 12) def migration_0013(repo, init=False, **kwargs): """ Ensure that folder URL is written to issue_url file.""" if init: set_repo_version(repo, 13) return result = repo.get_ticket_url() if result is not None: set_repo_version(repo, 13) return jira_base = utils.get_default_jira_server() ticket_number = repo.path.split('/')[-1:][0].upper() issue_url = parse.urljoin( jira_base, 'browse/' + ticket_number + '/', ) with open(repo.get_metadata_path('issue_url', 'w')) as out: out.write(issue_url) set_repo_version(repo, 13) def migration_0014(repo, init=False, **kwargs): if init: set_repo_version(repo, 14) return with open(repo.get_metadata_path('git/info/exclude'), 'w') as out: out.write( '\n'.join( [ '.jirafs/git', '.jirafs/shadow', '.jirafs/operation.log' ] ) ) if os.path.exists(repo.get_local_path('.jirafs_ignore')): shutil.copyfile( repo.get_local_path('.jirafs_ignore'), repo.get_local_path('.jirafs_local'), ) repo.run_git_command( 'add', '.jirafs_local', ) if os.path.exists(repo.get_metadata_path('gitignore')): shutil.copyfile( repo.get_metadata_path('gitignore'), repo.get_local_path('.jirafs_ignore') ) repo.run_git_command( 'add', '.jirafs_ignore', ) repo.run_git_command( 'rm', repo.get_metadata_path('gitignore') ) repo.run_git_command( 'config', '--file=%s' % repo.get_metadata_path( 'git', 'config', ), 'core.excludesfile', '.jirafs/combined_ignore', ) tracked_files = repo.run_git_command( 'ls-files', '-c', failure_ok=True ).split('\n') filtered_files = repo.filter_ignored_files( tracked_files, '.jirafs_ignore' ) ignored = repo.filter_ignored_files( set(tracked_files) - set(filtered_files), '.jirafs_local' ) for filename in ignored: repo.run_git_command( 'rm', '--cached', filename, failure_ok=True, shadow=True ) repo.run_git_command( 'commit', '-m', 'Completing migration_0014', failure_ok=True, shadow=True ) set_repo_version(repo, 14) def migration_0015(repo, init=False, **kwargs): """ No-op; was previously something else.""" set_repo_version(repo, 15) def migration_0016(repo, init=False, **kwargs): """ Add the 'macros_applied.patch' file to the repository.""" macro_path = repo.get_metadata_path('macros_applied.patch') if not os.path.exists(macro_path): with open(macro_path, 'w') as out: out.write('') repo.run_git_command('add', '-f', macro_path) 
repo.run_git_command( 'commit', '-m', 'Completing migration_0016', failure_ok=True ) set_repo_version(repo, 16)
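# A hedged sketch of a runner for the migration functions above; jirafs' real
# upgrade entry point may differ, so treat `run_migrations_needed` and its
# arguments as hypothetical. The idea: collect the migration_NNNN functions from
# this module, sort them by number, and apply every one above the version
# currently recorded for the ticket folder (each migration bumps the version
# itself via set_repo_version).

import re
import sys


def run_migrations_needed(repo, current_version, init=False):
    """Apply every migration_NNNN above `current_version`, in order (hypothetical helper)."""
    module = sys.modules[__name__]
    migrations = []
    for name in dir(module):
        match = re.match(r'^migration_(\d{4})$', name)
        if match:
            migrations.append((int(match.group(1)), getattr(module, name)))
    for number, migration in sorted(migrations):
        if number > current_version:
            migration(repo, init=init)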
import util import math from Compiler.types import Array, sint, sfloat, sfix, MemValue, cint, Matrix, _int # import operator # import math # from Compiler.instructions import * from Compiler.library import for_range, print_str, for_range, print_float_prec import ml pint = sint pfloat = sfloat pfix = sfix pnum = pfloat print_float_prec(4) # Use to limit the tester workload MAX_DATA_LENGTH = 500 MAX_ML_SIZE = 500 ppcConv2d = ml.FixConv2d ppcMaxPool = ml.MaxPool ppcRelu = ml.Relu ppcDense = ml.Dense def set_display_field_names(name_list): println("result_fields = %s", ' '.join(name_list)) def display_data(field_values): printfmt("result_values =") for value in field_values: printfmt(" %s", value) println() def get_ml_size(shape_array): ml_size = 1 for i in range(1, len(shape_array)): ml_size *= shape_array[i] return ml_size def pConv2d(input_shape, weight_shape, bias_shape, output_shape, stride, padding='SAME', tf_weight_format=False, inputs=None): input_shape_size = get_ml_size(input_shape) if input_shape_size > MAX_ML_SIZE: raise TypeError('input_shape could not larger than %s', MAX_ML_SIZE) bias_shape_size = get_ml_size(bias_shape) if bias_shape_size > MAX_ML_SIZE: raise TypeError('bias_shape could not larger than %s', MAX_ML_SIZE) return ml.FixConv2d(input_shape, weight_shape, bias_shape, output_shape, stride, padding, tf_weight_format=False, inputs=None) def pMaxPool(shape, strides=(1, 2, 2, 1), ksize=(1, 2, 2, 1), padding='VALID'): shape_size = get_ml_size(shape) if shape_size > MAX_ML_SIZE: raise TypeError('shape could not larger than %s', MAX_ML_SIZE) strides_size = get_ml_size(strides) if strides_size > MAX_ML_SIZE: raise TypeError('strides_size could not larger than %s', MAX_ML_SIZE) ksize_size = get_ml_size(ksize) if ksize_size > MAX_ML_SIZE: raise TypeError('ksize_size could not larger than %s', MAX_ML_SIZE) return ml.MaxPool(shape, strides, ksize, padding) def pRelu(shape, inputs=None): shape_size = get_ml_size(shape) if shape_size > MAX_ML_SIZE: raise TypeError('shape could not larger than %s', MAX_ML_SIZE) return ml.Relu(shape, inputs) def pDense(N, d_in, d_out, d=1, activation='id', debug=False): if d_out > MAX_ML_SIZE: raise TypeError('d_out could not larger than %s', MAX_ML_SIZE) return ml.Dense(N, d_in, d_out, d, activation, debug) def read_array(party_id, source_record_count, value_type=pnum): if source_record_count > MAX_DATA_LENGTH: raise TypeError( 'Array length could not larger than %s', MAX_DATA_LENGTH) array_value = Array(source_record_count, value_type) array_value.input_from(party_id) return array_value def max_in_array(array): max_value = MemValue(array[0]) max_index = MemValue(pint(0)) @for_range(1, array.length) def _(i): cond = array[i] > max_value max_index.write(condition(cond, pint(i), max_index.read())) max_value.write(condition(cond, array[i], max_value.read())) return max_value.read(), max_index.read() def min_in_array(array): value = MemValue(array[0]) index = MemValue(pint(0)) @for_range(1, array.length) def _(i): cond = array[i] < value index.write(condition(cond, pint(i), index.read())) value.write(condition(cond, array[i], value.read())) return value.read(), index.read() def combine_array(array1, array2): if array1.value_type != array2.value_type: raise TypeError('Array type does not match') result_array = Array(array1.length+array2.length, array1.value_type) result_array.assign(array1) result_array.assign(array2, array1.length) return result_array def print_array(array): printfmt("[ ") @for_range(array.length) def _(i): printfmt("%s ", 
array[i].reveal()) println("]") def read_matrix(party_id, height, width, value_type=pnum): if height*width > MAX_DATA_LENGTH: raise TypeError('Matrix size could not larger than %s', MAX_DATA_LENGTH) value = Matrix(height, width, value_type) value.input_from(party_id) return value def print_matrix(matrix): println("[") @for_range(matrix.sizes[0]) def _(i): printfmt(" [ ") @for_range(matrix.sizes[1]) def _(j): printfmt("%s ", matrix[i][j].reveal()) println("]") println("]") def condition(cond, a, b): return util.if_else(cond, a, b) def println(s='', *args): print_str(s + '\n', *args) def printfmt(s='', *args): print_str(s, *args) def to_pint(num): if isinstance(num, pint): return num if isinstance(num, pfloat): num = pfix(num) if isinstance(num, pfix): return num.v >> pfix.f raise NotImplementedError('to_pint only implemented for pfloat and pfix.') def pint_mod(self, other): if isinstance(other, int): l = math.log(other, 2) if 2**int(round(l)) == other: return self.mod2m(int(l)) else: return self - to_pint(pfix(self) / other) * other if isinstance(other, _int): return self - to_pint(pfix(self) / other) * other raise NotImplementedError('Argument modulus should be an integer type.') def pint_div(self, other): if isinstance(other, int): l = math.log(other, 2) if 2**int(round(l)) == other: println("%s, %s, %s", (self >> l).reveal(), self.reveal(), l) return self >> l else: return pfix(self) / other # pfloat sometime produces buggy results, has to use pfix here. if isinstance(other, _int): return pfix(self) / other raise NotImplementedError( 'Argument denominator should be an integer type.') def pint_truediv(self, other): return pnum(pint_div(self, other)) def pint_floordiv(self, other): return to_pint(pint_div(self, other)) pint.__mod__ = pint_mod #pint.__truediv__ = pint_truediv pint.__floordiv__ = pint_floordiv
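# A hedged usage sketch of the helpers above. This is MP-SPDZ "compile-time" code,
# so it only runs inside the framework's compiler (for example as the body of a
# .mpc program), not as a plain Python script; the party ids and array lengths
# below are placeholders chosen for illustration.

def demo_program():
    # each of two parties privately provides 10 numbers
    a = read_array(0, 10)
    b = read_array(1, 10)

    combined = combine_array(a, b)
    print_array(combined)

    max_value, max_index = max_in_array(combined)
    min_value, min_index = min_in_array(combined)
    println("max = %s at index %s", max_value.reveal(), max_index.reveal())
    println("min = %s at index %s", min_value.reveal(), min_index.reveal())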
""" Train a new model. """ import sys import argparse import h5py import datetime import subprocess as sp import numpy as np import pandas as pd import gzip as gz from tqdm import tqdm import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable from torch.utils.data import IterableDataset, DataLoader from sklearn.metrics import average_precision_score as average_precision import dscript from dscript.utils import PairedDataset, collate_paired_sequences from dscript.models.embedding import ( IdentityEmbed, FullyConnectedEmbed, ) from dscript.models.contact import ContactCNN from dscript.models.interaction import ModelInteraction def add_args(parser): """ Create parser for command line utility. :meta private: """ data_grp = parser.add_argument_group("Data") proj_grp = parser.add_argument_group("Projection Module") contact_grp = parser.add_argument_group("Contact Module") inter_grp = parser.add_argument_group("Interaction Module") train_grp = parser.add_argument_group("Training") misc_grp = parser.add_argument_group("Output and Device") # Data data_grp.add_argument("--train", help="Training data", required=True) data_grp.add_argument("--val", help="Validation data", required=True) data_grp.add_argument("--embedding", help="h5 file with embedded sequences", required=True) data_grp.add_argument( "--no-augment", action="store_false", dest='augment', help="Set flag to not augment data by adding (B A) for all pairs (A B)", ) # Embedding model proj_grp.add_argument( "--projection-dim", type=int, default=100, help="Dimension of embedding projection layer (default: 100)", ) proj_grp.add_argument( "--dropout-p", type=float, default=0.5, help="Parameter p for embedding dropout layer (default: 0.5)", ) # Contact model contact_grp.add_argument( "--hidden-dim", type=int, default=50, help="Number of hidden units for comparison layer in contact prediction (default: 50)", ) contact_grp.add_argument( "--kernel-width", type=int, default=7, help="Width of convolutional filter for contact prediction (default: 7)", ) # Interaction Model inter_grp.add_argument( "--no-w", action="store_false", dest='use_w', help="Don't use weight matrix in interaction prediction model", ) inter_grp.add_argument( "--pool-width", type=int, default=9, help="Size of max-pool in interaction model (default: 9)", ) # Training train_grp.add_argument( "--negative-ratio", type=int, default=10, help="Number of negative training samples for each positive training sample (default: 10)", ) train_grp.add_argument( "--epoch-scale", type=int, default=1, help="Report heldout performance every this many epochs (default: 1)", ) train_grp.add_argument("--num-epochs", type=int, default=10, help="Number of epochs (default: 10)") train_grp.add_argument("--batch-size", type=int, default=25, help="Minibatch size (default: 25)") train_grp.add_argument("--weight-decay", type=float, default=0, help="L2 regularization (default: 0)") train_grp.add_argument("--lr", type=float, default=0.001, help="Learning rate (default: 0.001)") train_grp.add_argument( "--lambda", dest="lambda_", type=float, default=0.35, help="Weight on the similarity objective (default: 0.35)", ) # Output misc_grp.add_argument("-o", "--outfile", help="Output file path (default: stdout)") misc_grp.add_argument("--save-prefix", help="Path prefix for saving models") misc_grp.add_argument("-d", "--device", type=int, default=-1, help="Compute device to use") misc_grp.add_argument("--checkpoint", help="Checkpoint model to start 
training from") return parser def predict_interaction(model, n0, n1, tensors, use_cuda): """ Predict whether a list of protein pairs will interact. :param model: Model to be trained :type model: dscript.models.interaction.ModelInteraction :param n0: First protein names :type n0: list[str] :param n1: Second protein names :type n1: list[str] :param tensors: Dictionary of protein names to embeddings :type tensors: dict[str, torch.Tensor] :param use_cuda: Whether to use GPU :type use_cuda: bool """ b = len(n0) p_hat = [] for i in range(b): z_a = tensors[n0[i]] z_b = tensors[n1[i]] if use_cuda: z_a = z_a.cuda() z_b = z_b.cuda() p_hat.append(model.predict(z_a, z_b)) p_hat = torch.stack(p_hat, 0) return p_hat def predict_cmap_interaction(model, n0, n1, tensors, use_cuda): """ Predict whether a list of protein pairs will interact, as well as their contact map. :param model: Model to be trained :type model: dscript.models.interaction.ModelInteraction :param n0: First protein names :type n0: list[str] :param n1: Second protein names :type n1: list[str] :param tensors: Dictionary of protein names to embeddings :type tensors: dict[str, torch.Tensor] :param use_cuda: Whether to use GPU :type use_cuda: bool """ b = len(n0) p_hat = [] c_map_mag = [] for i in range(b): z_a = tensors[n0[i]] z_b = tensors[n1[i]] if use_cuda: z_a = z_a.cuda() z_b = z_b.cuda() cm, ph = model.map_predict(z_a, z_b) p_hat.append(ph) c_map_mag.append(torch.mean(cm)) p_hat = torch.stack(p_hat, 0) c_map_mag = torch.stack(c_map_mag, 0) return c_map_mag, p_hat def interaction_grad(model, n0, n1, y, tensors, use_cuda, weight=0.35): """ Compute gradient and backpropagate loss for a batch. :param model: Model to be trained :type model: dscript.models.interaction.ModelInteraction :param n0: First protein names :type n0: list[str] :param n1: Second protein names :type n1: list[str] :param y: Interaction labels :type y: torch.Tensor :param tensors: Dictionary of protein names to embeddings :type tensors: dict[str, torch.Tensor] :param use_cuda: Whether to use GPU :type use_cuda: bool :param weight: Weight on the contact map magnitude objective. BCE loss is :math:`1 - \\text{weight}`. :type weight: float :return: (Loss, number correct, mean square error, batch size) :rtype: (torch.Tensor, int, torch.Tensor, int) """ c_map_mag, p_hat = predict_cmap_interaction(model, n0, n1, tensors, use_cuda) if use_cuda: y = y.cuda() y = Variable(y) bce_loss = F.binary_cross_entropy(p_hat.float(), y.float()) cmap_loss = torch.mean(c_map_mag) loss = (weight * bce_loss) + ((1 - weight) * cmap_loss) b = len(p_hat) # backprop loss loss.backward() if use_cuda: y = y.cpu() p_hat = p_hat.cpu() with torch.no_grad(): guess_cutoff = 0.5 p_hat = p_hat.float() p_guess = (guess_cutoff * torch.ones(b) < p_hat).float() y = y.float() correct = torch.sum(p_guess == y).item() mse = torch.mean((y.float() - p_hat) ** 2).item() return loss, correct, mse, b def interaction_eval(model, test_iterator, tensors, use_cuda): """ Evaluate test data set performance. 
:param model: Model to be trained :type model: dscript.models.interaction.ModelInteraction :param test_iterator: Test data iterator :type test_iterator: torch.utils.data.DataLoader :param tensors: Dictionary of protein names to embeddings :type tensors: dict[str, torch.Tensor] :param use_cuda: Whether to use GPU :type use_cuda: bool :return: (Loss, number correct, mean square error, precision, recall, F1 Score, AUPR) :rtype: (torch.Tensor, int, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) """ p_hat = [] true_y = [] for n0, n1, y in test_iterator: p_hat.append(predict_interaction(model, n0, n1, tensors, use_cuda)) true_y.append(y) y = torch.cat(true_y, 0) p_hat = torch.cat(p_hat, 0) if use_cuda: y.cuda() p_hat = torch.Tensor([x.cuda() for x in p_hat]) p_hat.cuda() loss = F.binary_cross_entropy(p_hat.float(), y.float()).item() b = len(y) with torch.no_grad(): guess_cutoff = torch.Tensor([0.5]).float() p_hat = p_hat.float() y = y.float() p_guess = (guess_cutoff * torch.ones(b) < p_hat).float() correct = torch.sum(p_guess == y).item() mse = torch.mean((y.float() - p_hat) ** 2).item() tp = torch.sum(y * p_hat).item() pr = tp / torch.sum(p_hat).item() re = tp / torch.sum(y).item() f1 = 2 * pr * re / (pr + re) y = y.cpu().numpy() p_hat = p_hat.data.cpu().numpy() aupr = average_precision(y, p_hat) return loss, correct, mse, pr, re, f1, aupr def main(args): """ Run training from arguments. :meta private: """ output = args.outfile if output is None: output = sys.stdout else: output = open(output, "w") print(f'# Called as: {" ".join(sys.argv)}', file=output) if output is not sys.stdout: print(f'Called as: {" ".join(sys.argv)}') # Set device device = args.device use_cuda = (device >= 0) and torch.cuda.is_available() if use_cuda: torch.cuda.set_device(device) print( f"# Using CUDA device {device} - {torch.cuda.get_device_name(device)}", file=output, ) else: print("# Using CPU", file=output) device = "cpu" batch_size = args.batch_size train_fi = args.train test_fi = args.val augment = args.augment embedding_h5 = args.embedding h5fi = h5py.File(embedding_h5, "r") print(f"# Loading training pairs from {train_fi}...", file=output) output.flush() train_df = pd.read_csv(train_fi, sep="\t", header=None) if augment: train_n0 = pd.concat((train_df[0], train_df[1]), axis=0).reset_index(drop=True) train_n1 = pd.concat((train_df[1], train_df[0]), axis=0).reset_index(drop=True) train_y = torch.from_numpy(pd.concat((train_df[2], train_df[2])).values) else: train_n0, train_n1 = train_df[0], train_df[1] train_y = torch.from_numpy(train_df[2].values) print(f"# Loading testing pairs from {test_fi}...", file=output) output.flush() test_df = pd.read_csv(test_fi, sep="\t", header=None) test_n0, test_n1 = test_df[0], test_df[1] test_y = torch.from_numpy(test_df[2].values) output.flush() train_pairs = PairedDataset(train_n0, train_n1, train_y) pairs_train_iterator = torch.utils.data.DataLoader( train_pairs, batch_size=batch_size, collate_fn=collate_paired_sequences, shuffle=True, ) test_pairs = PairedDataset(test_n0, test_n1, test_y) pairs_test_iterator = torch.utils.data.DataLoader( test_pairs, batch_size=batch_size, collate_fn=collate_paired_sequences, shuffle=True, ) output.flush() print(f"# Loading embeddings", file=output) tensors = {} all_proteins = set(train_n0).union(set(train_n1)).union(set(test_n0)).union(set(test_n1)) for prot_name in tqdm(all_proteins): tensors[prot_name] = torch.from_numpy(h5fi[prot_name][:, :]) use_cuda = (args.device > -1) and torch.cuda.is_available() if 
args.checkpoint is None: projection_dim = args.projection_dim dropout_p = args.dropout_p embedding = FullyConnectedEmbed(6165, projection_dim, dropout=dropout_p) print("# Initializing embedding model with:", file=output) print(f"\tprojection_dim: {projection_dim}", file=output) print(f"\tdropout_p: {dropout_p}", file=output) # Create contact model hidden_dim = args.hidden_dim kernel_width = args.kernel_width print("# Initializing contact model with:", file=output) print(f"\thidden_dim: {hidden_dim}", file=output) print(f"\tkernel_width: {kernel_width}", file=output) contact = ContactCNN(projection_dim, hidden_dim, kernel_width) # Create the full model use_W = args.use_w pool_width = args.pool_width print("# Initializing interaction model with:", file=output) print(f"\tpool_width: {pool_width}", file=output) print(f"\tuse_w: {use_W}", file=output) model = ModelInteraction(embedding, contact, use_W=use_W, pool_size=pool_width) print(model, file=output) else: print("# Loading model from checkpoint {}".format(args.checkpoint), file=output) model = torch.load(args.checkpoint) model.use_cuda = use_cuda if use_cuda: model = model.cuda() # Train the model lr = args.lr wd = args.weight_decay num_epochs = args.num_epochs batch_size = args.batch_size report_steps = args.epoch_scale inter_weight = args.lambda_ cmap_weight = 1 - inter_weight digits = int(np.floor(np.log10(num_epochs))) + 1 save_prefix = args.save_prefix if save_prefix is None: save_prefix = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M") params = [p for p in model.parameters() if p.requires_grad] optim = torch.optim.Adam(params, lr=lr, weight_decay=wd) print(f'# Using save prefix "{save_prefix}"', file=output) print(f"# Training with Adam: lr={lr}, weight_decay={wd}", file=output) print(f"\tnum_epochs: {num_epochs}", file=output) print(f"\tepoch_scale: {report_steps}", file=output) print(f"\tbatch_size: {batch_size}", file=output) print(f"\tinteraction weight: {inter_weight}", file=output) print(f"\tcontact map weight: {cmap_weight}", file=output) output.flush() batch_report_fmt = "# [{}/{}] training {:.1%}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}" epoch_report_fmt = "# Finished Epoch {}/{}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}, Precision={:.6}, Recall={:.6}, F1={:.6}, AUPR={:.6}" N = len(pairs_train_iterator) * batch_size for epoch in range(num_epochs): model.train() n = 0 loss_accum = 0 acc_accum = 0 mse_accum = 0 # Train batches for (z0, z1, y) in tqdm(pairs_train_iterator, desc=f"Epoch {epoch+1}/{num_epochs}",total=len(pairs_train_iterator)): loss, correct, mse, b = interaction_grad(model, z0, z1, y, tensors, use_cuda, weight=inter_weight) n += b delta = b * (loss - loss_accum) loss_accum += delta / n delta = correct - b * acc_accum acc_accum += delta / n delta = b * (mse - mse_accum) mse_accum += delta / n report = (n - b) // 100 < n // 100 optim.step() optim.zero_grad() model.clip() if report: tokens = [ epoch + 1, num_epochs, n / N, loss_accum, acc_accum, mse_accum, ] if output is not sys.stdout: print(batch_report_fmt.format(*tokens), file=output) output.flush() if (epoch + 1) % report_steps == 0: model.eval() with torch.no_grad(): ( inter_loss, inter_correct, inter_mse, inter_pr, inter_re, inter_f1, inter_aupr, ) = interaction_eval(model, pairs_test_iterator, tensors, use_cuda) tokens = [ epoch + 1, num_epochs, inter_loss, inter_correct / (len(pairs_test_iterator) * batch_size), inter_mse, inter_pr, inter_re, inter_f1, inter_aupr, ] print(epoch_report_fmt.format(*tokens), file=output) output.flush() # Save the model if 
save_prefix is not None: save_path = save_prefix + "_epoch" + str(epoch + 1).zfill(digits) + ".sav" print(f"# Saving model to {save_path}", file=output) model.cpu() torch.save(model, save_path) if use_cuda: model.cuda() output.flush() if save_prefix is not None: save_path = save_prefix + "_final.sav" print(f"# Saving final model to {save_path}", file=output) model.cpu() torch.save(model, save_path) if use_cuda: model.cuda() output.close() if __name__ == "__main__": parser = argparse.ArgumentParser(description=__doc__) add_args(parser) main(parser.parse_args())
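# A minimal, hypothetical sketch of driving the trainer above programmatically
# rather than from the shell. It only uses `add_args` and `main` defined in this
# file; the module name `train` and every file path below are placeholders, not
# part of the original project.
import argparse
from train import add_args, main  # hypothetical module name for this file

parser = argparse.ArgumentParser()
add_args(parser)
args = parser.parse_args([
    "--train", "pairs_train.tsv",    # placeholder path: tab-separated (name0, name1, label)
    "--val", "pairs_val.tsv",        # placeholder path
    "--embedding", "embeddings.h5",  # placeholder path: h5 file keyed by protein name
    "--num-epochs", "2",             # default device (-1) keeps this on CPU
])
main(args)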
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="pr2roc", version="0.0.1", author="Ameya Daigavane", author_email="ameya.d.98@gmail.com", description="A package to resample precision-recall curves correctly.", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/ameya98/pr2roc", packages=setuptools.find_packages(), classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], python_requires='>=2.7', )
import requests
from abc import ABC, abstractmethod
from typing import Tuple, List
import json


class CoordinateConverter(ABC):
    def __init__(self):
        super().__init__()

    @abstractmethod
    def convert_coordinate(self, coordinate: Tuple, base_system_code, target_system_code):
        pass

    @abstractmethod
    def convert_multiple_coordinates(self, coordinates: List[Tuple], base_system_code, target_system_code):
        pass


class EpsgCoordinateConverter(CoordinateConverter):
    def __init__(self):
        super().__init__()
        self.base_url = 'http://epsg.io/trans?'

    def convert_coordinate(self, coordinate: Tuple, base_system_code: str, target_system_code: str):
        """
        :param coordinate: tuple of 2 or 3 coordinates
        :param base_system_code: source system code in epsg in string format (EPSG:3879 -> 3879)
        :param target_system_code: target system code
        :return: Converted coordinates
        """
        if len(coordinate) < 2 or len(coordinate) > 3:
            raise ValueError('Coordinate must be a tuple containing (x, y) or (x, y, z) coordinates')
        if len(coordinate) == 2:
            query = f"x={coordinate[0]}&y={coordinate[1]}"
        else:
            query = f"x={coordinate[0]}&y={coordinate[1]}&z={coordinate[2]}"
        query += f"&s_srs={base_system_code}&t_srs={target_system_code}"
        r = requests.get(self.base_url + query)
        r.raise_for_status()
        result_as_json = json.loads(r.content.decode('latin1'))
        return result_as_json['x'], result_as_json['y']

    def convert_multiple_coordinates(self, coordinates: List[Tuple], base_system_code, target_system_code):
        """
        :param coordinates: list of tuples of 2 or 3 coordinates
        :param base_system_code: source system code in epsg in string format (EPSG:3879 -> 3879)
        :param target_system_code: target system code
        :return: List of converted coordinates
        """
        if len(coordinates[0]) < 2 or len(coordinates[0]) > 3:
            raise ValueError('Coordinates must be a list of tuples containing (x, y) or (x, y, z) coordinates')
        query = 'data='
        for idx, coor in enumerate(coordinates):
            query += ','.join([str(c) for c in coor])
            if idx != len(coordinates) - 1:
                query += ';'
        query += f"&s_srs={base_system_code}&t_srs={target_system_code}"
        r = requests.get(self.base_url + query)
        r.raise_for_status()
        result_as_json = json.loads(r.content.decode('latin1'))
        if len(coordinates[0]) == 2:
            results = [(t['x'], t['y']) for t in result_as_json]
        else:
            results = [(t['x'], t['y'], t['z']) for t in result_as_json]
        return results
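# A small usage sketch for the converter above. It issues a real HTTP request to
# epsg.io, so it needs network access; the coordinate and the EPSG codes (3879 is
# the Helsinki GK25FIN system, 4326 is WGS84) are only illustrative values, and
# the module name in the import is hypothetical.
from epsg_converter import EpsgCoordinateConverter  # hypothetical module name for this file

converter = EpsgCoordinateConverter()
lon_lat = converter.convert_coordinate((25496750.0, 6672695.0), '3879', '4326')
print(lon_lat)

batch = converter.convert_multiple_coordinates(
    [(25496750.0, 6672695.0), (25497000.0, 6673000.0)], '3879', '4326')
print(batch)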
#!/usr/bin/env python3
import fileinput
import hashlib

hash = None
with fileinput.input() as fp:
    hash = fp.readline().strip()

res = None
i = 0
zeros = 5
while True:
    s = f'{hash}{i}'
    h = hashlib.md5(s.encode())
    res = h.hexdigest()
    if res.startswith('0' * zeros):
        break
    i += 1

print(i)
print(res)
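# A quick, self-contained check of the same idea as the loop above, using the
# hashlib API directly: hash a key-plus-counter string and test the hex digest
# prefix. The key "abcdef" and the counter 609043 are only example inputs, not
# values taken from this script.
import hashlib

digest = hashlib.md5('abcdef609043'.encode()).hexdigest()
print(digest, digest.startswith('0' * 5))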
"""Verify DeadLetter handling behavior. Current behavior is that an Actor may register for DeadLetter handling. If it is registered, any message sent to an Actor that is no longer present will be redirected to the register DeadLetter actor (in its original form). On exit of the DeadLetter handling Actor, the system reverts to the default where dead letters are discarded. If another Actor registers for DeadLetter handling, the new registration will supercede the old registration. The original handler is not aware of this, and will no longer receive DeadLetters, even if the new handler de-registers. Dead letters are handled by the local ActorSystem. Even if the parent of an Actor is located in a separate system, the DeadLetter handler is in the local System. """ import time from thespian.actors import * from thespian.test import * from datetime import timedelta ASK_WAIT = timedelta(seconds=15) dead_routing_wait = lambda: inTestDelay(timedelta(milliseconds=125)) actor_exit_wait = lambda: inTestDelay(timedelta(milliseconds=50)) actor_create_wait = lambda: inTestDelay(timedelta(milliseconds=750)) actor_do_stuff_wait = lambda: inTestDelay(timedelta(milliseconds=500)) class DLHandler(Actor): def receiveMessage(self, msg, sender): if msg == 'Start': self.handleDeadLetters() elif msg == 'Stop': self.handleDeadLetters(False) elif msg == 'Count': self.send(sender, getattr(self, 'numDeadLetters', 0)) elif isinstance(msg, ActorExitRequest): pass else: # got a dead letter self.numDeadLetters = getattr(self, 'numDeadLetters', 0) + 1 class DLParent(Actor): def receiveMessage(self, msg, sender): if not isinstance(msg, ActorSystemMessage): # or isinstance(msg, DeadEnvelope): if not getattr(self, 'dlchild', None): self.dlchild = self.createActor(DLHandler) if self.dlchild == sender: # Upward self.send(self.lastSender, msg) else: # Downward self.lastSender = sender if msg == 'exit please': self.send(self.dlchild, ActorExitRequest()) else: self.send(self.dlchild, msg) # UDP does not provide the ability to validate delivery of messages # (outside of higher-level validation handshakes), so this system base # cannot support Dead Lettering (as documented). 
class TestFuncDeadLettering(object): def checkNewDLCount(self, asys, handlerAddress, oldCount): #asys = ActorSystem() cnt = asys.ask(handlerAddress, 'Count', ASK_WAIT) retries = 30 while cnt <= oldCount and retries: retries -= 1 dead_routing_wait() cnt = asys.ask(handlerAddress, 'Count', ASK_WAIT) assert cnt > oldCount return cnt def test01_registerDeadLetter(self, asys, run_unstable_tests): unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLHandler) assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Start') assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Stop') assert 0 == asys.ask(handler, 'Count', ASK_WAIT) def test11_registerDeadLetterSubActor(self, asys, run_unstable_tests): unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLParent) assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Start') assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Stop') assert 0 == asys.ask(handler, 'Count', ASK_WAIT) def test02_GetDeadLetter(self, asys, run_unstable_tests): unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLHandler) assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Start') cnt = self.checkNewDLCount(asys, handler, -1) pawn = asys.createActor(DLHandler) asys.tell(pawn, ActorExitRequest()) actor_exit_wait() asys.tell(pawn, 'hello') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(pawn, 'hi') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(handler, 'Stop') actor_exit_wait() asys.tell(pawn, 'another') assert cnt == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(pawn, 'and another') assert cnt == asys.ask(handler, 'Count', ASK_WAIT) def test12_GetDeadLetterSubActor(self, asys, run_unstable_tests): unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLParent) r = asys.ask(handler, 'Count', ASK_WAIT) assert 0 == r asys.tell(handler, 'Start') cnt = self.checkNewDLCount(asys, handler, -1) pawn = asys.createActor(DLParent) asys.tell(pawn, 'exit please') actor_create_wait() asys.tell(pawn, 'hello') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(pawn, 'hi') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(handler, 'Stop') actor_exit_wait() asys.tell(pawn, 'another') r = asys.ask(handler, 'Count', ASK_WAIT) assert cnt == r asys.tell(pawn, 'and another') r = asys.ask(handler, 'Count', ASK_WAIT) assert cnt == r def test03_DLRegisterOnlyOnce(self, asys, run_unstable_tests): unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLHandler) assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Start') cnt = self.checkNewDLCount(asys, handler, -1) # Create another actor and shut it down so we can capture its dead letters pawn = asys.createActor(DLHandler) asys.tell(pawn, ActorExitRequest()) actor_do_stuff_wait() # Send a couple of messages and verify they are each passed to the dead letter handler asys.tell(pawn, 'hello') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(pawn, 'hi') cnt = self.checkNewDLCount(asys, handler, cnt) # Another start has no effect; remains the dead letter handler. asys.tell(handler, 'Start') actor_do_stuff_wait() # Send another couple of messages to the dead actor and verify dead letter receipt. 
asys.tell(pawn, 'another') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(pawn, 'and another') cnt = self.checkNewDLCount(asys, handler, cnt) def test13_DLRegisterOnlyOnce(self, asys, run_unstable_tests): unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLParent) assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Start') cnt = self.checkNewDLCount(asys, handler, -1) # Create another actor and shut it down so we can capture its dead letters pawn = asys.createActor(DLParent) asys.tell(pawn, ActorExitRequest()) actor_exit_wait() # Send a couple of messages and verify they are each passed to the dead letter handler asys.tell(pawn, 'hello') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(pawn, 'hi') cnt = self.checkNewDLCount(asys, handler, cnt) # Another start has no effect; remains the dead letter handler. asys.tell(handler, 'Start') actor_do_stuff_wait() # Send another couple of messages to the dead actor and verify dead letter receipt. asys.tell(pawn, 'another') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(pawn, 'and another') cnt = self.checkNewDLCount(asys, handler, cnt) def test04_DLMultipleHandlers(self, asys, run_unstable_tests): unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLHandler) assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Start') cnt = self.checkNewDLCount(asys, handler, -1) pawn = asys.createActor(DLHandler) asys.tell(pawn, ActorExitRequest()) actor_exit_wait() asys.tell(pawn, 'hello') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(pawn, 'hi') cnt = self.checkNewDLCount(asys, handler, cnt) handler2 = asys.createActor(DLHandler) asys.tell(handler2, 'Start') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert 0 == asys.ask(handler2, 'Count', ASK_WAIT) cnt2 = self.checkNewDLCount(asys, handler2, -1) asys.tell(pawn, 'another') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(pawn, 'and another') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Stop') # no effect actor_do_stuff_wait() asys.tell(pawn, 'more messages') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages again') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler2, 'Stop') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages repeated') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages again repeated') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(handler, 'Start') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages repeated reprised') cnt = self.checkNewDLCount(asys, handler, cnt) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages again repeated reprised') cnt = self.checkNewDLCount(asys, handler, cnt) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) def test14_DLMultipleHandlers(self, asys, run_unstable_tests): 
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLParent) assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Start') cnt = self.checkNewDLCount(asys, handler, -1) pawn = asys.createActor(DLParent) asys.tell(pawn, ActorExitRequest()) actor_exit_wait() asys.tell(pawn, 'hello') cnt = self.checkNewDLCount(asys, handler, cnt) asys.tell(pawn, 'hi') cnt = self.checkNewDLCount(asys, handler, cnt) handler2 = asys.createActor(DLParent) asys.tell(handler2, 'Start') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert 0 == asys.ask(handler2, 'Count', ASK_WAIT) cnt2 = self.checkNewDLCount(asys, handler2, -1) asys.tell(pawn, 'another') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(pawn, 'and another') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Stop') # no effect actor_do_stuff_wait() asys.tell(pawn, 'more messages') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages again') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler2, 'Stop') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages repeated') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages again repeated') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(handler, 'Start') actor_do_stuff_wait() assert cnt == asys.ask(handler, 'Count', ASK_WAIT) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages repeated reprised') cnt = self.checkNewDLCount(asys, handler, cnt) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) asys.tell(pawn, 'more messages again repeated reprised') cnt = self.checkNewDLCount(asys, handler, cnt) assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT) def test05_DLAutoRemoval(self, asys, run_unstable_tests): unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLHandler) assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Start') handler2 = asys.createActor(DLHandler) asys.tell(handler2, 'Start') assert 0 == asys.ask(handler, 'Count', ASK_WAIT) assert 0 == asys.ask(handler2, 'Count', ASK_WAIT) # Create actor and kill it so messages to it it will be dead-letter routed. pawn = asys.createActor(DLHandler) asys.tell(pawn, ActorExitRequest()) actor_exit_wait() # Send a message ane make sure the later dead-letter handler receives it cnt = 0 cnt2 = 0 asys.tell(pawn, 'hello') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) # Again, to ensure no round-robining is occurring asys.tell(pawn, 'hi') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) # Now remove dead letter handler; ensure dead letters are dropped asys.tell(handler2, ActorExitRequest()) actor_exit_wait() assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(pawn, 'another') actor_do_stuff_wait() assert 0 == asys.ask(handler, 'Count', ASK_WAIT) # Tell first dead letter handler to re-register asys.tell(handler, 'Start') # n.b. 
tell or ask might create temporary actor, so can't assume startnum == 0 cnt = asys.ask(handler, 'Count', ASK_WAIT) # Verify first dead letter handler is getting dead letters again asys.tell(pawn, 'another again') cnt = self.checkNewDLCount(asys, handler, cnt) def test15_DLAutoRemoval(self, asys, run_unstable_tests): unstable_test(run_unstable_tests, asys, 'multiprocUDPBase') handler = asys.createActor(DLParent) assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(handler, 'Start') handler2 = asys.createActor(DLParent) asys.tell(handler2, 'Start') actor_do_stuff_wait() assert 0 == asys.ask(handler, 'Count', ASK_WAIT) assert 0 == asys.ask(handler2, 'Count', ASK_WAIT) # Create actor and kill it so messages to it it will be dead-letter routed. pawn = asys.createActor(DLParent) asys.tell(pawn, ActorExitRequest()) actor_exit_wait() # Send a message and make sure the later dead-letter handler receives it cnt = 0 cnt2 = 0 asys.tell(pawn, 'hello') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) # Again, to ensure no round-robining is occurring asys.tell(pawn, 'hi') cnt2 = self.checkNewDLCount(asys, handler2, cnt2) assert cnt == asys.ask(handler, 'Count', ASK_WAIT) # Now remove dead letter handler; ensure dead letters are dropped asys.tell(handler2, ActorExitRequest()) actor_exit_wait() assert 0 == asys.ask(handler, 'Count', ASK_WAIT) asys.tell(pawn, 'another') actor_do_stuff_wait() assert 0 == asys.ask(handler, 'Count', ASK_WAIT) # Tell first dead letter handler to re-register asys.tell(handler, 'Start') actor_do_stuff_wait() # n.b. tell or ask might create temporary actor, so can't assume startnum == 0 cnt = asys.ask(handler, 'Count', ASK_WAIT) # Verify first dead letter handler is getting dead letters again asys.tell(pawn, 'another again') cnt = self.checkNewDLCount(asys, handler, cnt) #KWQ: test multiple actor systems
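# A minimal standalone sketch (not part of the test suite above) of the dead
# letter flow these tests exercise: an actor opts in with handleDeadLetters(),
# another actor is killed, and messages sent to the dead address are redirected
# to the handler in their original form. Requires the thespian package; the
# "simpleSystemBase" choice is just the easiest base to run in-process.
from thespian.actors import Actor, ActorSystem, ActorExitRequest, ActorSystemMessage

class LoggingDLHandler(Actor):
    def receiveMessage(self, msg, sender):
        if msg == 'Start':
            self.handleDeadLetters()
        elif not isinstance(msg, ActorSystemMessage):
            print('dead letter received:', msg)

if __name__ == '__main__':
    asys = ActorSystem('simpleSystemBase')
    handler = asys.createActor(LoggingDLHandler)
    asys.tell(handler, 'Start')
    victim = asys.createActor(LoggingDLHandler)
    asys.tell(victim, ActorExitRequest())
    asys.tell(victim, 'hello?')   # no recipient any more -> routed to the registered handler
    asys.shutdown()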
from part1 import ( gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new, ) """ scenario: test_random_actions uuid: 366414300 """ """ random actions, total chaos """ board = gamma_new(5, 4, 4, 1) assert board is not None assert gamma_move(board, 1, 2, 0) == 1 assert gamma_free_fields(board, 1) == 3 assert gamma_golden_possible(board, 1) == 0 assert gamma_move(board, 2, 2, 1) == 1 assert gamma_move(board, 3, 2, 1) == 0 assert gamma_move(board, 3, 1, 1) == 1 assert gamma_move(board, 4, 2, 3) == 1 assert gamma_move(board, 2, 2, 0) == 0 assert gamma_move(board, 2, 3, 0) == 0 assert gamma_move(board, 3, 0, 4) == 0 assert gamma_move(board, 4, 4, 2) == 0 assert gamma_move(board, 4, 1, 1) == 0 assert gamma_move(board, 1, 0, 4) == 0 assert gamma_busy_fields(board, 1) == 1 assert gamma_move(board, 2, 1, 1) == 0 assert gamma_move(board, 3, 1, 0) == 1 assert gamma_move(board, 1, 1, 3) == 0 assert gamma_move(board, 2, 2, 2) == 1 assert gamma_move(board, 3, 1, 0) == 0 assert gamma_move(board, 3, 4, 0) == 0 assert gamma_move(board, 4, 3, 4) == 0 assert gamma_move(board, 4, 0, 1) == 0 assert gamma_move(board, 1, 2, 4) == 0 assert gamma_move(board, 1, 4, 3) == 0 board162686102 = gamma_board(board) assert board162686102 is not None assert board162686102 == ("..4..\n" "..2..\n" ".32..\n" ".31..\n") del board162686102 board162686102 = None assert gamma_move(board, 2, 1, 3) == 0 assert gamma_move(board, 2, 0, 3) == 0 assert gamma_free_fields(board, 2) == 3 assert gamma_move(board, 3, 1, 1) == 0 assert gamma_move(board, 4, 3, 1) == 0 assert gamma_move(board, 1, 3, 4) == 0 assert gamma_move(board, 2, 1, 4) == 0 assert gamma_move(board, 2, 3, 0) == 0 assert gamma_busy_fields(board, 2) == 2 assert gamma_move(board, 3, 2, 0) == 0 assert gamma_move(board, 3, 1, 1) == 0 assert gamma_move(board, 4, 0, 0) == 0 assert gamma_move(board, 1, 0, 3) == 0 assert gamma_move(board, 1, 1, 0) == 0 assert gamma_move(board, 2, 2, 1) == 0 assert gamma_move(board, 2, 4, 0) == 0 assert gamma_move(board, 3, 0, 0) == 1 assert gamma_move(board, 4, 2, 3) == 0 assert gamma_move(board, 2, 2, 0) == 0 assert gamma_move(board, 3, 1, 0) == 0 assert gamma_golden_possible(board, 3) == 1 assert gamma_move(board, 4, 4, 0) == 0 assert gamma_move(board, 1, 0, 2) == 0 assert gamma_move(board, 1, 3, 0) == 1 assert gamma_move(board, 2, 3, 0) == 0 assert gamma_move(board, 3, 3, 3) == 0 assert gamma_move(board, 3, 4, 1) == 0 assert gamma_golden_possible(board, 3) == 1 assert gamma_move(board, 4, 3, 1) == 0 assert gamma_move(board, 4, 3, 2) == 0 assert gamma_move(board, 1, 3, 1) == 1 assert gamma_move(board, 3, 2, 1) == 0 assert gamma_move(board, 4, 3, 0) == 0 gamma_delete(board)
import misc_tools import random def create_routing(env, first_step='op1'): tasks = { 'op1': misc_tools.make_assembly_step( env=env, run_time=random.gauss(mu=12, sigma=0.5), route_to='op2'), 'op2': { 'location': env['machine_3'], 'worker': env['technician'], 'manned': False, 'setup_time': random.uniform(a=2, b=5), 'run_time': random.gauss(mu=15, sigma=0.25), 'teardown_time': 0, 'transit_time': 1, 'yield': 0.85, 'route_to_pass': 'op3', 'route_to_fail': 'rework' }, 'op3': { 'location': env['common_process'], 'worker': env['technician'], 'manned': True, 'setup_time': random.triangular(low=1, high=4, mode=2), 'run_time': random.gauss(mu=2, sigma=0.5), 'teardown_time': random.uniform(a=1, b=2), 'transit_time': 1, 'route_to': env['part_c_storage'] }, 'rework': { 'location': env['assembly_bench'], 'worker': env['assembler'], 'manned': True, 'setup_time': 0, 'run_time': random.expovariate(lambd=0.5)*15, 'teardown_time': 0, 'transit_time': 1, 'fail_count': 2, 'route_to_pass': 'op2', 'route_to_fail': env['scrap_storage'] } } return misc_tools.make_steps(first_step=first_step, tasks=tasks) def get_bom(env): return { 'part_a': { 'location': env['part_a_kanban'], 'qty': 1 }, 'part_b': { 'location': env['part_b_kanban'], 'qty': 2 } }
from irnl_rdt_correction.irnl_rdt_correction import main, log_setup if __name__ == '__main__': log_setup() main()
""" Reolink Camera API """
# coding:utf-8 # modified from: https://github.com/haqishen/MFNet-pytorch # By Yuxiang Sun, Aug. 2, 2019 # Email: sun.yuxiang@outlook.com import os import argparse import time import datetime import numpy as np import sys import torch from torch.autograd import Variable from torch.utils.data import DataLoader from util.MF_dataset import MF_dataset from model import RTFNet from sklearn.metrics import confusion_matrix n_class = 9 data_dir = './dataset/' model_dir = './weights_backup/' def main(): conf_total = np.zeros((n_class,n_class)) model = eval(args.model_name)(n_class=n_class) if args.gpu >= 0: model.cuda(args.gpu) print('| loading model file %s... ' % model_file) pretrained_weight = torch.load(model_file, map_location = lambda storage, loc: storage.cuda(args.gpu)) own_state = model.state_dict() for name, param in pretrained_weight.items(): if name not in own_state: continue own_state[name].copy_(param) print('done!') test_dataset = MF_dataset(data_dir, args.dataset_name, have_label=True, input_h=args.img_height, input_w=args.img_width) test_loader = DataLoader( dataset = test_dataset, batch_size = batch_size, shuffle = False, num_workers = args.num_workers, pin_memory = True, drop_last = False ) test_loader.n_iter = len(test_loader) ave_time_cost = 0.0 model.eval() with torch.no_grad(): for it, (images, labels, names) in enumerate(test_loader): images = Variable(images) labels = Variable(labels) if args.gpu >= 0: images = images.cuda(args.gpu) labels = labels.cuda(args.gpu) start_time = time.time() logits = model(images) # logits.size(): mini_batch*num_class*480*640 end_time = time.time() if it>10: # # ignore the first 10 frames ave_time_cost += (end_time-start_time) # convert tensor to numpy 1d array label = labels.cpu().numpy().squeeze().flatten() prediction = logits.argmax(1).cpu().numpy().squeeze().flatten() # prediction and label are both 1-d array, size: minibatch*640*480 # generate confusion matrix frame-by-frame conf = confusion_matrix(label, prediction, [0,1,2,3,4,5,6,7,8]) # conf is an n_class*n_class matrix, vertical axis: groundtruth, horizontal axis: prediction conf_total += conf print("| frame %d/%d, time cost: %.2f ms" %(it+1, test_loader.n_iter, (end_time-start_time)*1000)) # calculate recall (Acc) and IoU for each class recall_per_class = np.zeros(n_class) iou_per_class = np.zeros(n_class) for cid in range(0, n_class): # cid: class id if conf_total[cid, 0:].sum() == 0: recall_per_class[cid] = np.nan else: recall_per_class[cid] = float(conf_total[cid, cid]) / float(conf_total[cid, 0:].sum()) # recall (Acc) = TP/TP+FN if (conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid]) == 0: iou_per_class[cid] = np.nan else: iou_per_class[cid] = float(conf_total[cid, cid]) / float((conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid])) # IoU = TP/TP+FP+FN print('\n###########################################################################') print('\n| %s: %s test results (with batch size %d) on %s using %s:' %(args.model_name, args.weight_name, batch_size, datetime.date.today(), torch.cuda.get_device_name(args.gpu))) print('\n| * the tested dataset name: %s' % args.dataset_name) print('| * the tested image count: %d' % test_loader.n_iter) print('| * the tested image size: %d*%d' %(args.img_height, args.img_width)) print("| * recall per class: \n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f" \ %(recall_per_class[0], recall_per_class[1], 
recall_per_class[2], recall_per_class[3], recall_per_class[4], recall_per_class[5], recall_per_class[6], recall_per_class[7], recall_per_class[8])) print("| * iou per class: \n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f" \ %(iou_per_class[0], iou_per_class[1], iou_per_class[2], iou_per_class[3], iou_per_class[4], iou_per_class[5], iou_per_class[6], iou_per_class[7], iou_per_class[8])) print("\n| * average values (np.mean(x)): \n recall: %.6f, iou: %.6f" \ %(recall_per_class.mean(), iou_per_class.mean())) print("| * average values (np.mean(np.nan_to_num(x))): \n recall: %.6f, iou: %.6f" \ %(np.mean(np.nan_to_num(recall_per_class)), np.mean(np.nan_to_num(iou_per_class)))) print('\n| * the average time cost per frame (with batch size %d): %.2f ms, namely, the inference speed is %.2f fps' %(batch_size, ave_time_cost*1000/(test_loader.n_iter-11), 1.0/(ave_time_cost/(test_loader.n_iter-11)))) # ignore the first 10 frames #print('\n| * the total confusion matrix: ') #np.set_printoptions(precision=8, threshold=np.inf, linewidth=np.inf, suppress=True) #print(conf_total) print('\n###########################################################################') if __name__ == '__main__': parser = argparse.ArgumentParser(description='Test with pytorch') parser.add_argument('--model_name', '-M', type=str, default='RTFNet') parser.add_argument('--weight_name', '-W', type=str, default='RTFNet_152') # RTFNet_152, RTFNet_50, please change the number of layers in the network file parser.add_argument('--dataset_name', '-D', type=str, default='test') # test, test_day, test_night parser.add_argument('--img_height', '-IH', type=int, default=480) parser.add_argument('--img_width', '-IW', type=int, default=640) parser.add_argument('--gpu', '-G', type=int, default=0) parser.add_argument('--num_workers', '-j', type=int, default=8) args = parser.parse_args() batch_size = 1 # do not change this parameter! torch.cuda.set_device(args.gpu) print("\n| the gpu count:", torch.cuda.device_count()) print("| the current used gpu:", torch.cuda.current_device(), '\n') model_dir = os.path.join(model_dir, args.weight_name) # model_dir = './weights_backup/' if os.path.exists(model_dir) is False: print("| the %s does not exit." %(model_dir)) sys.exit() model_file = os.path.join(model_dir, 'final.pth') if os.path.exists(model_file) is True: print('| use the final model file.') else: print('| no model file found.') sys.exit() print('| testing %s: %s on GPU #%d with pytorch' % (args.model_name, args.weight_name, args.gpu)) main()
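# A tiny, self-contained numpy illustration of the per-class metrics computed
# above from the accumulated confusion matrix (rows = ground truth, columns =
# prediction): recall = TP / (TP + FN) from the row sum, and
# IoU = TP / (TP + FP + FN) from row sum plus column sum minus the diagonal.
# The 3x3 matrix below is made-up toy data, not a real evaluation result.
import numpy as np

conf = np.array([[50,  2,  3],
                 [ 4, 40,  1],
                 [ 6,  0, 30]], dtype=float)

tp = np.diag(conf)
recall = tp / conf.sum(axis=1)
iou = tp / (conf.sum(axis=1) + conf.sum(axis=0) - tp)
print('recall per class:', recall)
print('iou per class:', iou)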
# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ import os import unittest from txnmain.validator_cli import get_configuration class TestValidatorCLI(unittest.TestCase): def test_currency_home(self): os.environ.clear() os.environ["CURRENCYHOME"] = "/test_path" cfg = get_configuration(args=[], config_files_required=False) self.assertIn("CurrencyHome", cfg) self.assertEquals(cfg["CurrencyHome"], "/test_path") self.assertEquals(cfg["ConfigDirectory"], "/test_path/etc") self.assertEquals(cfg["LogDirectory"], "/test_path/logs") self.assertEquals(cfg["DataDirectory"], "/test_path/data") def test_default_config_posix(self): os.environ.clear() cfg = get_configuration(args=[], os_name='posix', config_files_required=False) self.assertNotIn("CurrencyHome", cfg) self.assertEquals(cfg["ConfigDirectory"], "/etc/sawtooth-validator") self.assertEquals(cfg["LogDirectory"], "/var/log/sawtooth-validator") self.assertEquals(cfg["DataDirectory"], "/var/lib/sawtooth-validator") def test_default_config_nt(self): os.environ.clear() cfg = get_configuration(args=[], os_name='nt', config_files_required=False) self.assertNotIn("CurrencyHome", cfg) self.assertEquals( cfg["ConfigDirectory"], "C:\\Program Files (x86)\\Intel\\sawtooth-validator\\conf") self.assertEquals( cfg["LogDirectory"], "C:\\Program Files (x86)\\Intel\\sawtooth-validator\\logs") self.assertEquals( cfg["DataDirectory"], "C:\\Program Files (x86)\\Intel\\sawtooth-validator\\data") def test_logconfig_arg(self): os.environ.clear() cfg = get_configuration(args=["--log-config=Logging.js"], config_files_required=False) self.assertIn("LogConfigFile", cfg) self.assertEquals(cfg["LogConfigFile"], "Logging.js") def test_options_mapping_conf_dir(self): os.environ.clear() cfg = get_configuration(args=["--conf-dir=/test_path/etc"], config_files_required=False) self.assertIn("ConfigDirectory", cfg) self.assertEquals(cfg["ConfigDirectory"], "/test_path/etc") def test_options_mapping_data_dir(self): os.environ.clear() cfg = get_configuration(args=["--data-dir=/test_path/data"], config_files_required=False) self.assertIn("DataDirectory", cfg) self.assertEquals(cfg["DataDirectory"], "/test_path/data") def test_options_mapping_type(self): os.environ.clear() cfg = get_configuration(args=["--type=test"], config_files_required=False) self.assertIn("LedgerType", cfg) self.assertEquals(cfg["LedgerType"], "test") def test_options_mapping_key_file(self): os.environ.clear() cfg = get_configuration(args=["--keyfile=/test_path/keys/key.wif"], config_files_required=False) self.assertIn("KeyFile", cfg) self.assertEquals(cfg["KeyFile"], "/test_path/keys/key.wif") def test_options_mapping_node(self): os.environ.clear() cfg = get_configuration(args=["--node=test000"], config_files_required=False) self.assertIn("NodeName", cfg) self.assertEquals(cfg["NodeName"], "test000") def test_options_mapping_listsn(self): os.environ.clear() cfg = get_configuration(args=['--listen="localhost:5500/UDP 
gossip"'], config_files_required=False) self.assertIn("Listen", cfg) self.assertEquals(cfg["Listen"], ['"localhost:5500/UDP gossip"']) def test_options_mapping_restore(self): os.environ.clear() cfg = get_configuration(args=["--restore"], config_files_required=False) self.assertEquals(cfg["Restore"], True) def test_options_mapping_peers(self): os.environ.clear() cfg = get_configuration(args=["--peers=testpeer1"], config_files_required=False) self.assertIn("Peers", cfg) self.assertIn("testpeer1", cfg["Peers"]) def test_options_mapping_url(self): os.environ.clear() cfg = get_configuration(args=["--url", "http://testhost:8888," "http://testhost:8889", "--url", "http://testhost:8890"], config_files_required=False) self.assertIn("LedgerURL", cfg) self.assertIn("http://testhost:8888", cfg["LedgerURL"]) self.assertIn("http://testhost:8889", cfg["LedgerURL"]) self.assertIn("http://testhost:8890", cfg["LedgerURL"]) if __name__ == '__main__': unittest.main()
""" OpenAPI Petstore This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from petstore_api.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from petstore_api.exceptions import ApiAttributeError def lazy_import(): from petstore_api.model.child_lizard_all_of import ChildLizardAllOf from petstore_api.model.parent_pet import ParentPet globals()['ChildLizardAllOf'] = ChildLizardAllOf globals()['ParentPet'] = ParentPet class ChildLizard(ModelComposed): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'pet_type': (str,), # noqa: E501 'loves_rocks': (bool,), # noqa: E501 } @cached_property def discriminator(): val = { } if not val: return None return {'pet_type': val} attribute_map = { 'pet_type': 'pet_type', # noqa: E501 'loves_rocks': 'lovesRocks', # noqa: E501 } read_only_vars = { } @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 """ChildLizard - a model defined in OpenAPI Keyword Args: pet_type (str): _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. 
snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) loves_rocks (bool): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) constant_args = { '_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes, } composed_info = validate_get_composed_info( constant_args, kwargs, self) self._composed_instances = composed_info[0] self._var_name_to_model_instances = composed_info[1] self._additional_properties_model_instances = composed_info[2] discarded_args = composed_info[3] for var_name, var_value in kwargs.items(): if var_name in discarded_args and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self._additional_properties_model_instances: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', '_composed_instances', '_var_name_to_model_instances', '_additional_properties_model_instances', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """ChildLizard - a model defined in OpenAPI Keyword Args: pet_type (str): _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. 
If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) loves_rocks (bool): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) constant_args = { '_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes, } composed_info = validate_get_composed_info( constant_args, kwargs, self) self._composed_instances = composed_info[0] self._var_name_to_model_instances = composed_info[1] self._additional_properties_model_instances = composed_info[2] discarded_args = composed_info[3] for var_name, var_value in kwargs.items(): if var_name in discarded_args and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self._additional_properties_model_instances: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.") @cached_property def _composed_schemas(): # we need this here to make our import statements work # we must store _composed_schemas in here so the code is only run # when we invoke this method. If we kept this at the class # level we would get an error because the class level # code would be run when this module is imported, and these composed # classes don't exist yet because their module has not finished # loading lazy_import() return { 'anyOf': [ ], 'allOf': [ ChildLizardAllOf, ParentPet, ], 'oneOf': [ ], }
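# A brief, hypothetical instantiation sketch for the generated model above. It
# assumes the full generated petstore_api package is importable; the pet_type
# value is a guess at the discriminator string and is not taken from the spec.
from petstore_api.model.child_lizard import ChildLizard  # generated package assumed installed

lizard = ChildLizard(pet_type="ChildLizard", loves_rocks=True)
print(lizard.pet_type, lizard.loves_rocks)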
from scrapy.crawler import CrawlerProcess from scrapy.utils.project import get_project_settings from botmovies.spiders.ptt import PttMoviesSpider from botmovies.spiders.yahoo import YahooSpider process = CrawlerProcess(get_project_settings()) process.crawl(PttMoviesSpider) process.crawl(YahooSpider) process.start()
#!/usr/bin/env python3 # Foundations of Python Network Programming, Third Edition # https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter18/rpyc_server.py # RPyC server import rpyc def main(): from rpyc.utils.server import ThreadedServer t = ThreadedServer(MyService, port = 18861) t.start() class MyService(rpyc.Service): def exposed_line_counter(self, fileobj, function): print('Client has invoked exposed_line_counter()') for linenum, line in enumerate(fileobj.readlines()): function(line) return linenum + 1 if __name__ == '__main__': main()
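# A minimal client sketch for the service above: connect to the ThreadedServer
# port and call the exposed method. rpyc strips the "exposed_" prefix, so it is
# reachable as conn.root.line_counter; the file object and the print callback are
# passed by reference and used on the server side. The opened file name is a
# placeholder.
import rpyc

conn = rpyc.connect('localhost', 18861)
with open('rpyc_server.py') as f:             # placeholder: any local text file works
    total = conn.root.line_counter(f, print)  # server reads the file and calls print() per line
print('line count reported by server:', total)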
# Copyright (C) 2018 Garth N. Wells # # SPDX-License-Identifier: MIT from floodsystem.stationdata import build_station_list, update_water_levels from floodsystem.flood import stations_level_over_threshold def run(): stations = build_station_list() update_water_levels(stations) for station_tuple in stations_level_over_threshold(stations, 0.8): print(station_tuple[0].name + " " + str(station_tuple[1])) if __name__ == "__main__": print("*** Task 2B: CUED Part IA Flood Warning System ***") run()
from auxiliar import receberInt


def voto(nasc):
    from datetime import date
    idade = int(date.today().year) - nasc
    if idade < 16:
        return f'At {idade} years old, voting: DENIED'
    elif idade < 18 or idade >= 60:
        return f'At {idade} years old, voting: OPTIONAL'
    else:
        return f'At {idade} years old, voting: MANDATORY'


# main
nascimento = receberInt('Enter the year of birth: ')
print(voto(nascimento))
input('\n\nPress <enter> to continue')

import argtyper @argtyper.ArgumentGroup( ["firstname", "lastname"], title="Name details", description="Give your full name here", ) @argtyper.ArgumentGroup( ["nickname", "firstname"], title="Nickname details", description="Give your Nickname here", ) @argtyper.Argument( "amount", "repetitions", help="How often should we say hello?", metavar="reps" ) @argtyper.Argument( "lastname", "--name", "--n", help="Give me your name", default="Yoda" ) def hello(nickname: str, firstname: str, lastname: str, amount: int = 2): print("\n".join([f"Hello {firstname} '{nickname.upper()}' {lastname}"] * amount)) at = argtyper.ArgTyper(hello) at()
# coding=utf-8 # Copyright 2019 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Text datasets.""" from tensorflow_datasets.text.definite_pronoun_resolution import DefinitePronounResolution from tensorflow_datasets.text.gap import Gap from tensorflow_datasets.text.glue import Glue from tensorflow_datasets.text.imdb import IMDBReviews from tensorflow_datasets.text.imdb import IMDBReviewsConfig from tensorflow_datasets.text.lm1b import Lm1b from tensorflow_datasets.text.lm1b import Lm1bConfig from tensorflow_datasets.text.multi_nli import MultiNLI from tensorflow_datasets.text.multi_nli_mismatch import MultiNLIMismatch from tensorflow_datasets.text.snli import Snli from tensorflow_datasets.text.squad import Squad from tensorflow_datasets.text.super_glue import SuperGlue from tensorflow_datasets.text.trivia_qa import TriviaQA from tensorflow_datasets.text.wikipedia import Wikipedia from tensorflow_datasets.text.xnli import Xnli
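# A short, hedged usage sketch: the dataset builders exported above are normally
# consumed through the tfds registry rather than imported directly. Loading by
# registered name (e.g. "imdb_reviews" for IMDBReviews) downloads the data on
# first use, so treat this as a sketch rather than something to run blindly.
import tensorflow_datasets as tfds

ds = tfds.load('imdb_reviews', split='train')
for example in ds.take(1):
    print(example['text'])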
import os import glob from copy import deepcopy import pytest from ruamel.yaml.compat import StringIO import development_scripts @pytest.fixture(scope="module") def yaml_comments_file(): with open("tests/mocks/load/yaml_comments.yml", encoding="utf-8") as fh: return development_scripts.YAML_OBJECT.load(fh) @pytest.fixture def copy_yaml_comments(yaml_comments_file): return deepcopy(yaml_comments_file) @pytest.fixture def teardown_normalize_file(): filepaths = {} def _teardown_normalize_file(filepath): with open(filepath, encoding="utf-8") as fh: contents = fh.read() filepaths[filepath] = contents yield _teardown_normalize_file for filepath, contents in filepaths.items(): with open(filepath, "w", encoding="utf-8") as fh: fh.write(contents) @pytest.fixture(scope="module") def expected_file(): expected_path = "tests/mocks/expected/parsed_sample.yml" with open(expected_path, encoding="utf-8") as fh: return fh.read() @pytest.fixture(scope="module") def expected_mac_file(): expected_path = "tests/mocks/expected/show_mac.yml" with open(expected_path, encoding="utf-8") as fh: return fh.read() @pytest.fixture def teardown_delete_file(): filepaths = [] def _teardown_delete_file(filepath): filepaths.append(filepath) yield _teardown_delete_file for file in filepaths: os.remove(file) def test_ensure_spacing_for_multiline_comment(): remark = "comment 11\n# comment 12\n#comment 13\n" remark_formatted = development_scripts.ensure_spacing_for_multiline_comment(remark) assert remark_formatted == "comment 11\n# comment 12\n# comment 13" def test_ensure_space_after_octothorpe(copy_yaml_comments): comment = copy_yaml_comments.ca.items["b"][2] development_scripts.ensure_space_after_octothorpe(comment) assert comment.value == "# comment 2\n# comment 3\n" def test_ensure_space_comments(copy_yaml_comments): comments = copy_yaml_comments.ca.items comment_values = comments.values() development_scripts.ensure_space_comments(comment_values) assert comments["a"][2].value == "# comment 1\n" assert comments["b"][2].value == "# comment 2\n# comment 3\n" assert comments["d"][3][0].value == "# comment 7\n" def test_update_yaml_comments(copy_yaml_comments): development_scripts.update_yaml_comments(copy_yaml_comments) string_yaml = StringIO() development_scripts.YAML_OBJECT.dump(copy_yaml_comments, string_yaml) actual = string_yaml.getvalue() with open("tests/mocks/expected/yaml_comments.yml", encoding="utf-8") as fh: expected = fh.read() assert actual == expected def test_transform_file(teardown_normalize_file, expected_file): load_file = "tests/mocks/load/parsed_sample.yml" teardown_normalize_file(load_file) development_scripts.transform_file(load_file) with open(load_file, encoding="utf-8") as actual: assert actual.read() == expected_file def test_transform_glob(teardown_normalize_file, expected_file): glob_dir = "tests/mocks/load/gl*" parsed_files = glob.glob(f"{glob_dir}/*.yml") for file in parsed_files: teardown_normalize_file(file) development_scripts.transform_glob(glob_dir) for file in parsed_files: with open(file, encoding="utf-8") as actual: assert actual.read() == expected_file def test_ensure_yaml_standards(teardown_normalize_file, expected_file): load_file = "tests/mocks/load/parsed_sample.yml" teardown_normalize_file(load_file) with open(load_file, encoding="utf-8") as fh: load_yaml = development_scripts.YAML_OBJECT.load(fh) development_scripts.ensure_yaml_standards(load_yaml, load_file) with open(load_file, encoding="utf-8") as actual: assert actual.read() == expected_file def test_parse_test_filepath(): 
filepath = "tests/cisco_ios/show_version/cisco_ios_show_version.raw" platform, command, filename = development_scripts.parse_test_filepath(filepath) assert platform == "cisco_ios" assert command == "show version" assert filename == "cisco_ios_show_version" def test_build_parsed_data_from_output(teardown_delete_file, expected_mac_file): load_file = "tests/mocks/cisco_ios/show_mac-address-table/show_mac1.raw" yaml_file = f"{load_file[:-3]}yml" teardown_delete_file(yaml_file) development_scripts.build_parsed_data_from_output(load_file, test_dir="tests/mocks") with open(yaml_file, encoding="utf-8") as actual: assert actual.read() == expected_mac_file def test_build_parsed_data_from_dir(teardown_delete_file, expected_mac_file): glob_dir = "tests/mocks/cisco_ios/show_mac-*" command_files = glob.iglob(f"{glob_dir}/*.raw") parsed_files = [f"{file[:-3]}yml" for file in command_files] for file in parsed_files: teardown_delete_file(file) development_scripts.build_parsed_data_from_dir(glob_dir, test_dir="tests/mocks") for file in parsed_files: with open(file, encoding="utf-8") as actual: assert actual.read() == expected_mac_file
"""Support for Xiaomi lights.""" import logging from functools import partial from homeassistant.const import * # noqa: F401 from homeassistant.components.light import ( DOMAIN as ENTITY_DOMAIN, LightEntity, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_COLOR, SUPPORT_EFFECT, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, ATTR_EFFECT, ) from homeassistant.util import color from . import ( DOMAIN, CONF_MODEL, XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA, # noqa: F401 MiotToggleEntity, ToggleSubEntity, async_setup_config_entry, bind_services_to_entries, ) from .core.miot_spec import ( MiotSpec, MiotService, ) from miio.utils import ( rgb_to_int, int_to_rgb, ) try: # hass 2021.4.0b0+ from homeassistant.components.light import ( COLOR_MODE_ONOFF, COLOR_MODE_BRIGHTNESS, COLOR_MODE_COLOR_TEMP, COLOR_MODE_HS, ) except ImportError: COLOR_MODE_ONOFF = 'onoff' COLOR_MODE_BRIGHTNESS = 'brightness' COLOR_MODE_COLOR_TEMP = 'color_temp' COLOR_MODE_HS = 'hs' _LOGGER = logging.getLogger(__name__) DATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}' SERVICE_TO_METHOD = {} async def async_setup_entry(hass, config_entry, async_add_entities): await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): hass.data.setdefault(DATA_KEY, {}) hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities model = str(config.get(CONF_MODEL) or '') entities = [] if model.find('mrbond.airer') >= 0: pass else: miot = config.get('miot_type') if miot: spec = await MiotSpec.async_from_type(hass, miot) for srv in spec.get_services(ENTITY_DOMAIN): if not srv.get_property('on'): continue entities.append(MiotLightEntity(config, srv)) for entity in entities: hass.data[DOMAIN]['entities'][entity.unique_id] = entity async_add_entities(entities, update_before_add=True) bind_services_to_entries(hass, SERVICE_TO_METHOD) class MiotLightEntity(MiotToggleEntity, LightEntity): def __init__(self, config: dict, miot_service: MiotService, **kwargs): kwargs.setdefault('logger', _LOGGER) super().__init__(miot_service, config=config, **kwargs) self._prop_power = miot_service.get_property('on') self._prop_mode = miot_service.get_property('mode') self._prop_brightness = miot_service.get_property('brightness') self._prop_color_temp = miot_service.get_property('color_temperature') self._prop_color = miot_service.get_property('color') self._srv_ambient_custom = miot_service.spec.get_service('ambient_light_custom') if self._srv_ambient_custom: if not self._prop_color: self._prop_color = self._srv_ambient_custom.get_property('color') self._attr_supported_color_modes = set() if self._prop_power: self._attr_supported_color_modes.add(COLOR_MODE_ONOFF) if self._prop_brightness: self._supported_features |= SUPPORT_BRIGHTNESS self._attr_supported_color_modes.add(COLOR_MODE_BRIGHTNESS) if self._prop_color_temp: self._supported_features |= SUPPORT_COLOR_TEMP self._attr_supported_color_modes.add(COLOR_MODE_COLOR_TEMP) if self._prop_color: self._supported_features |= SUPPORT_COLOR self._attr_supported_color_modes.add(COLOR_MODE_HS) if self._prop_mode: self._supported_features |= SUPPORT_EFFECT def turn_on(self, **kwargs): ret = False if not self.is_on: ret = self.set_property(self._prop_power, True) if self._prop_brightness and ATTR_BRIGHTNESS in kwargs: brightness = kwargs[ATTR_BRIGHTNESS] per = brightness / 255 val = per * 100 if self._prop_brightness.value_range: val = per * self._prop_brightness.range_max() _LOGGER.debug('Setting 
light: %s brightness: %s %s%%', self.name, brightness, per * 100) ret = self.set_property(self._prop_brightness, round(val)) if self._prop_color_temp and ATTR_COLOR_TEMP in kwargs: mired = kwargs[ATTR_COLOR_TEMP] color_temp = self.translate_mired(mired) _LOGGER.debug('Setting light: %s color temperature: %s mireds, %s ct', self.name, mired, color_temp) ret = self.set_property(self._prop_color_temp, color_temp) if self._prop_color and ATTR_HS_COLOR in kwargs: rgb = color.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR]) num = rgb_to_int(rgb) _LOGGER.debug('Setting light: %s color: %s', self.name, rgb) ret = self.set_property(self._prop_color, num) if self._prop_mode and ATTR_EFFECT in kwargs: val = self._prop_mode.list_value(kwargs[ATTR_EFFECT]) _LOGGER.debug('Setting light: %s effect: %s(%s)', self.name, kwargs[ATTR_EFFECT], val) ret = self.set_property(self._prop_mode, val) return ret @property def brightness(self): """Return the brightness of this light between 0..255.""" val = None if self._prop_brightness: val = self._prop_brightness.from_dict(self._state_attrs) if val is None: return None rmx = 100 if self._prop_brightness.value_range: rmx = self._prop_brightness.range_max() return round(255 / rmx * int(val)) @property def hs_color(self): """Return the hue and saturation color value [float, float].""" rgb = self.rgb_color if rgb is not None: return color.color_RGB_to_hs(*rgb) return None @property def rgb_color(self): """Return the rgb color value [int, int, int].""" if self._prop_color: num = round(self._prop_color.from_dict(self._state_attrs) or 0) return int_to_rgb(num) return None @property def color_temp(self): if not self._prop_color_temp: return None return self.translate_mired(self._prop_color_temp.from_dict(self._state_attrs) or 2700) @property def min_mireds(self): if not self._prop_color_temp: return None return self.translate_mired(self._prop_color_temp.value_range[1] or 5700) @property def max_mireds(self): if not self._prop_color_temp: return None return self.translate_mired(self._prop_color_temp.value_range[0] or 2700) @staticmethod def translate_mired(num): try: return round(1000000 / num) except TypeError: return round(1000000 / 2700) @property def effect_list(self): if self._prop_mode: return self._prop_mode.list_descriptions() return None @property def effect(self): if self._prop_mode: val = self._prop_mode.from_dict(self._state_attrs) if val is not None: return self._prop_mode.list_description(val) return None class MiotLightSubEntity(MiotLightEntity, ToggleSubEntity): def __init__(self, parent, miot_service: MiotService): prop_power = miot_service.get_property('on') ToggleSubEntity.__init__(self, parent, prop_power.full_name, { 'keys': list((miot_service.mapping() or {}).keys()), }) MiotLightEntity.__init__(self, { **parent.miot_config, 'name': f'{parent.device_name}', }, miot_service, device=parent.miot_device) self.entity_id = miot_service.generate_entity_id(self) self._prop_power = prop_power def update(self, data=None): super().update(data) if not self._available: return async def async_update(self): await self.hass.async_add_executor_job(partial(self.update)) class LightSubEntity(ToggleSubEntity, LightEntity): _brightness = None _color_temp = None def update(self, data=None): super().update(data) if self._available: attrs = self._state_attrs self._brightness = attrs.get('brightness', 0) self._color_temp = attrs.get('color_temp', 0) def turn_on(self, **kwargs): self.call_parent(['turn_on_light', 'turn_on'], **kwargs) def turn_off(self, **kwargs): 
self.call_parent(['turn_off_light', 'turn_off'], **kwargs) @property def brightness(self): return self._brightness @property def color_temp(self): return self._color_temp
from unittest import TestCase, main from cogent3 import make_aligned_seqs from cogent3.app import evo as evo_app from cogent3.app.result import ( generic_result, model_collection_result, model_result, ) from cogent3.util.deserialise import deserialise_object __author__ = "Gavin Huttley" __copyright__ = "Copyright 2007-2020, The Cogent Project" __credits__ = ["Gavin Huttley"] __license__ = "BSD-3" __version__ = "2020.7.2a" __maintainer__ = "Gavin Huttley" __email__ = "Gavin.Huttley@anu.edu.au" __status__ = "Alpha" class TestGenericResult(TestCase): def test_deserialised_values(self): """correctly deserialises values""" from cogent3 import DNA data = {"type": "cogent3.core.moltype.MolType", "moltype": "dna"} result = generic_result(source="blah.json") result["key"] = data result.deserialised_values() got = result["key"] self.assertEqual(got, DNA) # if we have a type value without "cogent3", leaves as is data = {"type": "core.moltype.MolType", "moltype": "dna"} result = generic_result(source="blah.json") result["key"] = data result.deserialised_values() got = result["key"] self.assertEqual(got, data) # or if no "type" entry, leaves as is data = {"moltype": "dna"} result = generic_result(source="blah.json") result["key"] = data result.deserialised_values() got = result["key"] self.assertEqual(got, data) def test_repr_str(self): """it works""" data = {"type": "cogent3.core.moltype.MolType", "moltype": "dna"} result = generic_result(source="blah.json") result["key"] = data r = repr(result) s = str(result) def test_keys(self): """it works""" data = {"type": "cogent3.core.moltype.MolType", "moltype": "dna"} result = generic_result(source="blah.json") result["key"] = data keys = result.keys() self.assertEqual(keys, ["key"]) class TestModelResult(TestCase): def test_model_result_alignment(self): """returns alignment from lf""" _data = { "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG", "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG", "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG", } aln = make_aligned_seqs(data=_data, moltype="dna") mod = evo_app.model( "F81", show_progress=False, opt_args=dict(max_evaluations=5, limit_action="ignore"), ) result = mod(aln) got = result.alignment self.assertEqual(got.to_dict(), _data) def test_model_result_alignment_split_pos_model(self): """returns alignment from lf with split codon positions""" _data = { "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG", "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG", "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG", } aln = make_aligned_seqs(data=_data, moltype="dna") mod = evo_app.model( "F81", split_codons=True, show_progress=False, opt_args=dict(max_evaluations=5, limit_action="ignore"), ) result = mod(aln) for i in range(1, 4): got = result.alignment[i] expect = aln[i - 1 :: 3] self.assertEqual(got.to_dict(), expect.to_dict()) def test_model_result_repr_split_pos_model(self): """repr works for model_result of split codon positions""" _data = { "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG", "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG", "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG", } aln = make_aligned_seqs(data=_data, moltype="dna") mod = evo_app.model( "F81", split_codons=True, show_progress=False, opt_args=dict(max_evaluations=55, limit_action="ignore"), ) result = mod(aln) s = repr(result) def test_model_result_tree_split_pos_model(self): """returns tree from lf with split codon positions""" _data = { "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG", "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG", "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG", } aln = 
make_aligned_seqs(data=_data, moltype="dna") mod = evo_app.model( "F81", split_codons=True, show_progress=False, opt_args=dict(max_evaluations=55, limit_action="ignore"), ) result = mod(aln) self.assertTrue(len(result.tree), 3) # check the trees are different by summing lengths lengths = set() for i, t in result.tree.items(): lengths.add(t.total_length()) self.assertTrue(len(lengths) > 1) def test_model_result_simulate_alignment(self): """returns tree from lf with split codon positions""" _data = { "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG", "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG", "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG", } aln = make_aligned_seqs(data=_data, moltype="dna") mod = evo_app.model( "F81", split_codons=True, show_progress=False, opt_args=dict(max_evaluations=55, limit_action="ignore"), ) result = mod(aln) got = result.simulate_alignment() self.assertEqual(len(aln), len(got)) self.assertNotEqual(aln.to_dict(), got.to_dict()) def test_model_result_tree_discrete_time(self): """returns paralinear lengths""" _data = { "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG", "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG", "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG", } aln = make_aligned_seqs(data=_data, moltype="dna") model1 = evo_app.model( "BH", opt_args=dict(max_evaluations=25, limit_action="ignore") ) result = model1(aln) got = result.tree self.assertEqual( got.children[0].params["length"], got.children[0].params["paralinear"] ) def test_model_result_setitem(self): """TypeError if value a likelihood function, or a dict with correct type""" v = dict(type="arbitrary") r = model_result(name="one", source="two") with self.assertRaises(TypeError): r["name"] = v with self.assertRaises(TypeError): r["name"] = 4 _data = { "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG", "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG", "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG", } aln = make_aligned_seqs(data=_data, moltype="dna") with self.assertRaises(TypeError): r["name"] = aln class TestModelCollectionResult(TestCase): _model_results = {} def setUp(self): """constructs _model_results if they don't already exist""" if self._model_results: return _data = { "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG", "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG", "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG", } aln = make_aligned_seqs(data=_data, moltype="dna") model1 = evo_app.model( "F81", opt_args=dict(max_evaluations=25, limit_action="ignore") ) model2 = evo_app.model( "HKY85", opt_args=dict(max_evaluations=25, limit_action="ignore") ) mr1 = model1(aln) mr2 = model2(aln) self._model_results[mr1.name] = mr1 self._model_results[mr2.name] = mr2 def test_get_best_model(self): """should correctly identify the best model""" coll = model_collection_result(None) coll.update(self._model_results) got = coll.get_best_model() # we ensure a model_result instance is returned from the possible set self.assertIn(got, self._model_results.values()) def test_select_model(self): """correctly select models""" # we ensure a series of model_result instances is returned coll = model_collection_result(None) coll.update(self._model_results) got = coll.select_models() self.assertTrue(len(got) > 0) possible = list(self._model_results.values()) for m in got: self.assertIn(m, possible) def test_model_collection_result_repr(self): """constructed result can do the different repr""" result = model_collection_result(None) coll = model_collection_result(None) coll.update(self._model_results) got = result.__repr__() self.assertIsInstance(got, str) got = result._repr_html_() 
self.assertIsInstance(got, str) def test_json_roundtrip(self): """roundtrip from json correct""" coll = model_collection_result(name="blah", source="blah2") coll.update(self._model_results) self.assertEqual(coll.name, "blah") self.assertEqual(coll.source, "blah2") orig = coll.__repr__() got = deserialise_object(coll.to_json()) self.assertEqual(got.__repr__(), orig) self.assertIsInstance(got, model_collection_result) self.assertEqual(got.name, coll.name) self.assertEqual(got.source, coll.source) # select_models() should not fail got = deserialise_object(coll.to_json()) m = got.select_models() self.assertIsInstance(m[0], model_result) class TestHypothesisResult(TestCase): def test_pvalue(self): """hypothesis test p-value property""" _data = { "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG", "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG", "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG", } aln = make_aligned_seqs(data=_data, moltype="dna") model1 = evo_app.model( "F81", opt_args=dict(max_evaluations=25, limit_action="ignore") ) model2 = evo_app.model( "HKY85", opt_args=dict(max_evaluations=25, limit_action="ignore") ) hyp = evo_app.hypothesis(model1, model2) result = hyp(aln) self.assertTrue(0 <= result.pvalue <= 1) if __name__ == "__main__": main()
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .models import UserProfile
from .forms import ProfileForm


def profile(request, pk):
    profile = UserProfile.objects.get(id=pk)
    context = {
        'profile': profile
    }
    return render(request, 'account/profile.html', context)


def update_profile(request, pk):
    profile = UserProfile.objects.get(id=pk)
    forms = ProfileForm(instance=profile)
    if request.method == 'POST':
        forms = ProfileForm(request.POST, request.FILES, instance=profile)
        if forms.is_valid():
            forms.save()
            return redirect('home')
    context = {
        'forms': forms
    }
    return render(request, 'account/update-profile.html', context)
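# A hypothetical URLconf sketch (not part of the original file) showing how the two
# views above could be wired up; the path patterns and the URL names used here are
# assumptions, as is the existence of a 'home' URL used by redirect() in the view.
from django.urls import path

from . import views

urlpatterns = [
    path('profile/<int:pk>/', views.profile, name='profile'),
    path('profile/<int:pk>/update/', views.update_profile, name='update-profile'),
]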
# static analysis: ignore from __future__ import print_function from __future__ import absolute_import from __future__ import division from .test_name_check_visitor import TestNameCheckVisitorBase from .test_node_visitor import skip_before from .error_code import ErrorCode class TestAnnotations(TestNameCheckVisitorBase): @skip_before((3, 5)) def test_union(self): self.assert_passes( """ import re from typing import Union, Optional, List, Set, Dict, Match, Pattern _Pattern = type(re.compile("a")) _Match = type(re.match("a", "a")) def capybara() -> Union[int, str]: return 0 def kerodon() -> Optional[int]: return None def complex() -> Union[List[str], Set[int], Dict[float, List[str]], int]: return [] def check() -> None: assert_is_value(capybara(), MultiValuedValue([TypedValue(int), TypedValue(str)])) assert_is_value(kerodon(), MultiValuedValue([TypedValue(int), KnownValue(None)])) assert_is_value( complex(), MultiValuedValue( [ GenericValue(list, [TypedValue(str)]), GenericValue(set, [TypedValue(int)]), GenericValue( dict, [TypedValue(float), GenericValue(list, [TypedValue(str)])] ), TypedValue(int), ] ), ) def rgx(m: Match[str], p: Pattern[bytes]) -> None: assert_is_value(p, GenericValue(_Pattern, [TypedValue(bytes)])) assert_is_value(m, GenericValue(_Match, [TypedValue(str)])) """ ) @skip_before((3, 5)) def test_generic(self): self.assert_passes( """ from typing import List, SupportsInt def capybara(x: List[int], y: List, z: SupportsInt) -> None: assert_is_value(x, GenericValue(list, [TypedValue(int)])) assert_is_value(y, TypedValue(list)) assert_is_value(z, TypedValue(SupportsInt)) """ ) @skip_before((3, 5)) def test_self_type(self): self.assert_passes( """ class Capybara: def f(self: int) -> None: assert_is_value(self, TypedValue(int)) def g(self) -> None: assert_is_value(self, TypedValue(Capybara)) """ ) @skip_before((3, 5)) def test_newtype(self): self.assert_passes( """ from typing import NewType, Tuple X = NewType("X", int) Y = NewType("Y", Tuple[str, ...]) def capybara(x: X, y: Y) -> None: assert_is_value(x, NewTypeValue(X)) print(y) # just asserting that this doesn't cause errors """ ) @skip_before((3, 5)) def test_literal(self): self.assert_passes( """ from typing_extensions import Literal def capybara(x: Literal[True], y: Literal[True, False]) -> None: assert_is_value(x, KnownValue(True)) assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)])) """ ) @skip_before((3, 5)) def test_contextmanager(self): self.assert_passes( """ from contextlib import contextmanager from typing import Iterator @contextmanager def capybara() -> Iterator[int]: yield 3 def kerodon(): # Ideally should be ContextManager[int], but at least # it should not be Iterator[int], which is what pyanalyze # used to infer. assert_is_value(capybara(), UNRESOLVED_VALUE) """ ) @skip_before((3, 0)) def test_none_annotations(self): self.assert_passes( """ def mara() -> None: pass class Capybara: def __init__(self) -> None: pass def check() -> None: # Make sure we don't infer None if __init__ is annotated # as returning None. 
assert_is_value(Capybara(), TypedValue(Capybara)) assert_is_value(mara(), KnownValue(None)) """ ) @skip_before((3, 0)) def test_annotations(self): self.assert_passes( """ def caviidae() -> None: x = int # tests that annotations in a nested functions are not evaluated in a context where they don't exist def capybara(a: x, *b: x, c: x, d: x=3, **kwargs: x): pass assert_is_value(capybara, KnownValue(capybara)) """ ) self.assert_passes( """ class Caviidae: class Capybara: pass def eat(self, x: Capybara): assert_is_value(self, TypedValue(Caviidae)) @staticmethod def static(x: "Caviidae"): assert_is_value(x, TypedValue(Caviidae)) """ ) self.assert_fails( ErrorCode.incompatible_argument, """ def capybara(x: int) -> None: pass def kerodon(): capybara("not an int") """, ) @skip_before((3, 0)) def test_incompatible_return_value(self): self.assert_fails( ErrorCode.incompatible_return_value, """ def capybara() -> int: return "not an int" """, ) self.assert_fails( ErrorCode.incompatible_return_value, """ def capybara(x: bool) -> int: if not x: return return 42 """, ) self.assert_passes( """ from typing import Generator def capybara(x: bool) -> Generator[int, None, None]: if not x: return yield 42 """ ) self.assert_fails( ErrorCode.incompatible_return_value, """ def f() -> int: pass """, ) self.assert_passes( """ from abc import abstractmethod class X: @abstractmethod def f(self) -> int: pass """, ) self.assert_fails( ErrorCode.incompatible_return_value, """ def f() -> None: assert_is_value(g(), UNRESOLVED_VALUE) return g() def g(): pass """, ) @skip_before((3, 0)) def test_incompatible_default(self): self.assert_fails( ErrorCode.incompatible_default, """ def capybara(x: int = None) -> None: pass """, ) @skip_before((3, 0)) def test_property(self): self.assert_passes( """ class Capybara: def __init__(self, x): self.x = x @property def f(self) -> int: return self.x def get_g(self) -> int: return self.x * 2 g = property(get_g) def user(c: Capybara) -> None: assert_is_value(c.f, TypedValue(int)) assert_is_value(c.get_g(), TypedValue(int)) assert_is_value(c.g, TypedValue(int)) """ ) @skip_before((3, 0)) def test_annotations_override_return(self): self.assert_passes( """ from typing import Any def f() -> Any: return 0 def g(): return 0 def capybara(): assert_is_value(f(), UNRESOLVED_VALUE) assert_is_value(g(), KnownValue(0)) """ ) @skip_before((3, 0)) def test_cached_classmethod(self): # just test that this doesn't crash self.assert_passes( """ from functools import lru_cache class Capybara: @classmethod @lru_cache() def f(cls) -> int: return 3 """ ) @skip_before((3, 6)) def test_annassign(self): self.assert_passes( """ def capybara(y): x: int = y assert_is_value(y, UNRESOLVED_VALUE) assert_is_value(x, TypedValue(int)) """ ) self.assert_fails( ErrorCode.incompatible_assignment, """ def capybara(y: str): x: int = y """, ) @skip_before((3, 5)) def test_tuples(self): self.assert_passes( """ from typing import Tuple, Union def capybara(x: Tuple[int, ...], y: Tuple[int], z: Tuple[str, int], omega: Union[Tuple[str, int], None]) -> None: assert_is_value(x, GenericValue(tuple, [TypedValue(int)])) assert_is_value(y, SequenceIncompleteValue(tuple, [TypedValue(int)])) assert_is_value(z, SequenceIncompleteValue(tuple, [TypedValue(str), TypedValue(int)])) assert_is_value(omega, MultiValuedValue([ SequenceIncompleteValue(tuple, [TypedValue(str), TypedValue(int)]), KnownValue(None), ])) """ ) @skip_before((3, 0)) def test_invalid_annotation(self): self.assert_fails( ErrorCode.invalid_annotation, """ def f(x: 1): pass """, 
) @skip_before((3, 0)) def test_forward_ref(self): self.assert_fails( ErrorCode.undefined_name, """ def f(x: "NoSuchType"): pass """, ) self.assert_passes( """ import typing from typing import Optional def capybara(x: "X", y: "Optional[X]", z: "typing.Optional[X]"): assert_is_value(x, TypedValue(X)) assert_is_value(y, MultiValuedValue([KnownValue(None), TypedValue(X)])) assert_is_value(z, MultiValuedValue([KnownValue(None), TypedValue(X)])) class X: pass """ ) self.assert_passes( """ from typing import List def capybara(x: "List[int]") -> "List[str]": assert_is_value(x, GenericValue(list, [TypedValue(int)])) assert_is_value(capybara(x), GenericValue(list, [TypedValue(str)])) return [] """ ) self.assert_fails( ErrorCode.incompatible_return_value, """ def f() -> "int": return "" """, ) @skip_before((3, 0)) def test_pattern(self): self.assert_passes( """ from typing import Pattern import re _Pattern = type(re.compile("")) def capybara(x: Pattern[str]): assert_is_value(x, GenericValue(_Pattern, [TypedValue(str)])) """ ) @skip_before((3, 6)) def test_final(self): self.assert_passes( """ from typing_extensions import Final x: Final = 3 def capybara(): y: Final = 4 assert_is_value(x, KnownValue(3)) assert_is_value(y, KnownValue(4)) """ ) @skip_before((3, 6)) def test_type(self): self.assert_passes( """ from typing import Type def capybara(x: Type[str], y: "Type[int]"): assert_is_value(x, SubclassValue(str)) assert_is_value(y, SubclassValue(int)) """ )
'''https://projecteuler.net/problem=3'''
'''Please see the README document for details'''


def run(upper_bound):
    if upper_bound % 2 == 0:
        upper_bound = upper_bound - 1
    for decrementor in range(upper_bound, 0, -2):
        print(str(decrementor) + ", ", end="")
        counter = 2
        while counter < decrementor:
            if decrementor % counter == 0:
                break
            counter = counter + 1
        if counter == decrementor:
            print("Highest Prime lower than " + str(upper_bound) + " is " + str(decrementor))
            return


if __name__ == "__main__":
    print("https://projecteuler.net/problem=3")
# coding: utf-8 """ Browse API <p>The Browse API has the following resources:</p> <ul> <li><b> item_summary: </b> Lets shoppers search for specific items by keyword, GTIN, category, charity, product, or item aspects and refine the results by using filters, such as aspects, compatibility, and fields values.</li> <li><b> search_by_image: </b><a href=\"https://developer.ebay.com/api-docs/static/versioning.html#API\" target=\"_blank\"><img src=\"/cms/img/docs/experimental-icon.svg\" class=\"legend-icon experimental-icon\" alt=\"Experimental Release\" title=\"Experimental Release\" />&nbsp;(Experimental)</a> Lets shoppers search for specific items by image. You can refine the results by using URI parameters and filters.</li> <li><b> item: </b> <ul><li>Lets you retrieve the details of a specific item or all the items in an item group, which is an item with variations such as color and size and check if a product is compatible with the specified item, such as if a specific car is compatible with a specific part.</li> <li>Provides a bridge between the eBay legacy APIs, such as <b> Finding</b>, and the RESTful APIs, which use different formats for the item IDs.</li> </ul> </li> <li> <b> shopping_cart: </b> <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#API\" target=\"_blank\"><img src=\"/cms/img/docs/experimental-icon.svg\" class=\"legend-icon experimental-icon\" alt=\"Experimental Release\" title=\"Experimental Release\" />&nbsp;(Experimental)</a> <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#Limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> Provides the ability for eBay members to see the contents of their eBay cart, and add, remove, and change the quantity of items in their eBay cart.&nbsp;&nbsp;<b> Note: </b> This resource is not available in the eBay API Explorer.</li></ul> <p>The <b> item_summary</b>, <b> search_by_image</b>, and <b> item</b> resource calls require an <a href=\"/api-docs/static/oauth-client-credentials-grant.html\">Application access token</a>. The <b> shopping_cart</b> resource calls require a <a href=\"/api-docs/static/oauth-authorization-code-grant.html\">User access token</a>.</p> # noqa: E501 OpenAPI spec version: v1.8.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class PaymentMethod(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'payment_instructions': 'list[str]', 'payment_method_brands': 'list[PaymentMethodBrand]', 'payment_method_type': 'str', 'seller_instructions': 'list[str]' } attribute_map = { 'payment_instructions': 'paymentInstructions', 'payment_method_brands': 'paymentMethodBrands', 'payment_method_type': 'paymentMethodType', 'seller_instructions': 'sellerInstructions' } def __init__(self, payment_instructions=None, payment_method_brands=None, payment_method_type=None, seller_instructions=None): # noqa: E501 """PaymentMethod - a model defined in Swagger""" # noqa: E501 self._payment_instructions = None self._payment_method_brands = None self._payment_method_type = None self._seller_instructions = None self.discriminator = None if payment_instructions is not None: self.payment_instructions = payment_instructions if payment_method_brands is not None: self.payment_method_brands = payment_method_brands if payment_method_type is not None: self.payment_method_type = payment_method_type if seller_instructions is not None: self.seller_instructions = seller_instructions @property def payment_instructions(self): """Gets the payment_instructions of this PaymentMethod. # noqa: E501 The payment instructions for the buyer, such as cash in person or contact seller. # noqa: E501 :return: The payment_instructions of this PaymentMethod. # noqa: E501 :rtype: list[str] """ return self._payment_instructions @payment_instructions.setter def payment_instructions(self, payment_instructions): """Sets the payment_instructions of this PaymentMethod. The payment instructions for the buyer, such as cash in person or contact seller. # noqa: E501 :param payment_instructions: The payment_instructions of this PaymentMethod. # noqa: E501 :type: list[str] """ self._payment_instructions = payment_instructions @property def payment_method_brands(self): """Gets the payment_method_brands of this PaymentMethod. # noqa: E501 The payment method brands, including the payment method brand type and logo image. # noqa: E501 :return: The payment_method_brands of this PaymentMethod. # noqa: E501 :rtype: list[PaymentMethodBrand] """ return self._payment_method_brands @payment_method_brands.setter def payment_method_brands(self, payment_method_brands): """Sets the payment_method_brands of this PaymentMethod. The payment method brands, including the payment method brand type and logo image. # noqa: E501 :param payment_method_brands: The payment_method_brands of this PaymentMethod. # noqa: E501 :type: list[PaymentMethodBrand] """ self._payment_method_brands = payment_method_brands @property def payment_method_type(self): """Gets the payment_method_type of this PaymentMethod. # noqa: E501 The payment method type, such as credit card or cash. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/browse/types/gct:PaymentMethodTypeEnum'>eBay API documentation</a> # noqa: E501 :return: The payment_method_type of this PaymentMethod. # noqa: E501 :rtype: str """ return self._payment_method_type @payment_method_type.setter def payment_method_type(self, payment_method_type): """Sets the payment_method_type of this PaymentMethod. The payment method type, such as credit card or cash. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/browse/types/gct:PaymentMethodTypeEnum'>eBay API documentation</a> # noqa: E501 :param payment_method_type: The payment_method_type of this PaymentMethod. 
# noqa: E501 :type: str """ self._payment_method_type = payment_method_type @property def seller_instructions(self): """Gets the seller_instructions of this PaymentMethod. # noqa: E501 The seller instructions to the buyer, such as accepts credit cards or see description. # noqa: E501 :return: The seller_instructions of this PaymentMethod. # noqa: E501 :rtype: list[str] """ return self._seller_instructions @seller_instructions.setter def seller_instructions(self, seller_instructions): """Sets the seller_instructions of this PaymentMethod. The seller instructions to the buyer, such as accepts credit cards or see description. # noqa: E501 :param seller_instructions: The seller_instructions of this PaymentMethod. # noqa: E501 :type: list[str] """ self._seller_instructions = seller_instructions def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(PaymentMethod, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PaymentMethod): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
# vim:fileencoding=utf-8:noet
""" python method """
# Copyright (c) 2010 - 2019, © Badassops LLC / Luc Suryo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*
#* File        : bao_signal_handler.py
#* Description : function to handle interrupts
#* Author      : Luc Suryo <luc@badassops.com>
#* Version     : 0.2
#* Date        : Feb 21, 2019
#*
#* History :
#*   Date:         Author:  Info:
#*   Jun 1, 2010   LIS      First Release
#*   Feb 21, 2019  LIS      refactored

import signal
import sys


def signal_handler(signum, frame):
    """ signal/interrupts handler
        @param signum {int}     The interrupt ID according to signal.h.
        @param frame  {string}  Memory frame where the interrupt was raised.
    """
    if signum == int(signal.SIGHUP):
        print('Received -HUP, app does not support reload. {}'.format(frame))
    elif signum == int(signal.SIGINT):
        print('Received ctrl-c, aborted on your request. {}'.format(frame))
    elif signum == int(signal.SIGTERM):
        print('Received kill -TERM, terminating. {}'.format(frame))
    else:
        print('Received unknown interrupt : {}'.format(signum))
    sys.exit(128 + signum)


def install_int_handler():
    """ Install signal/interrupts handler, we capture only SIGHUP, SIGINT and TERM """
    signal.signal(signal.SIGHUP, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
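# A minimal usage sketch (an assumption, not part of the original module): install
# the handlers at startup and let signal_handler() terminate the process with the
# conventional 128 + signum exit code when SIGHUP, SIGINT or SIGTERM arrives. The
# module name used in the import is taken from the file header above.
import time

import bao_signal_handler


def main():
    bao_signal_handler.install_int_handler()
    while True:
        # stand-in for real work; press ctrl-c to exercise the SIGINT branch
        time.sleep(1)


if __name__ == '__main__':
    main()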
import hashlib
import random

import lycanthropy.sql.interface
import lycanthropy.crypto
import jwt


def decodeToken(token, config):
    rawData = jwt.decode(
        token,
        config['secret'],
        algorithms=['HS256']
    )
    return rawData


def monitoringToken(user, config, remote, identity):
    userData = lycanthropy.sql.interface.filterUser({'username': user})[0]
    token = jwt.encode({
            'user': user,
            '_wolfmon': identity,
            'campaigns': userData['campaigns'],
            'roles': userData['roles'],
            '_host': remote
        },
        config['secret'],
        algorithm='HS256'
    ).decode('utf-8')
    return token


def apiToken(user, config, remote):
    userData = lycanthropy.sql.interface.filterUser({'username': user})[0]
    token = jwt.encode({
            'user': user,
            'campaigns': userData['campaigns'],
            'roles': userData['roles'],
            '_host': remote
        },
        config['secret'],
        algorithm='HS256'
    ).decode('utf-8')
    return token


def getCampaignAccess(user, config, token, remote, wolfmon):
    decoded = decodeToken(token, config)
    if decoded['user'] == user and decoded['_host'] == remote and wolfmon == decoded['_wolfmon']:
        userData = lycanthropy.sql.interface.filterUser({'username': user})[0]
        return userData['campaigns'].split(',')
    else:
        return 'error'


def verifyToken(user, config, token, remote):
    decoded = decodeToken(token, config)
    if decoded['user'] == user and decoded['_host'] == remote:
        return True
    else:
        return False


def verifyAuth(user, password):
    # Look up the user before indexing the result: indexing first and then
    # comparing against [] would raise IndexError for an unknown username.
    matches = lycanthropy.sql.interface.filterUser({'username': user})
    if not matches:
        return False
    userData = matches[0]
    print(userData)
    reconstruct = mkHash(password, userData['password'].split('.')[0])
    print(reconstruct)
    if reconstruct == userData['password']:
        return True
    else:
        return False


def mkHash(password, salt):
    passHmac = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt.encode('utf-8'), 100000)
    return '{}.{}'.format(salt, passHmac.hex())


def mkSalt():
    alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    strOut = []
    for i in range(32):
        strOut.append(alpha[random.randint(0, len(alpha) - 1)])
    return "".join(strOut)


def mkUser(user, password):
    pwdSalt = mkSalt()
    passObj = mkHash(password, pwdSalt)
    return passObj
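# A small self-contained sketch (an assumption, not part of the original module)
# showing the salt/hash round trip that verifyAuth() relies on: the stored value is
# "<salt>.<pbkdf2-hex>", so re-hashing the supplied password with the stored salt
# must reproduce it exactly. The username and passwords are placeholders.
stored = mkUser('wolf', 's3cret')          # returns "<salt>.<hash>"
salt = stored.split('.')[0]
assert mkHash('s3cret', salt) == stored    # correct password matches
assert mkHash('wrong', salt) != stored     # wrong password does not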
#
# Copyright (C) 2019 UAVCAN Development Team <info@zubax.com>.
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#

from .. import app
from ..model import devel_feed, forum_feed, adopters
from flask import render_template

FEED_LENGTH = 15

TITLE = 'UAVCAN - a lightweight protocol designed for reliable communication ' \
        'in aerospace and robotic applications over robust vehicular networks'


# noinspection PyBroadException
@app.route('/')
def _index():
    try:
        development_feed_entries = devel_feed.get(max_items=FEED_LENGTH)
    except Exception:
        development_feed_entries = None
        app.logger.exception('Devel feed error')

    try:
        forum_feed_entries = forum_feed.get(max_items=FEED_LENGTH)
    except Exception:
        forum_feed_entries = None
        app.logger.exception('Forum feed error')

    adopter_list = adopters.get_list()

    return render_template('index.html',
                           title=TITLE,
                           development_feed_entries=development_feed_entries,
                           forum_feed_entries=forum_feed_entries,
                           adopters=adopter_list)
from __future__ import unicode_literals import unittest try: from unittest import mock except ImportError: import mock try: from .base import BaseTestCase, BasePlatformTestCase except (ValueError, ImportError): from pynextcaller.tests.base import BaseTestCase, BasePlatformTestCase ADDRESS_JSON_RESULT_EXAMPLE = ''' { "records": [ { "id": "97d949a413f4ea8b85e9586e1f2d9a", "first_name": "Jerry", "last_name": "Seinfeld", "name": "Jerry Seinfeld", "language": "English", "fraud_threat": "low", "spoof": "false", "phone": [ { "number": "2125558383", "carrier": "Verizon Wireless", "line_type": "LAN" } ], "address": [ { "city": "New York", "extended_zip": "", "country": "USA", "line2": "Apt 5a", "line1": "129 West 81st Street", "state": "NY", "zip_code": "10024" } ], "email": "demo@nextcaller.com", "social_links": [ { "followers": 1, "type": "twitter", "url": "https://twitter.com/nextcaller" }, { "type": "facebook", "url": "https://www.facebook.com/nextcaller" }, { "type": "linkedin", "url": "https://www.linkedin.com/company/next-caller" } ], "age": "45-54", "gender": "Male", "household_income": "50k-75k", "marital_status": "Single", "presence_of_children": "No", "home_owner_status": "Rent", "market_value": "350k-500k", "length_of_residence": "12 Years", "high_net_worth": "No", "occupation": "Entertainer", "education": "Completed College", "department": "not specified" } ] } ''' WRONG_ADDRESS_DATA = { 'first_name': 'Jerry', 'last_name': 'Seinfeld', 'address': '129 West 81st Street', 'city': 'New York', } WRONG_ADDRESS_ZIP_DATA = { 'first_name': 'Jerry', 'last_name': 'Seinfeld', 'address': '129 West 81st Street', 'city': 'New York', 'state': 'NY', 'zip_code': '1002', } WRONG_ADDRESS_FIELDS_DATA = { 'first_name': 'Jerry', 'last_name': 'Seinfeld', 'address': '129 West 81st Street', 'city': 'New York', 'state': 'NY', 'zip_code': '10024', 'test_field': 'xx', } ADDRESS_DATA = { 'first_name': 'Jerry', 'last_name': 'Seinfeld', 'address': '129 West 81st Street', 'city': 'New York', 'state': 'NY', 'zip_code': '10024', } class AddressTestCase(BaseTestCase): def test_address_by_not_full_address(self): self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE) self.assertRaises( ValueError, self.client.get_by_address_name, WRONG_ADDRESS_DATA) def test_address_by_wrong_zip(self): self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE) self.assertRaises( ValueError, self.client.get_by_address_name, WRONG_ADDRESS_ZIP_DATA) def test_address_by_wrong_fields(self): self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE) self.assertRaises( ValueError, self.client.get_by_address_name, WRONG_ADDRESS_FIELDS_DATA) def test_by_address(self): self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE) res = self.client.get_by_address_name(ADDRESS_DATA) self.assertTrue(res['records']) self.assertEqual(res['records'][0]['email'], 'demo@nextcaller.com') self.assertEqual(res['records'][0]['first_name'], 'Jerry') self.assertEqual(res['records'][0]['last_name'], 'Seinfeld') class PlatformAddressTestCase(BasePlatformTestCase): def test_address_by_not_full_address(self): self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE) self.assertRaises( ValueError, self.client.get_by_address_name, WRONG_ADDRESS_DATA, self.platform_username) def test_address_by_wrong_zip(self): self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE) self.assertRaises( ValueError, self.client.get_by_address_name, WRONG_ADDRESS_ZIP_DATA, self.platform_username) def test_address_by_wrong_fields(self): self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE) self.assertRaises( ValueError, 
self.client.get_by_address_name, WRONG_ADDRESS_FIELDS_DATA, self.platform_username) def test_by_address(self): self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE) res = self.client.get_by_address_name(ADDRESS_DATA, self.platform_username) self.assertTrue(res['records']) self.assertEqual(res['records'][0]['email'], 'demo@nextcaller.com') self.assertEqual(res['records'][0]['first_name'], 'Jerry') self.assertEqual(res['records'][0]['last_name'], 'Seinfeld') if __name__ == '__main__': unittest.main()
import collections import random from sympy.assumptions import Q from sympy.core.add import Add from sympy.core.compatibility import range from sympy.core.function import (Function, diff) from sympy.core.numbers import (E, Float, I, Integer, oo, pi) from sympy.core.relational import (Eq, Lt) from sympy.core.singleton import S from sympy.core.symbol import (Symbol, symbols) from sympy.functions.elementary.complexes import Abs from sympy.functions.elementary.exponential import exp from sympy.functions.elementary.miscellaneous import (Max, Min, sqrt) from sympy.functions.elementary.piecewise import Piecewise from sympy.functions.elementary.trigonometric import (cos, sin, tan) from sympy.logic.boolalg import (And, Or) from sympy.matrices.common import (ShapeError, MatrixError, NonSquareMatrixError, _MinimalMatrix, MatrixShaping, MatrixProperties, MatrixOperations, MatrixArithmetic, MatrixSpecial) from sympy.matrices.matrices import (MatrixDeterminant, MatrixReductions, MatrixSubspaces, MatrixEigen, MatrixCalculus) from sympy.matrices import (Matrix, diag, eye, matrix_multiply_elementwise, ones, zeros, SparseMatrix) from sympy.polys.polytools import Poly from sympy.simplify.simplify import simplify from sympy.simplify.trigsimp import trigsimp from sympy.utilities.exceptions import SymPyDeprecationWarning from sympy.utilities.iterables import flatten from sympy.utilities.pytest import (raises, XFAIL, slow, skip, warns_deprecated_sympy) from sympy.abc import a, b, c, d, x, y, z # classes to test the basic matrix classes class ShapingOnlyMatrix(_MinimalMatrix, MatrixShaping): pass def eye_Shaping(n): return ShapingOnlyMatrix(n, n, lambda i, j: int(i == j)) def zeros_Shaping(n): return ShapingOnlyMatrix(n, n, lambda i, j: 0) class PropertiesOnlyMatrix(_MinimalMatrix, MatrixProperties): pass def eye_Properties(n): return PropertiesOnlyMatrix(n, n, lambda i, j: int(i == j)) def zeros_Properties(n): return PropertiesOnlyMatrix(n, n, lambda i, j: 0) class OperationsOnlyMatrix(_MinimalMatrix, MatrixOperations): pass def eye_Operations(n): return OperationsOnlyMatrix(n, n, lambda i, j: int(i == j)) def zeros_Operations(n): return OperationsOnlyMatrix(n, n, lambda i, j: 0) class ArithmeticOnlyMatrix(_MinimalMatrix, MatrixArithmetic): pass def eye_Arithmetic(n): return ArithmeticOnlyMatrix(n, n, lambda i, j: int(i == j)) def zeros_Arithmetic(n): return ArithmeticOnlyMatrix(n, n, lambda i, j: 0) class DeterminantOnlyMatrix(_MinimalMatrix, MatrixDeterminant): pass def eye_Determinant(n): return DeterminantOnlyMatrix(n, n, lambda i, j: int(i == j)) def zeros_Determinant(n): return DeterminantOnlyMatrix(n, n, lambda i, j: 0) class ReductionsOnlyMatrix(_MinimalMatrix, MatrixReductions): pass def eye_Reductions(n): return ReductionsOnlyMatrix(n, n, lambda i, j: int(i == j)) def zeros_Reductions(n): return ReductionsOnlyMatrix(n, n, lambda i, j: 0) class SpecialOnlyMatrix(_MinimalMatrix, MatrixSpecial): pass class SubspaceOnlyMatrix(_MinimalMatrix, MatrixSubspaces): pass class EigenOnlyMatrix(_MinimalMatrix, MatrixEigen): pass class CalculusOnlyMatrix(_MinimalMatrix, MatrixCalculus): pass def test__MinimalMatrix(): x = _MinimalMatrix(2, 3, [1, 2, 3, 4, 5, 6]) assert x.rows == 2 assert x.cols == 3 assert x[2] == 3 assert x[1, 1] == 5 assert list(x) == [1, 2, 3, 4, 5, 6] assert list(x[1, :]) == [4, 5, 6] assert list(x[:, 1]) == [2, 5] assert list(x[:, :]) == list(x) assert x[:, :] == x assert _MinimalMatrix(x) == x assert _MinimalMatrix([[1, 2, 3], [4, 5, 6]]) == x assert _MinimalMatrix(([1, 2, 3], [4, 5, 6])) == x 
assert _MinimalMatrix([(1, 2, 3), (4, 5, 6)]) == x assert _MinimalMatrix(((1, 2, 3), (4, 5, 6))) == x assert not (_MinimalMatrix([[1, 2], [3, 4], [5, 6]]) == x) # ShapingOnlyMatrix tests def test_vec(): m = ShapingOnlyMatrix(2, 2, [1, 3, 2, 4]) m_vec = m.vec() assert m_vec.cols == 1 for i in range(4): assert m_vec[i] == i + 1 def test_tolist(): lst = [[S.One, S.Half, x*y, S.Zero], [x, y, z, x**2], [y, -S.One, z*x, 3]] flat_lst = [S.One, S.Half, x*y, S.Zero, x, y, z, x**2, y, -S.One, z*x, 3] m = ShapingOnlyMatrix(3, 4, flat_lst) assert m.tolist() == lst def test_row_col_del(): e = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9]) raises(ValueError, lambda: e.row_del(5)) raises(ValueError, lambda: e.row_del(-5)) raises(ValueError, lambda: e.col_del(5)) raises(ValueError, lambda: e.col_del(-5)) assert e.row_del(2) == e.row_del(-1) == Matrix([[1, 2, 3], [4, 5, 6]]) assert e.col_del(2) == e.col_del(-1) == Matrix([[1, 2], [4, 5], [7, 8]]) assert e.row_del(1) == e.row_del(-2) == Matrix([[1, 2, 3], [7, 8, 9]]) assert e.col_del(1) == e.col_del(-2) == Matrix([[1, 3], [4, 6], [7, 9]]) def test_get_diag_blocks1(): a = Matrix([[1, 2], [2, 3]]) b = Matrix([[3, x], [y, 3]]) c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]]) assert a.get_diag_blocks() == [a] assert b.get_diag_blocks() == [b] assert c.get_diag_blocks() == [c] def test_get_diag_blocks2(): a = Matrix([[1, 2], [2, 3]]) b = Matrix([[3, x], [y, 3]]) c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]]) A, B, C, D = diag(a, b, b), diag(a, b, c), diag(a, c, b), diag(c, c, b) A = ShapingOnlyMatrix(A.rows, A.cols, A) B = ShapingOnlyMatrix(B.rows, B.cols, B) C = ShapingOnlyMatrix(C.rows, C.cols, C) D = ShapingOnlyMatrix(D.rows, D.cols, D) assert A.get_diag_blocks() == [a, b, b] assert B.get_diag_blocks() == [a, b, c] assert C.get_diag_blocks() == [a, c, b] assert D.get_diag_blocks() == [c, c, b] def test_shape(): m = ShapingOnlyMatrix(1, 2, [0, 0]) m.shape == (1, 2) def test_reshape(): m0 = eye_Shaping(3) assert m0.reshape(1, 9) == Matrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1)) m1 = ShapingOnlyMatrix(3, 4, lambda i, j: i + j) assert m1.reshape( 4, 3) == Matrix(((0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5))) assert m1.reshape(2, 6) == Matrix(((0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5))) def test_row_col(): m = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9]) assert m.row(0) == Matrix(1, 3, [1, 2, 3]) assert m.col(0) == Matrix(3, 1, [1, 4, 7]) def test_row_join(): assert eye_Shaping(3).row_join(Matrix([7, 7, 7])) == \ Matrix([[1, 0, 0, 7], [0, 1, 0, 7], [0, 0, 1, 7]]) def test_col_join(): assert eye_Shaping(3).col_join(Matrix([[7, 7, 7]])) == \ Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1], [7, 7, 7]]) def test_row_insert(): r4 = Matrix([[4, 4, 4]]) for i in range(-4, 5): l = [1, 0, 0] l.insert(i, 4) assert flatten(eye_Shaping(3).row_insert(i, r4).col(0).tolist()) == l def test_col_insert(): c4 = Matrix([4, 4, 4]) for i in range(-4, 5): l = [0, 0, 0] l.insert(i, 4) assert flatten(zeros_Shaping(3).col_insert(i, c4).row(0).tolist()) == l # issue 13643 assert eye_Shaping(6).col_insert(3, Matrix([[2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]])) == \ Matrix([[1, 0, 0, 2, 2, 0, 0, 0], [0, 1, 0, 2, 2, 0, 0, 0], [0, 0, 1, 2, 2, 0, 0, 0], [0, 0, 0, 2, 2, 1, 0, 0], [0, 0, 0, 2, 2, 0, 1, 0], [0, 0, 0, 2, 2, 0, 0, 1]]) def test_extract(): m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j) assert m.extract([0, 1, 3], [0, 1]) == Matrix(3, 2, [0, 1, 3, 4, 9, 10]) assert m.extract([0, 3], [0, 0, 2]) == Matrix(2, 3, [0, 0, 2, 9, 9, 11]) assert m.extract(range(4), range(3)) == m 
raises(IndexError, lambda: m.extract([4], [0])) raises(IndexError, lambda: m.extract([0], [3])) def test_hstack(): m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j) m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j) assert m == m.hstack(m) assert m.hstack(m, m, m) == ShapingOnlyMatrix.hstack(m, m, m) == Matrix([ [0, 1, 2, 0, 1, 2, 0, 1, 2], [3, 4, 5, 3, 4, 5, 3, 4, 5], [6, 7, 8, 6, 7, 8, 6, 7, 8], [9, 10, 11, 9, 10, 11, 9, 10, 11]]) raises(ShapeError, lambda: m.hstack(m, m2)) assert Matrix.hstack() == Matrix() # test regression #12938 M1 = Matrix.zeros(0, 0) M2 = Matrix.zeros(0, 1) M3 = Matrix.zeros(0, 2) M4 = Matrix.zeros(0, 3) m = ShapingOnlyMatrix.hstack(M1, M2, M3, M4) assert m.rows == 0 and m.cols == 6 def test_vstack(): m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j) m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j) assert m == m.vstack(m) assert m.vstack(m, m, m) == ShapingOnlyMatrix.vstack(m, m, m) == Matrix([ [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]) raises(ShapeError, lambda: m.vstack(m, m2)) assert Matrix.vstack() == Matrix() # PropertiesOnlyMatrix tests def test_atoms(): m = PropertiesOnlyMatrix(2, 2, [1, 2, x, 1 - 1/x]) assert m.atoms() == {S(1),S(2),S(-1), x} assert m.atoms(Symbol) == {x} def test_free_symbols(): assert PropertiesOnlyMatrix([[x], [0]]).free_symbols == {x} def test_has(): A = PropertiesOnlyMatrix(((x, y), (2, 3))) assert A.has(x) assert not A.has(z) assert A.has(Symbol) A = PropertiesOnlyMatrix(((2, y), (2, 3))) assert not A.has(x) def test_is_anti_symmetric(): x = symbols('x') assert PropertiesOnlyMatrix(2, 1, [1, 2]).is_anti_symmetric() is False m = PropertiesOnlyMatrix(3, 3, [0, x**2 + 2*x + 1, y, -(x + 1)**2, 0, x*y, -y, -x*y, 0]) assert m.is_anti_symmetric() is True assert m.is_anti_symmetric(simplify=False) is False assert m.is_anti_symmetric(simplify=lambda x: x) is False m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in m]) assert m.is_anti_symmetric(simplify=False) is True m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in [S.One] + list(m)[1:]]) assert m.is_anti_symmetric() is False def test_diagonal_symmetrical(): m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0]) assert not m.is_diagonal() assert m.is_symmetric() assert m.is_symmetric(simplify=False) m = PropertiesOnlyMatrix(2, 2, [1, 0, 0, 1]) assert m.is_diagonal() m = PropertiesOnlyMatrix(3, 3, diag(1, 2, 3)) assert m.is_diagonal() assert m.is_symmetric() m = PropertiesOnlyMatrix(3, 3, [1, 0, 0, 0, 2, 0, 0, 0, 3]) assert m == diag(1, 2, 3) m = PropertiesOnlyMatrix(2, 3, zeros(2, 3)) assert not m.is_symmetric() assert m.is_diagonal() m = PropertiesOnlyMatrix(((5, 0), (0, 6), (0, 0))) assert m.is_diagonal() m = PropertiesOnlyMatrix(((5, 0, 0), (0, 6, 0))) assert m.is_diagonal() m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3]) assert m.is_symmetric() assert not m.is_symmetric(simplify=False) assert m.expand().is_symmetric(simplify=False) def test_is_hermitian(): a = PropertiesOnlyMatrix([[1, I], [-I, 1]]) assert a.is_hermitian a = PropertiesOnlyMatrix([[2*I, I], [-I, 1]]) assert a.is_hermitian is False a = PropertiesOnlyMatrix([[x, I], [-I, 1]]) assert a.is_hermitian is None a = PropertiesOnlyMatrix([[x, 1], [-I, 1]]) assert a.is_hermitian is False def test_is_Identity(): assert eye_Properties(3).is_Identity assert not PropertiesOnlyMatrix(zeros(3)).is_Identity assert not PropertiesOnlyMatrix(ones(3)).is_Identity # issue 6242 assert not PropertiesOnlyMatrix([[1, 0, 0]]).is_Identity 
def test_is_symbolic(): a = PropertiesOnlyMatrix([[x, x], [x, x]]) assert a.is_symbolic() is True a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, 7, 8]]) assert a.is_symbolic() is False a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, x, 8]]) assert a.is_symbolic() is True a = PropertiesOnlyMatrix([[1, x, 3]]) assert a.is_symbolic() is True a = PropertiesOnlyMatrix([[1, 2, 3]]) assert a.is_symbolic() is False a = PropertiesOnlyMatrix([[1], [x], [3]]) assert a.is_symbolic() is True a = PropertiesOnlyMatrix([[1], [2], [3]]) assert a.is_symbolic() is False def test_is_upper(): a = PropertiesOnlyMatrix([[1, 2, 3]]) assert a.is_upper is True a = PropertiesOnlyMatrix([[1], [2], [3]]) assert a.is_upper is False def test_is_lower(): a = PropertiesOnlyMatrix([[1, 2, 3]]) assert a.is_lower is False a = PropertiesOnlyMatrix([[1], [2], [3]]) assert a.is_lower is True def test_is_square(): m = PropertiesOnlyMatrix([[1],[1]]) m2 = PropertiesOnlyMatrix([[2,2],[2,2]]) assert not m.is_square assert m2.is_square def test_is_symmetric(): m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0]) assert m.is_symmetric() m = PropertiesOnlyMatrix(2, 2, [0, 1, 0, 1]) assert not m.is_symmetric() def test_is_hessenberg(): A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [0, 1, 2]]) assert A.is_upper_hessenberg A = PropertiesOnlyMatrix(3, 3, [3, 2, 0, 4, 4, 1, 1, 5, 2]) assert A.is_lower_hessenberg A = PropertiesOnlyMatrix(3, 3, [3, 2, -1, 4, 4, 1, 1, 5, 2]) assert A.is_lower_hessenberg is False assert A.is_upper_hessenberg is False A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [3, 1, 2]]) assert not A.is_upper_hessenberg def test_is_zero(): assert PropertiesOnlyMatrix(0, 0, []).is_zero assert PropertiesOnlyMatrix([[0, 0], [0, 0]]).is_zero assert PropertiesOnlyMatrix(zeros(3, 4)).is_zero assert not PropertiesOnlyMatrix(eye(3)).is_zero assert PropertiesOnlyMatrix([[x, 0], [0, 0]]).is_zero == None assert PropertiesOnlyMatrix([[x, 1], [0, 0]]).is_zero == False a = Symbol('a', nonzero=True) assert PropertiesOnlyMatrix([[a, 0], [0, 0]]).is_zero == False def test_values(): assert set(PropertiesOnlyMatrix(2,2,[0,1,2,3]).values()) == set([1,2,3]) x = Symbol('x', real=True) assert set(PropertiesOnlyMatrix(2,2,[x,0,0,1]).values()) == set([x,1]) # OperationsOnlyMatrix tests def test_applyfunc(): m0 = OperationsOnlyMatrix(eye(3)) assert m0.applyfunc(lambda x: 2*x) == eye(3)*2 assert m0.applyfunc(lambda x: 0) == zeros(3) assert m0.applyfunc(lambda x: 1) == ones(3) def test_adjoint(): dat = [[0, I], [1, 0]] ans = OperationsOnlyMatrix([[0, 1], [-I, 0]]) assert ans.adjoint() == Matrix(dat) def test_as_real_imag(): m1 = OperationsOnlyMatrix(2,2,[1,2,3,4]) m3 = OperationsOnlyMatrix(2,2,[1+S.ImaginaryUnit,2+2*S.ImaginaryUnit,3+3*S.ImaginaryUnit,4+4*S.ImaginaryUnit]) a,b = m3.as_real_imag() assert a == m1 assert b == m1 def test_conjugate(): M = OperationsOnlyMatrix([[0, I, 5], [1, 2, 0]]) assert M.T == Matrix([[0, 1], [I, 2], [5, 0]]) assert M.C == Matrix([[0, -I, 5], [1, 2, 0]]) assert M.C == M.conjugate() assert M.H == M.T.C assert M.H == Matrix([[ 0, 1], [-I, 2], [ 5, 0]]) def test_doit(): a = OperationsOnlyMatrix([[Add(x,x, evaluate=False)]]) assert a[0] != 2*x assert a.doit() == Matrix([[2*x]]) def test_evalf(): a = OperationsOnlyMatrix(2, 1, [sqrt(5), 6]) assert all(a.evalf()[i] == a[i].evalf() for i in range(2)) assert all(a.evalf(2)[i] == a[i].evalf(2) for i in range(2)) assert all(a.n(2)[i] == a[i].n(2) for i in range(2)) def test_expand(): m0 = OperationsOnlyMatrix([[x*(x + y), 2], [((x + y)*y)*x, x*(y + x*(x + y))]]) # Test if expand() 
returns a matrix m1 = m0.expand() assert m1 == Matrix( [[x*y + x**2, 2], [x*y**2 + y*x**2, x*y + y*x**2 + x**3]]) a = Symbol('a', real=True) assert OperationsOnlyMatrix(1, 1, [exp(I*a)]).expand(complex=True) == \ Matrix([cos(a) + I*sin(a)]) def test_refine(): m0 = OperationsOnlyMatrix([[Abs(x)**2, sqrt(x**2)], [sqrt(x**2)*Abs(y)**2, sqrt(y**2)*Abs(x)**2]]) m1 = m0.refine(Q.real(x) & Q.real(y)) assert m1 == Matrix([[x**2, Abs(x)], [y**2*Abs(x), x**2*Abs(y)]]) m1 = m0.refine(Q.positive(x) & Q.positive(y)) assert m1 == Matrix([[x**2, x], [x*y**2, x**2*y]]) m1 = m0.refine(Q.negative(x) & Q.negative(y)) assert m1 == Matrix([[x**2, -x], [-x*y**2, -x**2*y]]) def test_replace(): F, G = symbols('F, G', cls=Function) K = OperationsOnlyMatrix(2, 2, lambda i, j: G(i+j)) M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j)) N = M.replace(F, G) assert N == K def test_replace_map(): F, G = symbols('F, G', cls=Function) K = OperationsOnlyMatrix(2, 2, [(G(0), {F(0): G(0)}), (G(1), {F(1): G(1)}), (G(1), {F(1) \ : G(1)}), (G(2), {F(2): G(2)})]) M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j)) N = M.replace(F, G, True) assert N == K def test_simplify(): n = Symbol('n') f = Function('f') M = OperationsOnlyMatrix([[ 1/x + 1/y, (x + x*y) / x ], [ (f(x) + y*f(x))/f(x), 2 * (1/n - cos(n * pi)/n) / pi ]]) assert M.simplify() == Matrix([[ (x + y)/(x * y), 1 + y ], [ 1 + y, 2*((1 - 1*cos(pi*n))/(pi*n)) ]]) eq = (1 + x)**2 M = OperationsOnlyMatrix([[eq]]) assert M.simplify() == Matrix([[eq]]) assert M.simplify(ratio=oo) == Matrix([[eq.simplify(ratio=oo)]]) def test_subs(): assert OperationsOnlyMatrix([[1, x], [x, 4]]).subs(x, 5) == Matrix([[1, 5], [5, 4]]) assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([[x, -1], [y, -2]]) == \ Matrix([[-1, 2], [-3, 4]]) assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([(x, -1), (y, -2)]) == \ Matrix([[-1, 2], [-3, 4]]) assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs({x: -1, y: -2}) == \ Matrix([[-1, 2], [-3, 4]]) assert OperationsOnlyMatrix([[x*y]]).subs({x: y - 1, y: x - 1}, simultaneous=True) == \ Matrix([[(x - 1)*(y - 1)]]) def test_trace(): M = OperationsOnlyMatrix([[1, 0, 0], [0, 5, 0], [0, 0, 8]]) assert M.trace() == 14 def test_xreplace(): assert OperationsOnlyMatrix([[1, x], [x, 4]]).xreplace({x: 5}) == \ Matrix([[1, 5], [5, 4]]) assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).xreplace({x: -1, y: -2}) == \ Matrix([[-1, 2], [-3, 4]]) def test_permute(): a = OperationsOnlyMatrix(3, 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) raises(IndexError, lambda: a.permute([[0,5]])) b = a.permute_rows([[0, 2], [0, 1]]) assert a.permute([[0, 2], [0, 1]]) == b == Matrix([ [5, 6, 7, 8], [9, 10, 11, 12], [1, 2, 3, 4]]) b = a.permute_cols([[0, 2], [0, 1]]) assert a.permute([[0, 2], [0, 1]], orientation='cols') == b ==\ Matrix([ [ 2, 3, 1, 4], [ 6, 7, 5, 8], [10, 11, 9, 12]]) b = a.permute_cols([[0, 2], [0, 1]], direction='backward') assert a.permute([[0, 2], [0, 1]], orientation='cols', direction='backward') == b ==\ Matrix([ [ 3, 1, 2, 4], [ 7, 5, 6, 8], [11, 9, 10, 12]]) assert a.permute([1, 2, 0, 3]) == Matrix([ [5, 6, 7, 8], [9, 10, 11, 12], [1, 2, 3, 4]]) from sympy.combinatorics import Permutation assert a.permute(Permutation([1, 2, 0, 3])) == Matrix([ [5, 6, 7, 8], [9, 10, 11, 12], [1, 2, 3, 4]]) # ArithmeticOnlyMatrix tests def test_abs(): m = ArithmeticOnlyMatrix([[1, -2], [x, y]]) assert abs(m) == ArithmeticOnlyMatrix([[1, 2], [Abs(x), Abs(y)]]) def test_add(): m = ArithmeticOnlyMatrix([[1, 2, 3], [x, y, x], [2*y, -50, z*x]]) assert m + m == 
ArithmeticOnlyMatrix([[2, 4, 6], [2*x, 2*y, 2*x], [4*y, -100, 2*z*x]]) n = ArithmeticOnlyMatrix(1, 2, [1, 2]) raises(ShapeError, lambda: m + n) def test_multiplication(): a = ArithmeticOnlyMatrix(( (1, 2), (3, 1), (0, 6), )) b = ArithmeticOnlyMatrix(( (1, 2), (3, 0), )) raises(ShapeError, lambda: b*a) raises(TypeError, lambda: a*{}) c = a*b assert c[0, 0] == 7 assert c[0, 1] == 2 assert c[1, 0] == 6 assert c[1, 1] == 6 assert c[2, 0] == 18 assert c[2, 1] == 0 try: eval('c = a @ b') except SyntaxError: pass else: assert c[0, 0] == 7 assert c[0, 1] == 2 assert c[1, 0] == 6 assert c[1, 1] == 6 assert c[2, 0] == 18 assert c[2, 1] == 0 h = a.multiply_elementwise(c) assert h == matrix_multiply_elementwise(a, c) assert h[0, 0] == 7 assert h[0, 1] == 4 assert h[1, 0] == 18 assert h[1, 1] == 6 assert h[2, 0] == 0 assert h[2, 1] == 0 raises(ShapeError, lambda: a.multiply_elementwise(b)) c = b * Symbol("x") assert isinstance(c, ArithmeticOnlyMatrix) assert c[0, 0] == x assert c[0, 1] == 2*x assert c[1, 0] == 3*x assert c[1, 1] == 0 c2 = x * b assert c == c2 c = 5 * b assert isinstance(c, ArithmeticOnlyMatrix) assert c[0, 0] == 5 assert c[0, 1] == 2*5 assert c[1, 0] == 3*5 assert c[1, 1] == 0 try: eval('c = 5 @ b') except SyntaxError: pass else: assert isinstance(c, ArithmeticOnlyMatrix) assert c[0, 0] == 5 assert c[0, 1] == 2*5 assert c[1, 0] == 3*5 assert c[1, 1] == 0 def test_matmul(): a = Matrix([[1, 2], [3, 4]]) assert a.__matmul__(2) == NotImplemented assert a.__rmatmul__(2) == NotImplemented #This is done this way because @ is only supported in Python 3.5+ #To check 2@a case try: eval('2 @ a') except SyntaxError: pass except TypeError: #TypeError is raised in case of NotImplemented is returned pass #Check a@2 case try: eval('a @ 2') except SyntaxError: pass except TypeError: #TypeError is raised in case of NotImplemented is returned pass def test_power(): raises(NonSquareMatrixError, lambda: Matrix((1, 2))**2) A = ArithmeticOnlyMatrix([[2, 3], [4, 5]]) assert (A**5)[:] == (6140, 8097, 10796, 14237) A = ArithmeticOnlyMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]]) assert (A**3)[:] == (290, 262, 251, 448, 440, 368, 702, 954, 433) assert A**0 == eye(3) assert A**1 == A assert (ArithmeticOnlyMatrix([[2]]) ** 100)[0, 0] == 2**100 assert ArithmeticOnlyMatrix([[1, 2], [3, 4]])**Integer(2) == ArithmeticOnlyMatrix([[7, 10], [15, 22]]) def test_neg(): n = ArithmeticOnlyMatrix(1, 2, [1, 2]) assert -n == ArithmeticOnlyMatrix(1, 2, [-1, -2]) def test_sub(): n = ArithmeticOnlyMatrix(1, 2, [1, 2]) assert n - n == ArithmeticOnlyMatrix(1, 2, [0, 0]) def test_div(): n = ArithmeticOnlyMatrix(1, 2, [1, 2]) assert n/2 == ArithmeticOnlyMatrix(1, 2, [S(1)/2, S(2)/2]) # DeterminantOnlyMatrix tests def test_det(): a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6]) raises(NonSquareMatrixError, lambda: a.det()) z = zeros_Determinant(2) ey = eye_Determinant(2) assert z.det() == 0 assert ey.det() == 1 x = Symbol('x') a = DeterminantOnlyMatrix(0,0,[]) b = DeterminantOnlyMatrix(1,1,[5]) c = DeterminantOnlyMatrix(2,2,[1,2,3,4]) d = DeterminantOnlyMatrix(3,3,[1,2,3,4,5,6,7,8,8]) e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14]) # the method keyword for `det` doesn't kick in until 4x4 matrices, # so there is no need to test all methods on smaller ones assert a.det() == 1 assert b.det() == 5 assert c.det() == -2 assert d.det() == 3 assert e.det() == 4*x - 24 assert e.det(method='bareiss') == 4*x - 24 assert e.det(method='berkowitz') == 4*x - 24 raises(ValueError, lambda: e.det(iszerofunc="test")) def test_adjugate(): x 
= Symbol('x') e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14]) adj = Matrix([ [ 4, -8, 4, 0], [ 76, -14*x - 68, 14*x - 8, -4*x + 24], [-122, 17*x + 142, -21*x + 4, 8*x - 48], [ 48, -4*x - 72, 8*x, -4*x + 24]]) assert e.adjugate() == adj assert e.adjugate(method='bareiss') == adj assert e.adjugate(method='berkowitz') == adj a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6]) raises(NonSquareMatrixError, lambda: a.adjugate()) def test_cofactor_and_minors(): x = Symbol('x') e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14]) m = Matrix([ [ x, 1, 3], [ 2, 9, 11], [12, 13, 14]]) cm = Matrix([ [ 4, 76, -122, 48], [-8, -14*x - 68, 17*x + 142, -4*x - 72], [ 4, 14*x - 8, -21*x + 4, 8*x], [ 0, -4*x + 24, 8*x - 48, -4*x + 24]]) sub = Matrix([ [x, 1, 2], [4, 5, 6], [2, 9, 10]]) assert e.minor_submatrix(1,2) == m assert e.minor_submatrix(-1,-1) == sub assert e.minor(1,2) == -17*x - 142 assert e.cofactor(1,2) == 17*x + 142 assert e.cofactor_matrix() == cm assert e.cofactor_matrix(method="bareiss") == cm assert e.cofactor_matrix(method="berkowitz") == cm raises(ValueError, lambda: e.cofactor(4,5)) raises(ValueError, lambda: e.minor(4,5)) raises(ValueError, lambda: e.minor_submatrix(4,5)) a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6]) assert a.minor_submatrix(0,0) == Matrix([[5, 6]]) raises(ValueError, lambda: DeterminantOnlyMatrix(0,0,[]).minor_submatrix(0,0)) raises(NonSquareMatrixError, lambda: a.cofactor(0,0)) raises(NonSquareMatrixError, lambda: a.minor(0,0)) raises(NonSquareMatrixError, lambda: a.cofactor_matrix()) def test_charpoly(): x, y = Symbol('x'), Symbol('y') m = DeterminantOnlyMatrix(3,3,[1,2,3,4,5,6,7,8,9]) assert eye_Determinant(3).charpoly(x) == Poly((x - 1)**3, x) assert eye_Determinant(3).charpoly(y) == Poly((y - 1)**3, y) assert m.charpoly() == Poly(x**3 - 15*x**2 - 18*x, x) raises(NonSquareMatrixError, lambda: Matrix([[1], [2]]).charpoly()) # ReductionsOnlyMatrix tests def test_row_op(): e = eye_Reductions(3) raises(ValueError, lambda: e.elementary_row_op("abc")) raises(ValueError, lambda: e.elementary_row_op()) raises(ValueError, lambda: e.elementary_row_op('n->kn', row=5, k=5)) raises(ValueError, lambda: e.elementary_row_op('n->kn', row=-5, k=5)) raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=1, row2=5)) raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=5, row2=1)) raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=-5, row2=1)) raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=1, row2=-5)) raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=5, k=5)) raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=5, row2=1, k=5)) raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=-5, row2=1, k=5)) raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=-5, k=5)) raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=1, k=5)) # test various ways to set arguments assert e.elementary_row_op("n->kn", 0, 5) == Matrix([[5, 0, 0], [0, 1, 0], [0, 0, 1]]) assert e.elementary_row_op("n->kn", 1, 5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]]) assert e.elementary_row_op("n->kn", row=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]]) assert e.elementary_row_op("n->kn", row1=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]]) assert e.elementary_row_op("n<->m", 0, 1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) assert e.elementary_row_op("n<->m", row1=0, row2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) assert e.elementary_row_op("n<->m", row=0, row2=1) 
== Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) assert e.elementary_row_op("n->n+km", 0, 5, 1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]]) assert e.elementary_row_op("n->n+km", row=0, k=5, row2=1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]]) assert e.elementary_row_op("n->n+km", row1=0, k=5, row2=1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]]) # make sure the matrix doesn't change size a = ReductionsOnlyMatrix(2, 3, [0]*6) assert a.elementary_row_op("n->kn", 1, 5) == Matrix(2, 3, [0]*6) assert a.elementary_row_op("n<->m", 0, 1) == Matrix(2, 3, [0]*6) assert a.elementary_row_op("n->n+km", 0, 5, 1) == Matrix(2, 3, [0]*6) def test_col_op(): e = eye_Reductions(3) raises(ValueError, lambda: e.elementary_col_op("abc")) raises(ValueError, lambda: e.elementary_col_op()) raises(ValueError, lambda: e.elementary_col_op('n->kn', col=5, k=5)) raises(ValueError, lambda: e.elementary_col_op('n->kn', col=-5, k=5)) raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=1, col2=5)) raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=5, col2=1)) raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=-5, col2=1)) raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=1, col2=-5)) raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=5, k=5)) raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=5, col2=1, k=5)) raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=-5, col2=1, k=5)) raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=-5, k=5)) raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=1, k=5)) # test various ways to set arguments assert e.elementary_col_op("n->kn", 0, 5) == Matrix([[5, 0, 0], [0, 1, 0], [0, 0, 1]]) assert e.elementary_col_op("n->kn", 1, 5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]]) assert e.elementary_col_op("n->kn", col=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]]) assert e.elementary_col_op("n->kn", col1=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]]) assert e.elementary_col_op("n<->m", 0, 1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) assert e.elementary_col_op("n<->m", col1=0, col2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) assert e.elementary_col_op("n<->m", col=0, col2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]]) assert e.elementary_col_op("n->n+km", 0, 5, 1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]]) assert e.elementary_col_op("n->n+km", col=0, k=5, col2=1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]]) assert e.elementary_col_op("n->n+km", col1=0, k=5, col2=1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]]) # make sure the matrix doesn't change size a = ReductionsOnlyMatrix(2, 3, [0]*6) assert a.elementary_col_op("n->kn", 1, 5) == Matrix(2, 3, [0]*6) assert a.elementary_col_op("n<->m", 0, 1) == Matrix(2, 3, [0]*6) assert a.elementary_col_op("n->n+km", 0, 5, 1) == Matrix(2, 3, [0]*6) def test_is_echelon(): zro = zeros_Reductions(3) ident = eye_Reductions(3) assert zro.is_echelon assert ident.is_echelon a = ReductionsOnlyMatrix(0, 0, []) assert a.is_echelon a = ReductionsOnlyMatrix(2, 3, [3, 2, 1, 0, 0, 6]) assert a.is_echelon a = ReductionsOnlyMatrix(2, 3, [0, 0, 6, 3, 2, 1]) assert not a.is_echelon x = Symbol('x') a = ReductionsOnlyMatrix(3, 1, [x, 0, 0]) assert a.is_echelon a = ReductionsOnlyMatrix(3, 1, [x, x, 0]) assert not a.is_echelon a = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 1, 2, 3, 0, 0, 0]) assert not a.is_echelon def test_echelon_form(): # echelon form is not unique, but the result # must be row-equivalent to the original matrix # and it must be in 
echelon form. a = zeros_Reductions(3) e = eye_Reductions(3) # we can assume the zero matrix and the identity matrix shouldn't change assert a.echelon_form() == a assert e.echelon_form() == e a = ReductionsOnlyMatrix(0, 0, []) assert a.echelon_form() == a a = ReductionsOnlyMatrix(1, 1, [5]) assert a.echelon_form() == a # now we get to the real tests def verify_row_null_space(mat, rows, nulls): for v in nulls: assert all(t.is_zero for t in a_echelon*v) for v in rows: if not all(t.is_zero for t in v): assert not all(t.is_zero for t in a_echelon*v.transpose()) a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9]) nulls = [Matrix([ [ 1], [-2], [ 1]])] rows = [a[i,:] for i in range(a.rows)] a_echelon = a.echelon_form() assert a_echelon.is_echelon verify_row_null_space(a, rows, nulls) a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 8]) nulls = [] rows = [a[i,:] for i in range(a.rows)] a_echelon = a.echelon_form() assert a_echelon.is_echelon verify_row_null_space(a, rows, nulls) a = ReductionsOnlyMatrix(3, 3, [2, 1, 3, 0, 0, 0, 2, 1, 3]) nulls = [Matrix([ [-S(1)/2], [ 1], [ 0]]), Matrix([ [-S(3)/2], [ 0], [ 1]])] rows = [a[i,:] for i in range(a.rows)] a_echelon = a.echelon_form() assert a_echelon.is_echelon verify_row_null_space(a, rows, nulls) # this one requires a row swap a = ReductionsOnlyMatrix(3, 3, [2, 1, 3, 0, 0, 0, 1, 1, 3]) nulls = [Matrix([ [ 0], [ -3], [ 1]])] rows = [a[i,:] for i in range(a.rows)] a_echelon = a.echelon_form() assert a_echelon.is_echelon verify_row_null_space(a, rows, nulls) a = ReductionsOnlyMatrix(3, 3, [0, 3, 3, 0, 2, 2, 0, 1, 1]) nulls = [Matrix([ [1], [0], [0]]), Matrix([ [ 0], [-1], [ 1]])] rows = [a[i,:] for i in range(a.rows)] a_echelon = a.echelon_form() assert a_echelon.is_echelon verify_row_null_space(a, rows, nulls) a = ReductionsOnlyMatrix(2, 3, [2, 2, 3, 3, 3, 0]) nulls = [Matrix([ [-1], [1], [0]])] rows = [a[i,:] for i in range(a.rows)] a_echelon = a.echelon_form() assert a_echelon.is_echelon verify_row_null_space(a, rows, nulls) def test_rref(): e = ReductionsOnlyMatrix(0, 0, []) assert e.rref(pivots=False) == e e = ReductionsOnlyMatrix(1, 1, [1]) a = ReductionsOnlyMatrix(1, 1, [5]) assert e.rref(pivots=False) == a.rref(pivots=False) == e a = ReductionsOnlyMatrix(3, 1, [1, 2, 3]) assert a.rref(pivots=False) == Matrix([[1], [0], [0]]) a = ReductionsOnlyMatrix(1, 3, [1, 2, 3]) assert a.rref(pivots=False) == Matrix([[1, 2, 3]]) a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9]) assert a.rref(pivots=False) == Matrix([ [1, 0, -1], [0, 1, 2], [0, 0, 0]]) a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 1, 2, 3, 1, 2, 3]) b = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 0, 0, 0, 0, 0, 0]) c = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 1, 2, 3, 0, 0, 0]) d = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 0, 0, 0, 1, 2, 3]) assert a.rref(pivots=False) == \ b.rref(pivots=False) == \ c.rref(pivots=False) == \ d.rref(pivots=False) == b e = eye_Reductions(3) z = zeros_Reductions(3) assert e.rref(pivots=False) == e assert z.rref(pivots=False) == z a = ReductionsOnlyMatrix([ [ 0, 0, 1, 2, 2, -5, 3], [-1, 5, 2, 2, 1, -7, 5], [ 0, 0, -2, -3, -3, 8, -5], [-1, 5, 0, -1, -2, 1, 0]]) mat, pivot_offsets = a.rref() assert mat == Matrix([ [1, -5, 0, 0, 1, 1, -1], [0, 0, 1, 0, 0, -1, 1], [0, 0, 0, 1, 1, -2, 1], [0, 0, 0, 0, 0, 0, 0]]) assert pivot_offsets == (0, 2, 3) a = ReductionsOnlyMatrix([[S(1)/19, S(1)/5, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [ 12, 13, 14, 15]]) assert a.rref(pivots=False) == Matrix([ [1, 0, 0, -S(76)/157], [0, 1, 0, -S(5)/157], [0, 0, 1, S(238)/157], [0, 0, 0, 
0]]) x = Symbol('x') a = ReductionsOnlyMatrix(2, 3, [x, 1, 1, sqrt(x), x, 1]) for i, j in zip(a.rref(pivots=False), [1, 0, sqrt(x)*(-x + 1)/(-x**(S(5)/2) + x), 0, 1, 1/(sqrt(x) + x + 1)]): assert simplify(i - j).is_zero # SpecialOnlyMatrix tests def test_eye(): assert list(SpecialOnlyMatrix.eye(2,2)) == [1, 0, 0, 1] assert list(SpecialOnlyMatrix.eye(2)) == [1, 0, 0, 1] assert type(SpecialOnlyMatrix.eye(2)) == SpecialOnlyMatrix assert type(SpecialOnlyMatrix.eye(2, cls=Matrix)) == Matrix def test_ones(): assert list(SpecialOnlyMatrix.ones(2,2)) == [1, 1, 1, 1] assert list(SpecialOnlyMatrix.ones(2)) == [1, 1, 1, 1] assert SpecialOnlyMatrix.ones(2,3) == Matrix([[1, 1, 1], [1, 1, 1]]) assert type(SpecialOnlyMatrix.ones(2)) == SpecialOnlyMatrix assert type(SpecialOnlyMatrix.ones(2, cls=Matrix)) == Matrix def test_zeros(): assert list(SpecialOnlyMatrix.zeros(2,2)) == [0, 0, 0, 0] assert list(SpecialOnlyMatrix.zeros(2)) == [0, 0, 0, 0] assert SpecialOnlyMatrix.zeros(2,3) == Matrix([[0, 0, 0], [0, 0, 0]]) assert type(SpecialOnlyMatrix.zeros(2)) == SpecialOnlyMatrix assert type(SpecialOnlyMatrix.zeros(2, cls=Matrix)) == Matrix def test_diag_make(): diag = SpecialOnlyMatrix.diag a = Matrix([[1, 2], [2, 3]]) b = Matrix([[3, x], [y, 3]]) c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]]) assert diag(a, b, b) == Matrix([ [1, 2, 0, 0, 0, 0], [2, 3, 0, 0, 0, 0], [0, 0, 3, x, 0, 0], [0, 0, y, 3, 0, 0], [0, 0, 0, 0, 3, x], [0, 0, 0, 0, y, 3], ]) assert diag(a, b, c) == Matrix([ [1, 2, 0, 0, 0, 0, 0], [2, 3, 0, 0, 0, 0, 0], [0, 0, 3, x, 0, 0, 0], [0, 0, y, 3, 0, 0, 0], [0, 0, 0, 0, 3, x, 3], [0, 0, 0, 0, y, 3, z], [0, 0, 0, 0, x, y, z], ]) assert diag(a, c, b) == Matrix([ [1, 2, 0, 0, 0, 0, 0], [2, 3, 0, 0, 0, 0, 0], [0, 0, 3, x, 3, 0, 0], [0, 0, y, 3, z, 0, 0], [0, 0, x, y, z, 0, 0], [0, 0, 0, 0, 0, 3, x], [0, 0, 0, 0, 0, y, 3], ]) a = Matrix([x, y, z]) b = Matrix([[1, 2], [3, 4]]) c = Matrix([[5, 6]]) # this "wandering diagonal" is what makes this # a block diagonal where each block is independent # of the others assert diag(a, 7, b, c) == Matrix([ [x, 0, 0, 0, 0, 0], [y, 0, 0, 0, 0, 0], [z, 0, 0, 0, 0, 0], [0, 7, 0, 0, 0, 0], [0, 0, 1, 2, 0, 0], [0, 0, 3, 4, 0, 0], [0, 0, 0, 0, 5, 6]]) raises(ValueError, lambda: diag(a, 7, b, c, rows=5)) assert diag(1) == Matrix([[1]]) assert diag(1, rows=2) == Matrix([[1, 0], [0, 0]]) assert diag(1, cols=2) == Matrix([[1, 0], [0, 0]]) assert diag(1, rows=3, cols=2) == Matrix([[1, 0], [0, 0], [0, 0]]) assert diag(*[2, 3]) == Matrix([ [2, 0], [0, 3]]) assert diag(Matrix([2, 3])) == Matrix([ [2], [3]]) assert diag([1, [2, 3], 4], unpack=False) == \ diag([[1], [2, 3], [4]], unpack=False) == Matrix([ [1, 0], [2, 3], [4, 0]]) assert type(diag(1)) == SpecialOnlyMatrix assert type(diag(1, cls=Matrix)) == Matrix assert Matrix.diag([1, 2, 3]) == Matrix.diag(1, 2, 3) assert Matrix.diag([1, 2, 3], unpack=False).shape == (3, 1) assert Matrix.diag([[1, 2, 3]]).shape == (3, 1) assert Matrix.diag([[1, 2, 3]], unpack=False).shape == (1, 3) assert Matrix.diag([[[1, 2, 3]]]).shape == (1, 3) # kerning can be used to move the starting point assert Matrix.diag(ones(0, 2), 1, 2) == Matrix([ [0, 0, 1, 0], [0, 0, 0, 2]]) assert Matrix.diag(ones(2, 0), 1, 2) == Matrix([ [0, 0], [0, 0], [1, 0], [0, 2]]) def test_diagonal(): m = Matrix(3, 3, range(9)) d = m.diagonal() assert d == m.diagonal(0) assert tuple(d) == (0, 4, 8) assert tuple(m.diagonal(1)) == (1, 5) assert tuple(m.diagonal(-1)) == (3, 7) assert tuple(m.diagonal(2)) == (2,) assert type(m.diagonal()) == type(m) s = SparseMatrix(3, 3, {(1, 1): 
1}) assert type(s.diagonal()) == type(s) assert type(m) != type(s) raises(ValueError, lambda: m.diagonal(3)) raises(ValueError, lambda: m.diagonal(-3)) raises(ValueError, lambda: m.diagonal(pi)) def test_jordan_block(): assert SpecialOnlyMatrix.jordan_block(3, 2) == SpecialOnlyMatrix.jordan_block(3, eigenvalue=2) \ == SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) \ == SpecialOnlyMatrix.jordan_block(3, 2, band='upper') \ == SpecialOnlyMatrix.jordan_block( size=3, eigenval=2, eigenvalue=2) \ == Matrix([ [2, 1, 0], [0, 2, 1], [0, 0, 2]]) assert SpecialOnlyMatrix.jordan_block(3, 2, band='lower') == Matrix([ [2, 0, 0], [1, 2, 0], [0, 1, 2]]) # missing eigenvalue raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(2)) # non-integral size raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(3.5, 2)) # size not specified raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(eigenvalue=2)) # inconsistent eigenvalue raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block( eigenvalue=2, eigenval=4)) # Deprecated feature raises(SymPyDeprecationWarning, lambda: SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2)) raises(SymPyDeprecationWarning, lambda: SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2)) with warns_deprecated_sympy(): assert SpecialOnlyMatrix.jordan_block(3, 2) == \ SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2) == \ SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2) with warns_deprecated_sympy(): assert SpecialOnlyMatrix.jordan_block( rows=4, cols=3, eigenvalue=2) == \ Matrix([ [2, 1, 0], [0, 2, 1], [0, 0, 2], [0, 0, 0]]) # Using alias keyword assert SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) == \ SpecialOnlyMatrix.jordan_block(size=3, eigenval=2) # SubspaceOnlyMatrix tests def test_columnspace(): m = SubspaceOnlyMatrix([[ 1, 2, 0, 2, 5], [-2, -5, 1, -1, -8], [ 0, -3, 3, 4, 1], [ 3, 6, 0, -7, 2]]) basis = m.columnspace() assert basis[0] == Matrix([1, -2, 0, 3]) assert basis[1] == Matrix([2, -5, -3, 6]) assert basis[2] == Matrix([2, -1, 4, -7]) assert len(basis) == 3 assert Matrix.hstack(m, *basis).columnspace() == basis def test_rowspace(): m = SubspaceOnlyMatrix([[ 1, 2, 0, 2, 5], [-2, -5, 1, -1, -8], [ 0, -3, 3, 4, 1], [ 3, 6, 0, -7, 2]]) basis = m.rowspace() assert basis[0] == Matrix([[1, 2, 0, 2, 5]]) assert basis[1] == Matrix([[0, -1, 1, 3, 2]]) assert basis[2] == Matrix([[0, 0, 0, 5, 5]]) assert len(basis) == 3 def test_nullspace(): m = SubspaceOnlyMatrix([[ 1, 2, 0, 2, 5], [-2, -5, 1, -1, -8], [ 0, -3, 3, 4, 1], [ 3, 6, 0, -7, 2]]) basis = m.nullspace() assert basis[0] == Matrix([-2, 1, 1, 0, 0]) assert basis[1] == Matrix([-1, -1, 0, -1, 1]) # make sure the null space is really gets zeroed assert all(e.is_zero for e in m*basis[0]) assert all(e.is_zero for e in m*basis[1]) def test_orthogonalize(): m = Matrix([[1, 2], [3, 4]]) assert m.orthogonalize(Matrix([[2], [1]])) == [Matrix([[2], [1]])] assert m.orthogonalize(Matrix([[2], [1]]), normalize=True) == [Matrix([[2*sqrt(5)/5], [sqrt(5)/5]])] assert m.orthogonalize(Matrix([[1], [2]]), Matrix([[-1], [4]])) == [Matrix([[1], [2]]), Matrix([[-S(12)/5], [S(6)/5]])] assert m.orthogonalize(Matrix([[0], [0]]), Matrix([[-1], [4]])) == [Matrix([[-1], [4]])] assert m.orthogonalize(Matrix([[0], [0]])) == [] n = Matrix([[9, 1, 9], [3, 6, 10], [8, 5, 2]]) vecs = [Matrix([[-5], [1]]), Matrix([[-5], [2]]), Matrix([[-5], [-2]])] assert n.orthogonalize(*vecs) == [Matrix([[-5], [1]]), Matrix([[S(5)/26], [S(25)/26]])] # EigenOnlyMatrix tests def test_eigenvals(): M = EigenOnlyMatrix([[0, 1, 1], [1, 0, 0], 
[1, 1, 1]]) assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1} # if we cannot factor the char poly, we raise an error m = Matrix([ [3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0], [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]]) raises(MatrixError, lambda: m.eigenvals()) def test_eigenvects(): M = EigenOnlyMatrix([[0, 1, 1], [1, 0, 0], [1, 1, 1]]) vecs = M.eigenvects() for val, mult, vec_list in vecs: assert len(vec_list) == 1 assert M*vec_list[0] == val*vec_list[0] def test_left_eigenvects(): M = EigenOnlyMatrix([[0, 1, 1], [1, 0, 0], [1, 1, 1]]) vecs = M.left_eigenvects() for val, mult, vec_list in vecs: assert len(vec_list) == 1 assert vec_list[0]*M == val*vec_list[0] def test_diagonalize(): m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0]) raises(MatrixError, lambda: m.diagonalize(reals_only=True)) P, D = m.diagonalize() assert D.is_diagonal() assert D == Matrix([ [-I, 0], [ 0, I]]) # make sure we use floats out if floats are passed in m = EigenOnlyMatrix(2, 2, [0, .5, .5, 0]) P, D = m.diagonalize() assert all(isinstance(e, Float) for e in D.values()) assert all(isinstance(e, Float) for e in P.values()) _, D2 = m.diagonalize(reals_only=True) assert D == D2 def test_is_diagonalizable(): a, b, c = symbols('a b c') m = EigenOnlyMatrix(2, 2, [a, c, c, b]) assert m.is_symmetric() assert m.is_diagonalizable() assert not EigenOnlyMatrix(2, 2, [1, 1, 0, 1]).is_diagonalizable() m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0]) assert m.is_diagonalizable() assert not m.is_diagonalizable(reals_only=True) def test_jordan_form(): m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10]) raises(NonSquareMatrixError, lambda: m.jordan_form()) # the next two tests test the cases where the old # algorithm failed due to the fact that the block structure can # *NOT* be determined from algebraic and geometric multiplicity alone # This can be seen most easily when one lets compute the J.c.f. of a matrix that # is in J.c.f already. 
m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 0, 0, 2 ]) P, J = m.jordan_form() assert m == J m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0, 0, 2, 0, 0, 0, 0, 2, 1, 0, 0, 0, 2 ]) P, J = m.jordan_form() assert m == J A = Matrix([[ 2, 4, 1, 0], [-4, 2, 0, 1], [ 0, 0, 2, 4], [ 0, 0, -4, 2]]) P, J = A.jordan_form() assert simplify(P*J*P.inv()) == A assert EigenOnlyMatrix(1,1,[1]).jordan_form() == (Matrix([1]), Matrix([1])) assert EigenOnlyMatrix(1,1,[1]).jordan_form(calc_transform=False) == Matrix([1]) # make sure if we cannot factor the characteristic polynomial, we raise an error m = Matrix([[3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0], [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]]) raises(MatrixError, lambda: m.jordan_form()) # make sure that if the input has floats, the output does too m = Matrix([ [ 0.6875, 0.125 + 0.1875*sqrt(3)], [0.125 + 0.1875*sqrt(3), 0.3125]]) P, J = m.jordan_form() assert all(isinstance(x, Float) or x == 0 for x in P) assert all(isinstance(x, Float) or x == 0 for x in J) def test_singular_values(): x = Symbol('x', real=True) A = EigenOnlyMatrix([[0, 1*I], [2, 0]]) # if singular values can be sorted, they should be in decreasing order assert A.singular_values() == [2, 1] A = eye(3) A[1, 1] = x A[2, 2] = 5 vals = A.singular_values() # since Abs(x) cannot be sorted, test set equality assert set(vals) == set([5, 1, Abs(x)]) A = EigenOnlyMatrix([[sin(x), cos(x)], [-cos(x), sin(x)]]) vals = [sv.trigsimp() for sv in A.singular_values()] assert vals == [S(1), S(1)] A = EigenOnlyMatrix([ [2, 4], [1, 3], [0, 0], [0, 0] ]) assert A.singular_values() == \ [sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221))] assert A.T.singular_values() == \ [sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221)), 0, 0] # CalculusOnlyMatrix tests @XFAIL def test_diff(): x, y = symbols('x y') m = CalculusOnlyMatrix(2, 1, [x, y]) # TODO: currently not working as ``_MinimalMatrix`` cannot be sympified: assert m.diff(x) == Matrix(2, 1, [1, 0]) def test_integrate(): x, y = symbols('x y') m = CalculusOnlyMatrix(2, 1, [x, y]) assert m.integrate(x) == Matrix(2, 1, [x**2/2, y*x]) def test_jacobian2(): rho, phi = symbols("rho,phi") X = CalculusOnlyMatrix(3, 1, [rho*cos(phi), rho*sin(phi), rho**2]) Y = CalculusOnlyMatrix(2, 1, [rho, phi]) J = Matrix([ [cos(phi), -rho*sin(phi)], [sin(phi), rho*cos(phi)], [ 2*rho, 0], ]) assert X.jacobian(Y) == J m = CalculusOnlyMatrix(2, 2, [1, 2, 3, 4]) m2 = CalculusOnlyMatrix(4, 1, [1, 2, 3, 4]) raises(TypeError, lambda: m.jacobian(Matrix([1,2]))) raises(TypeError, lambda: m2.jacobian(m)) def test_limit(): x, y = symbols('x y') m = CalculusOnlyMatrix(2, 1, [1/x, y]) assert m.limit(x, 5) == Matrix(2, 1, [S(1)/5, y]) def test_issue_13774(): M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) v = [1,1,1] raises(TypeError, lambda: M*v) raises(TypeError, lambda: v*M) def test___eq__(): assert (EigenOnlyMatrix( [[0, 1, 1], [1, 0, 0], [1, 1, 1]]) == {}) is False
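

# --- Hedged usage sketch (not part of the original test module) ---
# Illustrates the (matrix, pivots) return convention that test_rref above
# exercises, using the public sympy.Matrix API; a minimal sketch only.
def _example_rref_usage():
    from sympy import Matrix
    m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    reduced, pivots = m.rref()
    assert reduced == Matrix([[1, 0, -1], [0, 1, 2], [0, 0, 0]])
    assert pivots == (0, 1)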
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft and contributors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class SubtaskInformation(Model): """ Information about an Azure Batch subtask. :param id: The id of the subtask. :type id: int :param node_info: Information about the compute node on which the subtask ran. :type node_info: :class:`ComputeNodeInformation <azure.batch.models.ComputeNodeInformation>` :param start_time: The time at which the subtask started running. If the subtask has been restarted or retried, this is the most recent time at which the subtask started running. :type start_time: datetime :param end_time: The time at which the subtask completed. This property is set only if the subtask is in the Completed state. :type end_time: datetime :param exit_code: The exit code of the subtask. This property is set only if the subtask is in the Completed state. :type exit_code: int :param scheduling_error: Details of any error encountered scheduling the subtask. :type scheduling_error: :class:`TaskSchedulingError <azure.batch.models.TaskSchedulingError>` :param state: The current state of the subtask. Possible values include: 'active', 'preparing', 'running', 'completed' :type state: str or :class:`TaskState <azure.batch.models.TaskState>` :param state_transition_time: The time at which the subtask entered its current state. :type state_transition_time: datetime :param previous_state: The previous state of the subtask. This property is not set if the subtask is in its initial Active state. Possible values include: 'active', 'preparing', 'running', 'completed' :type previous_state: str or :class:`TaskState <azure.batch.models.TaskState>` :param previous_state_transition_time: The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial Active state. 
:type previous_state_transition_time: datetime """ _attribute_map = { 'id': {'key': 'id', 'type': 'int'}, 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, 'exit_code': {'key': 'exitCode', 'type': 'int'}, 'scheduling_error': {'key': 'schedulingError', 'type': 'TaskSchedulingError'}, 'state': {'key': 'state', 'type': 'TaskState'}, 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, 'previous_state': {'key': 'previousState', 'type': 'TaskState'}, 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, } def __init__(self, id=None, node_info=None, start_time=None, end_time=None, exit_code=None, scheduling_error=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None): self.id = id self.node_info = node_info self.start_time = start_time self.end_time = end_time self.exit_code = exit_code self.scheduling_error = scheduling_error self.state = state self.state_transition_time = state_transition_time self.previous_state = previous_state self.previous_state_transition_time = previous_state_transition_time
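

# --- Hedged construction sketch (not part of the generated SDK code) ---
# Shows how the optional keyword arguments above map onto attributes.
# All field values here are purely hypothetical.
def _example_subtask_information():
    from datetime import datetime
    info = SubtaskInformation(
        id=1,
        state='completed',
        exit_code=0,
        start_time=datetime(2017, 1, 1, 12, 0, 0),
        end_time=datetime(2017, 1, 1, 12, 5, 0),
    )
    assert info.exit_code == 0
    assert info.state == 'completed'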
import logging
import traceback

import config
import pathlib


class Logger(logging.getLoggerClass()):
    def __init__(self, name, level=logging.NOTSET):
        super().__init__(name, level=logging.DEBUG)

        formatter = logging.Formatter('%(levelname)s %(asctime)s [ %(name)s ] %(message)s')

        self.sh = logging.StreamHandler()
        self.sh.setFormatter(formatter)
        if 'db' in config.runtime_mode:
            self.sh.setLevel(logging.DEBUG)
        else:
            self.sh.setLevel(logging.INFO)
        self.addHandler(self.sh)

        # \TODO: Maybe break up the logging file if it goes over 1MB
        #   get file size
        #   if over 1MB, then rename current logging file to '{start_date}_{end_date}_{logger_name}.log'
        #   cut-paste into logging folder named '{logger_name}'
        self.fh = logging.FileHandler(str(config.log_path / (name + '.log')))
        self.fh.setFormatter(formatter)
        self.fh.setLevel(logging.INFO)
        self.addHandler(self.fh)

    def __del__(self):
        self.sh.close()
        self.removeHandler(self.sh)
        self.fh.close()
        self.removeHandler(self.fh)

    '''
    def error(self, msg):
        msg = msg.strip()
        if msg == 'None' or msg == 'N/A' or len(msg) == 0:
            self.exception(msg)
        else:
            self.error(msg)

    def critical(self, msg):
        msg = msg.strip()
        if msg == 'None' or msg == 'N/A' or len(msg) == 0:
            self.exception(msg)
        else:
            self.critical(msg)
    '''

    def exception(self, msg):
        msg = msg.strip()
        msg += '\n' + traceback.format_exc()
        self.error(msg)

    def testbench(self, msg):
        if 'tb' not in config.runtime_mode:
            return
        self.debug(msg)
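

# --- Hedged usage sketch (not part of the original module) ---
# Assumes config.runtime_mode and config.log_path are configured as this
# module expects; the logger name and messages below are illustrative.
def _example_logger_usage():
    log = Logger('worker')
    log.info('starting up')
    try:
        1 / 0
    except ZeroDivisionError:
        # exception() appends traceback.format_exc() to the message
        log.exception('division failed')
    # testbench() only emits when 'tb' is in config.runtime_mode
    log.testbench('debug-only message')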
# Pyrogram - Telegram MTProto API Client Library for Python # Copyright (C) 2017-2021 Dan <https://github.com/delivrance> # # This file is part of Pyrogram. # # Pyrogram is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pyrogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Pyrogram. If not, see <http://www.gnu.org/licenses/>. import html import re from typing import Optional import pyrogram from . import utils from .html import HTML BOLD_DELIM = "**" ITALIC_DELIM = "__" UNDERLINE_DELIM = "--" STRIKE_DELIM = "~~" CODE_DELIM = "`" PRE_DELIM = "```" MARKDOWN_RE = re.compile(r"({d})|\[(.+?)\]\((.+?)\)".format( d="|".join( ["".join(i) for i in [ [rf"\{j}" for j in i] for i in [ PRE_DELIM, CODE_DELIM, STRIKE_DELIM, UNDERLINE_DELIM, ITALIC_DELIM, BOLD_DELIM ] ]] ))) OPENING_TAG = "<{}>" CLOSING_TAG = "</{}>" URL_MARKUP = '<a href="{}">{}</a>' FIXED_WIDTH_DELIMS = [CODE_DELIM, PRE_DELIM] class Markdown: def __init__(self, client: Optional["pyrogram.Client"]): self.html = HTML(client) async def parse(self, text: str, strict: bool = False): if strict: text = html.escape(text) delims = set() is_fixed_width = False for i, match in enumerate(re.finditer(MARKDOWN_RE, text)): start, _ = match.span() delim, text_url, url = match.groups() full = match.group(0) if delim in FIXED_WIDTH_DELIMS: is_fixed_width = not is_fixed_width if is_fixed_width and delim not in FIXED_WIDTH_DELIMS: continue if text_url: text = utils.replace_once(text, full, URL_MARKUP.format(url, text_url), start) continue if delim == BOLD_DELIM: tag = "b" elif delim == ITALIC_DELIM: tag = "i" elif delim == UNDERLINE_DELIM: tag = "u" elif delim == STRIKE_DELIM: tag = "s" elif delim == CODE_DELIM: tag = "code" elif delim == PRE_DELIM: tag = "pre" else: continue if delim not in delims: delims.add(delim) tag = OPENING_TAG.format(tag) else: delims.remove(delim) tag = CLOSING_TAG.format(tag) text = utils.replace_once(text, delim, tag, start) return await self.html.parse(text) @staticmethod def unparse(text: str, entities: list): text = utils.add_surrogates(text) entities_offsets = [] for entity in entities: entity_type = entity.type start = entity.offset end = start + entity.length if entity_type == "bold": start_tag = end_tag = BOLD_DELIM elif entity_type == "italic": start_tag = end_tag = ITALIC_DELIM elif entity_type == "underline": start_tag = end_tag = UNDERLINE_DELIM elif entity_type == "strikethrough": start_tag = end_tag = STRIKE_DELIM elif entity_type == "code": start_tag = end_tag = CODE_DELIM elif entity_type in ("pre", "blockquote"): start_tag = end_tag = PRE_DELIM elif entity_type == "text_link": url = entity.url start_tag = "[" end_tag = f"]({url})" elif entity_type == "text_mention": user = entity.user start_tag = "[" end_tag = f"](tg://user?id={user.id})" else: continue entities_offsets.append((start_tag, start,)) entities_offsets.append((end_tag, end,)) # sorting by offset (desc) entities_offsets.sort(key=lambda x: -x[1]) for entity, offset in entities_offsets: text = text[:offset] + entity + text[offset:] return utils.remove_surrogates(text)
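

# --- Hedged sketch of the static unparse() path (not part of Pyrogram) ---
# The real call receives pyrogram MessageEntity objects; a SimpleNamespace
# with the same attributes (type, offset, length) stands in for one here.
def _example_markdown_unparse():
    from types import SimpleNamespace
    bold = SimpleNamespace(type="bold", offset=0, length=5)
    # The bold entity covering "Hello" is re-wrapped in the ** delimiters.
    assert Markdown.unparse("Hello world", [bold]) == "**Hello** world"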
# Copyright 2009-2017 Ram Rachum. # This program is distributed under the MIT license. from python_toolbox import caching from python_toolbox import sequence_tools # (`PermSpace` exported to here from `perm_space.py` to avoid import loop.) class _VariationAddingMixin: '''Mixin for `PermSpace` to add variations to a perm space.''' def get_rapplied(self, sequence): '''Get a version of this `PermSpace` that has a range of `sequence`.''' if self.is_rapplied: raise TypeError('This space is already rapplied, to rapply it to a ' 'different sequence please use `.unrapplied` ' 'first.') sequence = \ sequence_tools.ensure_iterable_is_immutable_sequence(sequence) if len(sequence) != self.sequence_length: raise Exception return PermSpace( sequence, n_elements=self.n_elements, domain=self.domain, fixed_map={key: sequence[value] for key, value in self.fixed_map.items()}, degrees=self.degrees, slice_=self.canonical_slice, is_combination=self.is_combination, perm_type=self.perm_type ) # There's no `.get_recurrented` because we can't know which sequence you'd # want. If you want a recurrent perm space you need to use `.get_rapplied` # with a recurrent sequence. def get_partialled(self, n_elements): '''Get a partialled version of this `PermSpace`.''' if self.is_sliced: raise TypeError( "Can't get partial of sliced `PermSpace` directly, because " "the number of items would be different. Use `.unsliced` " "first." ) return PermSpace( self.sequence, n_elements=n_elements, domain=self.domain, fixed_map=self.fixed_map, degrees=self.degrees, slice_=None, is_combination=self.is_combination, perm_type=self.perm_type ) @caching.CachedProperty def combinationed(self): '''Get a combination version of this perm space.''' from .comb import Comb if self.is_sliced: raise TypeError( "Can't get a combinationed version of a sliced `PermSpace`" "directly, because the number of items would be different. " "Use `.unsliced` first." ) if self.is_typed: raise TypeError( "Can't convert typed `PermSpace` directly to " "combinationed, because the perm class would not be a " "subclass of `Comb`." ) if self.is_degreed: raise TypeError("Can't use degrees with combination spaces.") return PermSpace( self.sequence, n_elements=self.n_elements, domain=self.domain, fixed_map=self.fixed_map, is_combination=True, perm_type=Comb ) def get_dapplied(self, domain): '''Get a version of this `PermSpace` that has a domain of `domain`.''' from . import variations if self.is_combination: raise variations.UnallowedVariationSelectionException( {variations.Variation.DAPPLIED: True, variations.Variation.COMBINATION: True,} ) domain = sequence_tools.ensure_iterable_is_immutable_sequence(domain) if len(domain) != self.n_elements: raise Exception return PermSpace( self.sequence, n_elements=self.n_elements, domain=domain, fixed_map={domain[key]: value for key, value in self._undapplied_fixed_map}, degrees=self.degrees, slice_=self.canonical_slice, is_combination=self.is_combination, perm_type=self.perm_type ) def get_fixed(self, fixed_map): '''Get a fixed version of this `PermSpace`.''' if self.is_sliced: raise TypeError( "Can't be used on sliced perm spaces. Try " "`perm_space.unsliced.get_fixed(...)`. You may then re-slice " "the resulting space." 
) combined_fixed_map = dict(self.fixed_map) for key, value in fixed_map.items(): if key in self.fixed_map: assert self.fixed_map[key] == value combined_fixed_map[key] = value return PermSpace( self.sequence, n_elements=self.n_elements, domain=self.domain, fixed_map=combined_fixed_map, degrees=self.degrees, slice_=None, is_combination=self.is_combination, perm_type=self.perm_type ) def get_degreed(self, degrees): '''Get a version of this `PermSpace` restricted to certain degrees.''' from . import variations if self.is_sliced: raise TypeError( "Can't be used on sliced perm spaces. Try " "`perm_space.unsliced.get_degreed(...)`. You may then " "re-slice the resulting space." ) if self.is_combination: raise variations.UnallowedVariationSelectionException( {variations.Variation.DEGREED: True, variations.Variation.COMBINATION: True,} ) degrees = sequence_tools.to_tuple(degrees, item_type=int) if not degrees: return self degrees_to_use = \ degrees if not self.is_degreed else set(degrees) & set(self.degrees) return PermSpace( self.sequence, n_elements=self.n_elements, domain=self.domain, fixed_map=self.fixed_map, degrees=degrees_to_use, is_combination=self.is_combination, perm_type=self.perm_type ) # There's no `get_sliced` because slicing is done using Python's normal # slice notation, e.g. perm_space[4:-7]. def get_typed(self, perm_type): ''' Get a version of this `PermSpace` where perms are of a custom type. ''' return PermSpace( self.sequence, n_elements=self.n_elements, domain=self.domain, fixed_map=self.fixed_map, degrees=self.degrees, slice_=self.canonical_slice, is_combination=self.is_combination, perm_type=perm_type )
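

# --- Hedged usage sketch (not part of the original module) ---
# `PermSpace` itself lives in the surrounding package (published separately
# as `combi`), so the import path below is an assumption; the counts rely on
# get_fixed() pinning exactly one key to one value.
def _example_get_fixed():
    from combi import PermSpace  # assumed import path

    space = PermSpace(4)              # all permutations of range(4), 24 in total
    fixed = space.get_fixed({0: 2})   # pin the value at key 0 to 2
    assert fixed.length < space.length   # 3! = 6 arrangements remain for the free slots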
import turbofastcrypto

# The source code for this module is only available for part 2 of this challenge :)

while 1:
    plaintext = input('> ')
    ciphertext = turbofastcrypto.encrypt(plaintext)
    print('Encrypted: ' + str(ciphertext))
from abc import abstractmethod

import rastervision as rv
from rastervision.core import (Config, ConfigBuilder)


class AugmentorConfig(Config):
    def __init__(self, augmentor_type):
        self.augmentor_type = augmentor_type

    @abstractmethod
    def create_augmentor(self):
        """Create the Augmentor that this configuration represents."""
        pass

    def to_builder(self, augmentor_type):
        return rv._registry.get_config_builder(rv.AUGMENTOR,
                                               self.augmentor_type)(self)

    @staticmethod
    def builder(augmentor_type):
        return rv._registry.get_config_builder(rv.AUGMENTOR, augmentor_type)()

    @staticmethod
    def from_proto(msg):
        """Creates an AugmentorConfig from the specified protobuf message."""
        return rv._registry.get_config_builder(rv.AUGMENTOR, msg.augmentor_type)() \
                           .from_proto(msg) \
                           .build()

    def update_for_command(self, command_type, experiment_config, context=[]):
        # Generally augmentors do not have an effect on the IO.
        return (self, rv.core.CommandIODefinition())


class AugmentorConfigBuilder(ConfigBuilder):
    pass
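

# --- Hedged usage sketch (not part of the original module) ---
# Shows the builder round trip this base class exposes. 'NODATA_AUGMENTOR'
# stands in for whatever augmentor key is registered with rastervision, so
# treat it as a hypothetical value; create_augmentor() comes from the
# concrete subclass.
def _example_augmentor_config_round_trip():
    config = AugmentorConfig.builder('NODATA_AUGMENTOR').build()
    augmentor = config.create_augmentor()
    return augmentor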
import os

import pytest
import testinfra.utils.ansible_runner

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')


@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_hostname(host):
    assert 'instance' == host.check_output('hostname -s')


@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_directory(host):
    f = host.file('/etc/molecule')

    assert f.is_directory
    assert f.user == 'root'
    assert f.group == 'root'
    assert f.mode == 0o755


@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_ansible_hostname_file(host):
    f = host.file('/etc/molecule/instance')

    assert f.is_file
    assert f.user == 'root'
    assert f.group == 'root'
    assert f.mode == 0o644
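

# --- Hedged sketch of an un-skipped check (not part of the original file) ---
# Follows the same testinfra host-fixture convention as the tests above;
# the path and ownership values are generic assumptions, not project facts.
def test_etc_hosts_example(host):
    f = host.file('/etc/hosts')

    assert f.exists
    assert f.user == 'root'
    assert f.group == 'root'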
# coding=utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Megatron arguments.""" import argparse import collections import os import re import time import torch import deepspeed from megatron.enums import PositionEmbeddingType import megatron from megatron.logging import log_levels def parse_args(extra_args_provider=None, defaults={}, ignore_unknown_args=False): """Parse all arguments.""" parser = argparse.ArgumentParser(description='Megatron-LM Arguments', allow_abbrev=False) # Standard arguments. parser = _add_network_size_args(parser) parser = _add_regularization_args(parser) parser = _add_training_args(parser) parser = _add_initialization_args(parser) parser = _add_learning_rate_args(parser) parser = _add_checkpointing_args(parser) parser = _add_mixed_precision_args(parser) parser = _add_distributed_args(parser) parser = _add_validation_args(parser) parser = _add_data_args(parser) parser = _add_autoresume_args(parser) parser = _add_biencoder_args(parser) parser = _add_vit_args(parser) parser = _add_logging_args(parser) parser = _add_zero_args(parser) parser = _add_memoryopt_args(parser) parser = _add_activation_checkpoint_args(parser) # Custom arguments. if extra_args_provider is not None: parser = extra_args_provider(parser) parser = deepspeed.add_config_arguments(parser) # Parse. if ignore_unknown_args: args, _ = parser.parse_known_args() else: args = parser.parse_args() # Distributed args. args.rank = int(os.getenv('RANK', '0')) args.world_size = int(os.getenv("WORLD_SIZE", '1')) # Tensor model parallel size. args.tensor_model_parallel_size = min( args.tensor_model_parallel_size, args.world_size) assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\ ' ({}) is not divisible by tensor model parallel size ({})'.format( args.world_size, args.tensor_model_parallel_size) # Pipeline model parallel size. args.pipeline_model_parallel_size = min( args.pipeline_model_parallel_size, (args.world_size // args.tensor_model_parallel_size)) # Checks. model_parallel_size = args.pipeline_model_parallel_size * \ args.tensor_model_parallel_size assert args.world_size % model_parallel_size == 0, 'world size is not'\ ' divisible by tensor parallel size ({}) times pipeline parallel ' \ 'size ({})'.format(args.world_size, args.tensor_model_parallel_size, args.pipeline_model_parallel_size) args.data_parallel_size = args.world_size // model_parallel_size if args.rank == 0: print('using world size: {}, data-parallel-size: {}, ' 'tensor-model-parallel size: {}, ' 'pipeline-model-parallel size: {} '.format( args.world_size, args.data_parallel_size, args.tensor_model_parallel_size, args.pipeline_model_parallel_size), flush=True) # --data-path and --train-weighted-splits-paths message = "Data loading Mode 1: --data-path and --split "\ "and Mode 2: --(train|valid|test)-weighted-split-paths"\ "are mutually exclusive i.e. cannot be set together." 
if args.data_path: assert args.train_weighted_split_paths is None, message setattr(args, "valid_weighted_split_names", None) setattr(args, "valid_weighted_split_weights", None) setattr(args, "valid_weighted_split_splits", None) setattr(args, "test_weighted_split_names", None) setattr(args, "test_weighted_split_weights", None) setattr(args, "test_weighted_split_splits", None) # args.split default value in the args is None it is set here in order # to check that it does not to overlap with the 2nd mode of data loading if args.split is None: args.split = "969, 30, 1" if args.train_weighted_split_paths or args.valid_weighted_split_paths or \ args.test_weighted_split_paths: assert args.data_path is None and args.split is None, message # Deprecated arguments assert args.batch_size is None, '--batch-size argument is no longer ' \ 'valid, use --micro-batch-size instead' del args.batch_size assert args.warmup is None, '--warmup argument is no longer valid, use ' \ '--lr-warmup-fraction instead' del args.warmup assert args.model_parallel_size is None, '--model-parallel-size is no ' \ 'longer valid, use --tensor-model-parallel-size instead' del args.model_parallel_size # Set input defaults. for key in defaults: # For default to be valid, it should not be provided in the # arguments that are passed to the program. We check this by # ensuring the arg is set to None. if getattr(args, key) is not None: if args.rank == 0: print('WARNING: overriding default arguments for {key}:{v} \ with {key}:{v2}'.format(key=key, v=defaults[key], v2=getattr(args, key)), flush=True) else: setattr(args, key, defaults[key]) # Batch size. assert args.micro_batch_size is not None assert args.micro_batch_size > 0 if args.global_batch_size is None: args.global_batch_size = args.micro_batch_size * args.data_parallel_size if args.rank == 0: print('setting global batch size to {}'.format( args.global_batch_size), flush=True) assert args.global_batch_size > 0 if args.num_layers_per_virtual_pipeline_stage is not None: assert args.pipeline_model_parallel_size > 2, \ 'pipeline-model-parallel size should be greater than 2 with ' \ 'interleaved schedule' assert args.num_layers % args.num_layers_per_virtual_pipeline_stage == 0, \ 'number of layers is not divisible by number of layers per virtual ' \ 'pipeline stage' args.virtual_pipeline_model_parallel_size = \ (args.num_layers // args.pipeline_model_parallel_size) // \ args.num_layers_per_virtual_pipeline_stage else: args.virtual_pipeline_model_parallel_size = None # Parameters dtype. args.params_dtype = torch.float if args.fp16: assert not args.bf16 args.params_dtype = torch.half if args.bf16: assert not args.fp16 args.params_dtype = torch.bfloat16 # bfloat16 requires gradient accumulation and all-reduce to # be done in fp32. if not args.accumulate_allreduce_grads_in_fp32: args.accumulate_allreduce_grads_in_fp32 = True if args.rank == 0: print('accumulate and all-reduce gradients in fp32 for ' 'bfloat16 data type.', flush=True) if args.rank == 0: print('using {} for parameters ...'.format(args.params_dtype), flush=True) # If we do accumulation and all-reduces in fp32, we need to have # local DDP and we should set the use-contiguous-buffers-in-ddp. if args.accumulate_allreduce_grads_in_fp32: assert args.DDP_impl == 'local' args.use_contiguous_buffers_in_ddp = True if args.dataloader_type is None: args.dataloader_type = 'single' # Consumed tokens. 
args.consumed_train_samples = 0 args.consumed_valid_samples = 0 args.consumed_train_tokens = 0 args.gigaflos_no_embeds = 0 # Iteration-based training. if args.train_iters: # If we use iteration-based training, make sure the # sample-based options are off. assert args.train_samples is None, \ 'expected iteration-based training' assert args.lr_decay_samples is None, \ 'expected iteration-based learning rate decay' assert args.lr_warmup_samples == 0, \ 'expected iteration-based learning rate warmup' assert args.rampup_batch_size is None, \ 'expected no batch-size rampup for iteration-based training' if args.lr_warmup_fraction is not None: assert args.lr_warmup_iters == 0, \ 'can only specify one of lr-warmup-fraction and lr-warmup-iters' # Sample-based training. if args.train_samples: # If we use sample-based training, make sure the # iteration-based options are off. assert args.train_iters is None, \ 'expected sample-based training' assert args.lr_decay_iters is None, \ 'expected sample-based learning rate decay' assert args.lr_warmup_iters == 0, \ 'expected sample-based learnig rate warmup' if args.lr_warmup_fraction is not None: assert args.lr_warmup_samples == 0, \ 'can only specify one of lr-warmup-fraction ' \ 'and lr-warmup-samples' # Check required arguments. required_args = ['num_layers', 'hidden_size', 'num_attention_heads'] for req_arg in required_args: _check_arg_is_not_none(args, req_arg) # Checks. if args.ffn_hidden_size is None: args.ffn_hidden_size = 4 * args.hidden_size if args.kv_channels is None: assert args.hidden_size % args.num_attention_heads == 0 args.kv_channels = args.hidden_size // args.num_attention_heads if args.seq_length is not None: assert args.encoder_seq_length is None args.encoder_seq_length = args.seq_length else: assert args.encoder_seq_length is not None args.seq_length = args.encoder_seq_length if args.position_embedding_type == PositionEmbeddingType.absolute or args.position_embedding_type == PositionEmbeddingType.alibi: assert args.max_position_embeddings is not None if args.seq_length is not None: assert args.max_position_embeddings >= args.seq_length if args.decoder_seq_length is not None: assert args.max_position_embeddings >= args.decoder_seq_length else: assert args.max_position_embeddings is None if args.lr is not None: assert args.min_lr <= args.lr if args.save is not None: assert args.save_interval is not None # Mixed precision checks. if args.fp16_lm_cross_entropy: assert args.fp16, 'lm cross entropy in fp16 only support in fp16 mode.' if args.fp32_residual_connection: assert args.fp16 or args.bf16, \ 'residual connection in fp32 only supported when using fp16 or bf16.' # Activation checkpointing. 
if args.distribute_checkpointed_activations: assert args.checkpoint_activations, \ 'for distribute-checkpointed-activations to work you '\ 'need to enable checkpoint-activations' args.curriculum_learning = False # Activation function if args.glu_activation is not None and args.bias_gelu_fusion: raise ValueError("if glu-activation is used, please set --no-bias-gelu-fusion") # Skip train iterations if args.skip_train_iteration_range is not None: args.skip_train_iteration_range = [ list(map(int, range_.split("-"))) for range_ in args.skip_train_iteration_range ] args.skip_train_iteration_range.sort() skip_train_iteration_range = collections.deque() for range_ in args.skip_train_iteration_range: if len(range_) == 2: start, end = range_ assert end >= start, \ "end of skip range cannot be smaller than start of skip range" # merge overlapping intervals (e.g. 1-5 2-6 -> 1-6) if not skip_train_iteration_range: skip_train_iteration_range.append([start, end]) elif skip_train_iteration_range[-1][1] >= start: skip_train_iteration_range[-1][1] = max(end, skip_train_iteration_range[-1][1]) else: skip_train_iteration_range.append([start, end]) else: raise ValueError( "skip train iterations should be specified as two numbers, i.e. start-end" ) args.skip_train_iteration_range = skip_train_iteration_range if args.use_bnb_optimizer: try: import bitsandbytes as bnb except ModuleNotFoundError: raise ModuleNotFoundError("Please install bitsandbytes from https://github.com/facebookresearch/bitsandbytes.") _print_args(args) return args def _print_args(args): """Print arguments.""" if args.rank == 0: print('------------------------ arguments ------------------------', flush=True) str_list = [] for arg in vars(args): dots = '.' * (48 - len(arg)) str_list.append(' {} {} {}'.format(arg, dots, getattr(args, arg))) if args.log_path is not None: with open(os.path.join(args.log_path,f'args_{time.strftime("%Y-%m-%dT%H:%M:%S")}.txt'), 'w') as f: for arg in sorted(str_list, key=lambda x: x.lower()): f.write(arg+"\n") print(arg, flush=True) else: for arg in sorted(str_list, key=lambda x: x.lower()): print(arg, flush=True) print('-------------------- end of arguments ---------------------', flush=True) def _check_arg_is_not_none(args, arg): assert getattr(args, arg) is not None, '{} argument is None'.format(arg) def _add_network_size_args(parser): group = parser.add_argument_group(title='network size') group.add_argument('--num-layers', type=int, default=None, help='Number of transformer layers.') group.add_argument('--hidden-size', type=int, default=None, help='Tansformer hidden size.') group.add_argument('--ffn-hidden-size', type=int, default=None, help='Transformer Feed-Forward Network hidden size. ' 'This is set to 4*hidden-size if not provided') group.add_argument('--num-attention-heads', type=int, default=None, help='Number of transformer attention heads.') group.add_argument('--kv-channels', type=int, default=None, help='Projection weights dimension in multi-head ' 'attention. This is set to ' ' args.hidden_size // args.num_attention_heads ' 'if not provided.') group.add_argument('--max-position-embeddings', type=int, default=None, help='Maximum number of position embeddings to use. ' 'This is the size of position embedding.') group.add_argument('--make-vocab-size-divisible-by', type=int, default=128, help='Pad the vocab size to be divisible by this value.' 
'This is added for computational efficieny reasons.') group.add_argument('--layernorm-epsilon', type=float, default=1e-5, help='Layer norm epsilon.') group.add_argument('--apply-residual-connection-post-layernorm', action='store_true', help='If set, use original BERT residula connection ' 'ordering.') group.add_argument('--embed-layernorm', action='store_true', help='use layernorm for embedding') group.add_argument('--openai-gelu', action='store_true', help='Use OpenAIs GeLU implementation. This option' 'should not be used unless for backward compatibility' 'reasons.') group.add_argument('--onnx-safe', type=bool, required=False, help='Use workarounds for known problems with ' 'Torch ONNX exporter') group.add_argument('--bert-no-binary-head', action='store_false', help='Disable BERT binary head.', dest='bert_binary_head') group.add_argument('--position-embedding-type', type=lambda x: PositionEmbeddingType[x], choices=list(PositionEmbeddingType), default=PositionEmbeddingType.absolute, help='Define position embedding type ("absolute" | "rotary" | "alibi"). "absolute" by default.' ) group.add_argument('--glu-activation', type=str, choices=megatron.model.glu_activations.GLU_ACTIVATIONS.keys(), help='GLU activations to use.' ) group.add_argument('--kill-switch-path', type=str, help='path to look for a kill switch, which if found will automatically exit the program' ) group.add_argument('--log-level', type=str, choices=list(log_levels.keys()), help="Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', " "'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and lets the " "application set the level." ) group.add_argument('--log-level-replica', type=str, choices=list(log_levels.keys()), help="Logger log level to use on replicas. 
Same choices as ``log_level``" ) return parser def _add_logging_args(parser): group = parser.add_argument_group(title='logging') group.add_argument('--log-params-norm', action='store_true', help='If set, calculate and log parameters norm.') group.add_argument('--log-num-zeros-in-grad', action='store_true', help='If set, calculate and log the number of zeros in gradient.') group.add_argument('--tensorboard-log-interval', type=int, default=1, help='Report to tensorboard interval.') group.add_argument('--tensorboard-queue-size', type=int, default=1000, help='Size of the tensorboard queue for pending events ' 'and summaries before one of the ‘add’ calls forces a ' 'flush to disk.') group.add_argument('--log-timers-to-tensorboard', action='store_true', help='If set, write timers to tensorboard.') group.add_argument('--log-batch-size-to-tensorboard', action='store_true', help='If set, write batch-size to tensorboard.') group.add_argument('--no-log-learnig-rate-to-tensorboard', action='store_false', help='Disable learning rate logging to tensorboard.', dest='log_learning_rate_to_tensorboard') group.add_argument('--no-log-loss-scale-to-tensorboard', action='store_false', help='Disable loss-scale logging to tensorboard.', dest='log_loss_scale_to_tensorboard') group.add_argument('--log-validation-ppl-to-tensorboard', action='store_true', help='If set, write validation perplexity to ' 'tensorboard.') return parser def _add_regularization_args(parser): group = parser.add_argument_group(title='regularization') group.add_argument('--attention-dropout', type=float, default=0.1, help='Post attention dropout probability.') group.add_argument('--hidden-dropout', type=float, default=0.1, help='Dropout probability for hidden state transformer.') group.add_argument('--weight-decay', type=float, default=0.01, help='Weight decay coefficient for L2 regularization.') group.add_argument('--clip-grad', type=float, default=1.0, help='Gradient clipping based on global L2 norm.') group.add_argument('--adam-beta1', type=float, default=0.9, help='First coefficient for computing running averages ' 'of gradient and its square') group.add_argument('--adam-beta2', type=float, default=0.999, help='Second coefficient for computing running averages ' 'of gradient and its square') group.add_argument('--adam-eps', type=float, default=1e-08, help='Term added to the denominator to improve' 'numerical stability') group.add_argument('--sgd-momentum', type=float, default=0.9, help='Momentum factor for sgd') return parser def _add_training_args(parser): group = parser.add_argument_group(title='training') group.add_argument('--micro-batch-size', type=int, default=None, help='Batch size per model instance (local batch size). ' 'Global batch size is local batch size times data ' 'parallel size times number of micro batches.') group.add_argument('--batch-size', type=int, default=None, help='Old batch size parameter, do not use. ' 'Use --micro-batch-size instead') group.add_argument('--global-batch-size', type=int, default=None, help='Training batch size. If set, it should be a ' 'multiple of micro-batch-size times data-parallel-size. ' 'If this value is None, then ' 'use micro-batch-size * data-parallel-size as the ' 'global batch size. 
This choice will result in 1 for ' 'number of micro-batches.') group.add_argument('--rampup-batch-size', nargs='*', default=None, help='Batch size ramp up with the following values:' ' --rampup-batch-size <start batch size> ' ' <batch size increment> ' ' <ramp-up samples> ' 'For example: ' ' --rampup-batch-size 16 8 300000 ' ' --global-batch-size 1024 ' 'will start with global batch size 16 and over ' ' (1024 - 16) / 8 = 126 intervals will increase ' 'the batch size linearly to 1024. In each interval ' 'we will use approximately 300000 / 126 = 2380 samples.') group.add_argument('--checkpoint-activations', action='store_true', help='Checkpoint activation to allow for training ' 'with larger models, sequences, and batch sizes.') group.add_argument('--distribute-checkpointed-activations', action='store_true', help='If set, distribute checkpointed activations ' 'across model parallel group.') group.add_argument('--checkpoint-num-layers', type=int, default=1, help='chunk size (number of layers) for checkpointing.') group.add_argument('--train-iters', type=int, default=None, help='Total number of iterations to train over all ' 'training runs. Note that either train-iters or ' 'train-samples should be provided.') group.add_argument('--train-samples', type=int, default=None, help='Total number of samples to train over all ' 'training runs. Note that either train-iters or ' 'train-samples should be provided.') group.add_argument('--train-tokens', type=int, default=None, help='Total number of tokens to train over all ' 'training runs.') group.add_argument('--log-interval', type=int, default=100, help='Report loss and timing interval.') group.add_argument('--exit-interval', type=int, default=None, help='Exit the program after the iteration is divisible ' 'by this value.') group.add_argument('--exit-duration-in-mins', type=int, default=None, help='Exit the program after this many minutes.') group.add_argument('--tensorboard-dir', type=str, default=None, help='Write TensorBoard logs to this directory.') group.add_argument('--no-masked-softmax-fusion', action='store_false', help='Disable fusion of query_key_value scaling, ' 'masking, and softmax.', dest='masked_softmax_fusion') group.add_argument('--no-bias-gelu-fusion', action='store_false', help='Disable bias and gelu fusion.', dest='bias_gelu_fusion') group.add_argument('--no-bias-dropout-fusion', action='store_false', help='Disable bias and dropout fusion.', dest='bias_dropout_fusion') group.add_argument('--optimizer', type=str, default='adam', choices=['adam', 'sgd'], help='Optimizer function') group.add_argument('--use-bnb-optimizer', action='store_true', help='Use bitsandbytes optimizer for efficient training,' 'please refer https://github.com/facebookresearch/bitsandbytes.', dest='use_bnb_optimizer') group.add_argument('--dataloader-type', type=str, default=None, choices=['single', 'cyclic'], help='Single pass vs multiple pass data loader') group.add_argument('--cpu-optimizer', action='store_true', help='Run optimizer on CPU') group.add_argument('--cpu_torch_adam', action='store_true', help='Use Torch Adam as optimizer on CPU.') group.add_argument('--codecarbon-dir', type=str, default=None, help='Write CodeCarbon logs to this directory.') group.add_argument('--eval-only', type=bool, required=False, help='If set to True, no train step will be performed.' 
'and only the evaluation on the `valid` and `test` sets ' 'will be performed' ) group.add_argument('--skip-train-iteration-range', type=str, nargs='+', default=None, help='Iteration ranges to skip. The values are one or more dash-separated ranges. e.g., 101-200 251-300.') group.add_argument('--abort-on-unmet-fused-kernel-constraints', action='store_true', help="If set to True, the program will abort if the constraints for loading a fused kernel aren't met") return parser def _add_initialization_args(parser): group = parser.add_argument_group(title='initialization') group.add_argument('--seed', type=int, default=1234, help='Random seed used for python, numpy, ' 'pytorch, and cuda.') group.add_argument('--init-method-std', type=float, default=0.02, help='Standard deviation of the zero mean normal ' 'distribution used for weight initialization.') group.add_argument('--init-method-xavier-uniform', action='store_true', help='Enable Xavier uniform parameter initialization') return parser def _add_learning_rate_args(parser): group = parser.add_argument_group(title='learning rate') group.add_argument('--lr', type=float, default=None, help='Initial learning rate. Depending on decay style ' 'and initial warmup, the learing rate at each ' 'iteration would be different.') group.add_argument('--lr-decay-style', type=str, default='linear', choices=['constant', 'linear', 'cosine'], help='Learning rate decay function.') group.add_argument('--lr-decay-iters', type=int, default=None, help='number of iterations to decay learning rate over,' ' If None defaults to `--train-iters`') group.add_argument('--lr-decay-samples', type=int, default=None, help='number of samples to decay learning rate over,' ' If None defaults to `--train-samples`') group.add_argument('--lr-decay-tokens', type=int, default=None, help='number of tokens to decay learning rate over,' ' If not None will override iter/sample-based decay') group.add_argument('--lr-warmup-fraction', type=float, default=None, help='fraction of lr-warmup-(iters/samples) to use ' 'for warmup (as a float)') group.add_argument('--lr-warmup-iters', type=int, default=0, help='number of iterations to linearly warmup ' 'learning rate over.') group.add_argument('--lr-warmup-samples', type=int, default=0, help='number of samples to linearly warmup ' 'learning rate over.') group.add_argument('--warmup', type=int, default=None, help='Old lr warmup argument, do not use. Use one of the' '--lr-warmup-* arguments above') group.add_argument('--min-lr', type=float, default=0.0, help='Minumum value for learning rate. The scheduler' 'clip values below this threshold.') group.add_argument('--override-lr-scheduler', action='store_true', help='Reset the values of the scheduler (learning rate,' 'warmup iterations, minimum learning rate, maximum ' 'number of iterations, and decay style from input ' 'arguments and ignore values from checkpoints. 
Note' 'that all the above values will be reset.') group.add_argument('--use-checkpoint-lr-scheduler', action='store_true', help='Use checkpoint to set the values of the scheduler ' '(learning rate, warmup iterations, minimum learning ' 'rate, maximum number of iterations, and decay style ' 'from checkpoint and ignore input arguments.') return parser def _add_checkpointing_args(parser): group = parser.add_argument_group(title='checkpointing') group.add_argument('--save', type=str, default=None, help='Output directory to save checkpoints to.') group.add_argument('--save-interval', type=int, default=None, help='Number of iterations between checkpoint saves.') group.add_argument('--no-save-optim', action='store_true', default=None, help='Do not save current optimizer.') group.add_argument('--no-save-rng', action='store_true', default=None, help='Do not save current rng state.') group.add_argument('--load', type=str, default=None, help='Directory containing a model checkpoint.') group.add_argument('--no-load-optim', action='store_true', default=None, help='Do not load optimizer when loading checkpoint.') group.add_argument('--no-load-rng', action='store_true', default=None, help='Do not load rng state when loading checkpoint.') group.add_argument('--finetune', action='store_true', help='Load model for finetuning. Do not load optimizer ' 'or rng state from checkpoint and set iteration to 0. ' 'Assumed when loading a release checkpoint.') return parser def _add_mixed_precision_args(parser): group = parser.add_argument_group(title='mixed precision') group.add_argument('--fp16', action='store_true', help='Run model in fp16 mode.') group.add_argument('--bf16', action='store_true', help='Run model in bfloat16 mode.') group.add_argument('--loss-scale', type=float, default=None, help='Static loss scaling, positive power of 2 ' 'values can improve fp16 convergence. If None, dynamic' 'loss scaling is used.') group.add_argument('--initial-loss-scale', type=float, default=2**32, help='Initial loss-scale for dynamic loss scaling.') group.add_argument('--min-loss-scale', type=float, default=1.0, help='Minimum loss scale for dynamic loss scale.') group.add_argument('--loss-scale-window', type=float, default=1000, help='Window over which to raise/lower dynamic scale.') group.add_argument('--hysteresis', type=int, default=2, help='hysteresis for dynamic loss scaling') group.add_argument('--fp32-residual-connection', action='store_true', help='Move residual connections to fp32.') group.add_argument('--no-query-key-layer-scaling', action='store_false', help='Do not scale Q * K^T by 1 / layer-number.', dest='apply_query_key_layer_scaling') group.add_argument('--attention-softmax-in-fp32', action='store_true', help='Run attention masking and softmax in fp32. 
' 'This flag is ignored unless ' '--no-query-key-layer-scaling is specified.') group.add_argument('--accumulate-allreduce-grads-in-fp32', action='store_true', help='Gradient accumulation and all-reduce in fp32.') group.add_argument('--fp16-lm-cross-entropy', action='store_true', help='Move the cross entropy unreduced loss calculation' 'for lm head to fp16.') return parser def _add_distributed_args(parser): group = parser.add_argument_group(title='distributed') group.add_argument('--tensor-model-parallel-size', type=int, default=1, help='Degree of tensor model parallelism.') group.add_argument('--pipeline-model-parallel-size', type=int, default=1, help='Degree of pipeline model parallelism.') group.add_argument('--model-parallel-size', type=int, default=None, help='Old model parallel argument, do not use. Use ' '--tensor-model-parallel-size instead.') group.add_argument('--num-layers-per-virtual-pipeline-stage', type=int, default=None, help='Number of layers per virtual pipeline stage') group.add_argument('--distributed-backend', default='nccl', choices=['nccl', 'gloo'], help='Which backend to use for distributed training.') group.add_argument('--DDP-impl', default='local', choices=['local', 'torch'], help='which DistributedDataParallel implementation ' 'to use.') group.add_argument('--use-contiguous-buffers-in-ddp', action='store_true', help='If set, use contiguous buffer in DDP. Note that ' 'this option only works woth local DDP.' ) group.add_argument('--no-scatter-gather-tensors-in-pipeline', action='store_false', help='Use scatter/gather to optimize communication of tensors in pipeline', dest='scatter_gather_tensors_in_pipeline') group.add_argument('--local_rank', type=int, default=None, help='local rank passed from distributed launcher.') group.add_argument('--lazy-mpu-init', type=bool, required=False, help='If set to True, initialize_megatron() ' 'skips DDP initialization and returns function to ' 'complete it instead.Also turns on ' '--use-cpu-initialization flag. This is for ' 'external DDP manager.' ) group.add_argument('--use-cpu-initialization', action='store_true', default=None, help='If set, affine parallel weights ' 'initialization uses CPU' ) return parser def _add_validation_args(parser): group = parser.add_argument_group(title='validation') group.add_argument('--eval-iters', type=int, default=100, help='Number of iterations to run for evaluation' 'validation/test for.') group.add_argument('--eval-interval', type=int, default=1000, help='Interval between running evaluation on ' 'validation set.') return parser def _add_data_args(parser): group = parser.add_argument_group(title='data and dataloader') # option 1 for data loading (mutually exclusive with option2) group.add_argument('--data-path', nargs='*', default=None, help='Path to the training dataset. Accepted format:' '1) a single data path, 2) multiple datasets in the' 'form: dataset1-weight dataset1-path dataset2-weight ' 'dataset2-path ...') group.add_argument('--split', type=str, default=None, help='Comma-separated list of proportions for training,' ' validation, and test split. 
For example the split ' '`90,5,5` will use 90%% of data for training, 5%% for ' 'validation and 5%% for test.') # option 2 for data loading (mutually exclusive with option1) # helper class to parse the --xxx-weighted-split-paths # note here two args are set: extra valid dataset paths and names class parse_data_paths(argparse.Action): def __call__(self, parser, args, values, option_string=None): if option_string == "--train-weighted-split-paths": assert len(values) == 1, 'Only 1 dataset group is allowed to' 'be passed for the argument --train-weighted-split-paths' # make sure string given in the correct format err_message = 'Each data group should be input on the following format' '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"' 'where START < END' for v in values: # each prefix consists several datasets separated by commas prefix = ":".join(v.split(":")[1:]) # remove GIVEN_NAME datasets = prefix.split(",") # check if each dataset is formatted like `WEIGHT START:END PATH` for d in datasets: assert len(d.split()) == 3, err_message start, end = d.split()[1].split(":") assert float(start) < float(end), err_message names = [v.split(":")[0] for v in values] prefixes = [":".join(v.split(":")[1:]).strip() for v in values] weights = [[d.split()[0] for d in p.split(",")] for p in prefixes] splits = [[d.split()[1] for d in p.split(",")] for p in prefixes] paths = [[d.split()[2] for d in p.split(",")] for p in prefixes] # # to keep consistency with Option 1 of data loading (through --data-path) # # paths will contain strings on the following form # # "WEIGHTS1 PATH1 WEIGHTS2 PATH2 WEIGHTS3 PATH3" for each dataset group # # while data will be parsed in additional arguments below # paths_option1_style = [] # for p, w in zip(paths, weights): # paths_option1_style.append(" ".join([f"{w_i} {p_i}" for p_i, w_i in zip(p,w)])) # setattr(args, self.dest, paths_option1_style) setattr(args, self.dest, paths) setattr(args, self.dest.replace("paths", "weights"), weights) setattr(args, self.dest.replace("paths", "splits"), splits) setattr(args, self.dest.replace("paths","names"), names) group.add_argument('--train-weighted-split-paths', nargs='*', default=None, help='Weights, splits and paths to groups of datasets' 'Accepted format: ONE dataset groups could be' 'submitted in the following form between double quotes' '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"' 'e.g.: "NAME_ABC: 0.6 0:0.6 A, 0.3 0:1 B, 0.1 0:1 C" ' 'WEIGHT is used to up and down sample each dataset A,B,C in the group' 'START:END indicates the split portion of the dataset', action=parse_data_paths) group.add_argument('--valid-weighted-split-paths', nargs='*', default=None, help='Weights, splits and paths to groups of datasets' 'Accepted format: one or many dataset groups could be' 'submitted in the following form each between double quotes' '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"' 'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" ' '"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" ' 'validation will be run on each of those groups independently', action=parse_data_paths) group.add_argument('--test-weighted-split-paths', nargs='*', default=None, help='Weights, splits and paths to groups of datasets' 'Accepted format: one or many dataset groups could be' 'submitted in the following form each between double quotes' '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"' 'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" ' '"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" ' 'test will 
be run on each of those groups independently', action=parse_data_paths) group.add_argument('--log-path', type=str, default=None, help='Path to the save arguments file.') group.add_argument('--vocab-file', type=str, default=None, help='Path to the vocab file.') group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file.') group.add_argument('--vocab-extra-ids', type=int, default=0, help='Number of additional vocabulary tokens. ' 'They are used for span masking in the T5 model') group.add_argument('--seq-length', type=int, default=None, help='Maximum sequence length to process.') group.add_argument('--encoder-seq-length', type=int, default=None, help='Maximum encoder sequence length to process.' 'This should be exclusive of --seq-length') group.add_argument('--decoder-seq-length', type=int, default=None, help="Maximum decoder sequence length to process.") group.add_argument('--retriever-seq-length', type=int, default=256, help='Maximum sequence length for the biencoder model ' ' for retriever') group.add_argument('--sample-rate', type=float, default=1.0, help='sample rate for training data. Supposed to be 0 ' ' < sample_rate < 1') group.add_argument('--mask-prob', type=float, default=0.15, help='Probability of replacing a token with mask.') group.add_argument('--short-seq-prob', type=float, default=0.1, help='Probability of producing a short sequence.') group.add_argument('--mmap-warmup', action='store_true', help='Warm up mmap files.') group.add_argument('--num-workers', type=int, default=2, help="Dataloader number of workers.") group.add_argument('--tokenizer-type', type=str, default=None, choices=['BertWordPieceLowerCase', 'BertWordPieceCase', 'GPT2BPETokenizer', 'PretrainedFromHF'], help='What type of tokenizer to use.') group.add_argument("--tokenizer-name-or-path", type=str, default=None, help="Name or path of the huggingface tokenizer.") group.add_argument('--data-impl', type=str, default='infer', choices=['lazy', 'cached', 'mmap', 'infer'], help='Implementation of indexed datasets.') group.add_argument('--reset-position-ids', action='store_true', help='Reset posistion ids after end-of-document token.') group.add_argument('--reset-attention-mask', action='store_true', help='Reset self attention maske after ' 'end-of-document token. Attention between tokens from different documents is null.') group.add_argument('--eod-mask-loss', action='store_true', help='Mask loss for the end of document tokens.') group.add_argument('--loss-on-targets-only', action='store_true', help='Mask loss on input sequence.') group.add_argument('--reweight-loss-based-on-position-frequency', action="store_true", help='Some objectives require us to sample loss_mask. This might introduce bias towards ' 'specific positions. This option tries to un-bias the loss by reweighting loss on specific ' 'positions based on how frequently we train on that position.' 
'This is mostly used for prefix_lm training') return parser def _add_autoresume_args(parser): group = parser.add_argument_group(title='autoresume') group.add_argument('--adlr-autoresume', action='store_true', help='Enable autoresume on adlr cluster.') group.add_argument('--adlr-autoresume-interval', type=int, default=1000, help='Intervals over which check for autoresume' 'termination signal') return parser def _add_biencoder_args(parser): group = parser.add_argument_group(title='biencoder') # network size group.add_argument('--ict-head-size', type=int, default=None, help='Size of block embeddings to be used in ICT and ' 'REALM (paper default: 128)') group.add_argument('--biencoder-projection-dim', type=int, default=0, help='Size of projection head used in biencoder (paper' ' default: 128)') group.add_argument('--biencoder-shared-query-context-model', action='store_true', help='Whether to share the parameters of the query ' 'and context models or not') # checkpointing group.add_argument('--ict-load', type=str, default=None, help='Directory containing an ICTBertModel checkpoint') group.add_argument('--bert-load', type=str, default=None, help='Directory containing an BertModel checkpoint ' '(needed to start ICT and REALM)') # data group.add_argument('--titles-data-path', type=str, default=None, help='Path to titles dataset used for ICT') group.add_argument('--query-in-block-prob', type=float, default=0.1, help='Probability of keeping query in block for ' 'ICT dataset') group.add_argument('--use-one-sent-docs', action='store_true', help='Whether to use one sentence documents in ICT') group.add_argument('--evidence-data-path', type=str, default=None, help='Path to Wikipedia Evidence frm DPR paper') # training group.add_argument('--retriever-report-topk-accuracies', nargs='+', type=int, default=[], help="Which top-k accuracies to report " "(e.g. 
'1 5 20')") group.add_argument('--retriever-score-scaling', action='store_true', help='Whether to scale retriever scores by inverse ' 'square root of hidden size') # faiss index group.add_argument('--block-data-path', type=str, default=None, help='Where to save/load BlockData to/from') group.add_argument('--embedding-path', type=str, default=None, help='Where to save/load Open-Retrieval Embedding' ' data to/from') # indexer group.add_argument('--indexer-batch-size', type=int, default=128, help='How large of batches to use when doing indexing ' 'jobs') group.add_argument('--indexer-log-interval', type=int, default=1000, help='After how many batches should the indexer ' 'report progress') return parser def _add_vit_args(parser): group = parser.add_argument_group(title="vit") group.add_argument('--num-classes', type=int, default=1000, help='num of classes in vision classificaiton task') group.add_argument('--img-dim', type=int, default=224, help='Image size for vision classification task') group.add_argument('--num-channels', type=int, default=3, help='Number of channels in input image data') group.add_argument('--patch-dim', type=int, default=16, help='patch dimension used in vit') return parser def _add_zero_args(parser): """Text generate arguments.""" group = parser.add_argument_group('ZeRO configurations', 'configurations') group.add_argument("--zero-stage", type=int, default=1.0) group.add_argument('--zero-reduce-scatter', action='store_true', help='Use reduce scatter if specified') group.add_argument('--zero-contigious-gradients', action='store_true', help='Use contigious memory optimizaiton if specified') group.add_argument("--zero-reduce-bucket-size", type=int, default=0.0) group.add_argument("--zero-allgather-bucket-size", type=int, default=0.0) group.add_argument('--remote-device', type=str, default='none', choices=['none', 'cpu', 'nvme'], help='Remote device for ZeRO-3 initialized parameters.') group.add_argument('--use-pin-memory', action='store_true', help='Use pinned CPU memory for ZeRO-3 initialized model parameters.') return parser def _add_memoryopt_args(parser): """Memory optimization arguments.""" group = parser.add_argument_group('Memory optimizations', 'configurations') group.add_argument("--scattered-embeddings", action='store_true', help='Save memory by scattering embedding activations. ' 'Introduces dropout differences across MP configurations.') group.add_argument("--split-transformers", action='store_true', help='Save memory by splitting transformer layers into two parts, ' 'allowing for more frequent activation checkpoint savings.') group.add_argument("--memory-centric-tiled-linear", action="store_true", help='Save memory by tiling with deepspeed.zero.TiledLinear.') group.add_argument("--tile-factor", type=int, default=1, help='Make all linear layers the same size of [hidden/tile_factor, hidden/tile_factor]. ' 'Must be enabled with --memory-centric-tiled-linear. ' 'Example A: if tile_factor=1, the qkv layer [hidden, 3* hidden] would be converted into [1,3] tiles of size [hidden,hidden]. ' 'Example B: if tile_factor=2, the intermediate layer [4*hidden, hidden] will be converted into [8, 2] tiles of size [hidden/2, hidden/2]. 
' 'Default is 1.') return parser def _add_activation_checkpoint_args(parser): group = parser.add_argument_group('Activation Checkpointing', 'Checkpointing Configurations') group.add_argument('--deepspeed-activation-checkpointing', action='store_true', help='uses activation checkpointing from deepspeed') group.add_argument('--partition-activations', action='store_true', help='partition Activations across GPUs before checkpointing.') group.add_argument('--contigious-checkpointing', action='store_true', help='Contigious memory checkpointing for activatoins.') group.add_argument('--checkpoint-in-cpu', action='store_true', help='Move the activation checkpoints to CPU.') group.add_argument('--synchronize-each-layer', action='store_true', help='does a synchronize at the beginning and end of each checkpointed layer.') group.add_argument('--profile-backward', action='store_true', help='Enables backward pass profiling for checkpointed layers.') return parser
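# --- Illustrative sketch (not part of the Megatron arguments module above) ---
# The --rampup-batch-size help text describes its schedule only in prose. The helper
# below is a hypothetical, self-contained reproduction of the arithmetic from the
# example in that help text (start 16, increment 8, 300000 ramp-up samples,
# --global-batch-size 1024); the function name and return shape are my own.
def rampup_schedule(start, increment, rampup_samples, global_batch_size):
    assert (global_batch_size - start) % increment == 0, \
        "global batch size must be reachable from the start size in whole increments"
    intervals = (global_batch_size - start) // increment        # (1024 - 16) / 8 = 126
    samples_per_interval = rampup_samples // intervals          # 300000 // 126 = 2380
    # each interval trains roughly samples_per_interval samples at a fixed global batch size
    return [(start + i * increment, samples_per_interval) for i in range(intervals)]


if __name__ == "__main__":
    schedule = rampup_schedule(16, 8, 300000, 1024)
    print(len(schedule), schedule[0], schedule[-1])   # 126 (16, 2380) (1016, 2380)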
# copyright openpyxlzip 2014

import datetime

import pytest

from openpyxlzip.tests.helper import compare_xml
from openpyxlzip.xml.constants import DCTERMS_PREFIX, DCTERMS_NS, XSI_NS
from openpyxlzip.xml.functions import (
    fromstring,
    tostring,
    register_namespace,
    NS_REGEX,
)


@pytest.fixture()
def SampleProperties():
    from ..core import DocumentProperties
    props = DocumentProperties()
    props.keywords = "one, two, three"
    props.created = datetime.datetime(2010, 4, 1, 20, 30, 00)
    props.modified = datetime.datetime(2010, 4, 5, 14, 5, 30)
    props.lastPrinted = datetime.datetime(2014, 10, 14, 10, 30)
    props.category = "The category"
    props.contentStatus = "The status"
    props.creator = 'TEST_USER'
    props.lastModifiedBy = "SOMEBODY"
    props.revision = "0"
    props.version = "2.5"
    props.description = "The description"
    props.identifier = "The identifier"
    props.language = "The language"
    props.subject = "The subject"
    props.title = "The title"
    return props


def test_ctor(SampleProperties):
    expected = """
    <coreProperties xmlns="http://schemas.openxmlformats.org/package/2006/metadata/core-properties"
       xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcterms="http://purl.org/dc/terms/"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
      <dc:creator>TEST_USER</dc:creator>
      <dc:title>The title</dc:title>
      <dc:description>The description</dc:description>
      <dc:subject>The subject</dc:subject>
      <dc:identifier>The identifier</dc:identifier>
      <dc:language>The language</dc:language>
      <dcterms:created xsi:type="dcterms:W3CDTF">2010-04-01T20:30:00Z</dcterms:created>
      <dcterms:modified xsi:type="dcterms:W3CDTF">2010-04-05T14:05:30Z</dcterms:modified>
      <lastModifiedBy>SOMEBODY</lastModifiedBy>
      <category>The category</category>
      <contentStatus>The status</contentStatus>
      <version>2.5</version>
      <revision>0</revision>
      <keywords>one, two, three</keywords>
      <lastPrinted>2014-10-14T10:30:00Z</lastPrinted>
    </coreProperties>
    """
    xml = tostring(SampleProperties.to_tree())
    diff = compare_xml(xml, expected)
    assert diff is None, diff


def test_from_tree(datadir, SampleProperties):
    datadir.chdir()
    with open("core.xml") as src:
        content = src.read()
    content = fromstring(content)
    props = SampleProperties.from_tree(content)
    assert props == SampleProperties


def test_qualified_datetime():
    from ..core import QualifiedDateTime
    dt = QualifiedDateTime()
    tree = dt.to_tree("time", datetime.datetime(2015, 7, 20, 12, 30))
    xml = tostring(tree)
    expected = """
    <time xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="dcterms:W3CDTF">
      2015-07-20T12:30:00Z
    </time>"""
    diff = compare_xml(xml, expected)
    assert diff is None, diff


@pytest.fixture(params=['abc', 'dct', 'dcterms', 'xyz'])
def dcterms_prefix(request):
    register_namespace(request.param, DCTERMS_NS)
    yield request.param
    register_namespace(DCTERMS_PREFIX, DCTERMS_NS)


@pytest.mark.no_pypy
def test_qualified_datetime_ns(dcterms_prefix):
    from ..core import QualifiedDateTime
    dt = QualifiedDateTime()
    tree = dt.to_tree("time", datetime.datetime(2015, 7, 20, 12, 30))
    xml = tostring(tree)
    # serialise to remove the QName
    tree = fromstring(xml)
    xsi = tree.attrib["{%s}type" % XSI_NS]
    prefix = xsi.split(":")[0]
    assert prefix == dcterms_prefix
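# --- Illustrative sketch (separate from the test module above) ---
# Minimal example of exercising the same DocumentProperties serialisation outside of
# pytest. The import path "openpyxlzip.packaging.core" is an assumption inferred from
# the relative "..core" import used in the tests and may differ in the real package.
if __name__ == "__main__":
    import datetime
    from openpyxlzip.packaging.core import DocumentProperties  # assumed location of ..core
    from openpyxlzip.xml.functions import tostring

    props = DocumentProperties()
    props.creator = "TEST_USER"
    props.title = "The title"
    props.created = datetime.datetime(2010, 4, 1, 20, 30, 0)
    print(tostring(props.to_tree()))  # emits the <coreProperties> XML checked in test_ctor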
from gumo.task.application.repository import GumoTaskRepository
from gumo.task.infrastructure.repository import GumoTaskRepositoryImpl


def task_bind(binder):
    binder.bind(GumoTaskRepository, to=GumoTaskRepositoryImpl)
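# --- Illustrative usage sketch ---
# The bind function above follows the "injector" library's module convention (a
# callable that receives a Binder). Assuming that package is installed, the binding
# can be consumed roughly like this:
if __name__ == "__main__":
    from injector import Injector

    container = Injector([task_bind])                 # task_bind registers the implementation
    repository = container.get(GumoTaskRepository)    # resolves to a GumoTaskRepositoryImpl
    print(type(repository).__name__)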
def main(): global originalNum global base ipl=input("Enter the input") originalNum=ipl[0:len(ipl)-3] base=ipl[len(ipl)-2:] def ABCD(num): num=str(num) if(num=='1'): return '1' if(num=='2'): return '2' if(num=='3'): return '3' if(num=='4'): return '4' if(num=='5'): return '5' if(num=='6'): return '6' if(num=='7'): return '7' if(num=='8'): return '8' if(num=='9'): return '9' if(num=='A'): return '10' if(num=='B'): return '11' if(num=='C'): return '12' if(num=='D'): return '13' if(num=='E'): return '14' if(num=='F'): return '15' if(num=='0'): return '0' def ABCD_reverse(num): num=int(num) if(num==1): return '1' if(num==2): return '2' if(num==3): return '3' if(num==4): return '4' if(num==5): return '5' if(num==6): return '6' if(num==7): return '7' if(num==8): return '8' if(num==9): return '9' if(num==10): return 'A' if(num==11): return 'B' if(num==12): return 'C' if(num==13): return 'D' if(num==14): return 'E' if(num==15): return 'F' if(num==0): return '0' def edit(l): l=list(l) result="" for i5 in range(len(l)): result+=str(ABCD_reverse(l[i5])) return add_reverse(result) def add(input_number): intbase=int(base) input_number=str(input_number) if(intbase==10): input_number=int(input_number) return input_number+int(add_reverse(str(input_number))) else: reverse=add_reverse(input_number) reduncy=[0] index=[] for i3 in range(len(input_number)): sum1=int(ABCD(input_number[i3]))+int(ABCD(reverse[i3])) if(sum1>(intbase-1)): sum1=sum1-intbase+reduncy[i3] index.append(sum1) reduncy.append(1) elif(sum1==(intbase-1)): sum1=sum1-intbase+reduncy[i3] if(sum1>=0): index.append(sum1) reduncy.append(1) else: index.append(sum1++intbase-reduncy[i3]) reduncy.append(0) else: sum1=sum1+reduncy[i3] reduncy.append(0) index.append(sum1) if(reduncy[len(reduncy)-1]==1): index.append(1) return edit(index) def add_reverse(input_string): input_string=str(input_string) reverse_str="" count=1 for i in range(len(input_string)): reverse_str+=(input_string[len(input_string)-count]) count+=1 return reverse_str def is_check(input_string): input_string=str(input_string) if(input_string[:int(len(input_string)/2)]==add_reverse(input_string[int(len(input_string)/2):])): return True elif(input_string[:(int(len(input_string)/2)+1)]==add_reverse(input_string[(int(len(input_string)/2)-0):])): return True else: return False def execute(): count=0 intial_number=originalNum intial_number=str(intial_number) while(count<10 and is_check(intial_number)==False): intial_number=add(intial_number) is_check(intial_number) count+=1 if(is_check(intial_number)): return intial_number else: return 'None,'+str(intial_number) main() print(execute())
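# --- Illustrative sketch (an alternative formulation, not a drop-in fix) ---
# The script above implements reverse-and-add palindrome search with hand-written
# digit tables. Below is a cleaner sketch of the same idea using Python's built-in
# base conversion; the helper names and the example seed are mine.
def to_base(n, base, digits="0123456789ABCDEF"):
    out = ""
    while n:
        n, r = divmod(n, base)
        out = digits[r] + out
    return out or "0"


def reverse_and_add_palindrome(number, base, max_steps=10):
    n = int(number, base)
    for _ in range(max_steps):
        s = to_base(n, base)
        if s == s[::-1]:
            return s                     # palindrome reached within max_steps
        n += int(s[::-1], base)          # reverse-and-add in the given base
    return None


print(reverse_and_add_palindrome("1A", 16))   # hypothetical seed: 1A + A1 = BB in hex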
""" Downloading input files from URIs, with plugin modules for different URI schemes Download URI plugins are installed & registered using the setuptools entry point group "miniwdl.plugin.file_download", with name equal to the URI scheme (e.g. "gs" or "s3"). The plugin entry point should be a context manager, which the runtime keeps open for the duration of the download operation. Given the desired URI, it should quickly yield a tuple with: 1. source code of a WDL 1.0 task to perform the download 2. dict of Cromwell-style JSON inputs to give to the task miniwdl then executes this specified operation, expecting it to produce an output "File file" with the downloaded file. By doing the heavy lifting in a WDL task, the operation gets to inherit all the functionality of miniwdl's task runtime, e.g. pulling docker image with binary dependencies, resource scheduling & isolation, logging, error/signal handling, retry, etc. The Python context manager itself might be used to obtain and manage the lifetime of any needed security credentials. """ import os import logging import traceback import tempfile import hashlib import importlib_metadata from contextlib import ExitStack from typing import Optional, List, Generator, Dict, Any, Tuple, Callable from . import config from .cache import CallCache from .._util import compose_coroutines from .._util import StructuredLogMessage as _ def _load(cfg: config.Loader): table = getattr(cfg, "_downloaders", None) if table: return table # default public URI downloaders table = { "https": aria2c_downloader, "http": aria2c_downloader, "ftp": aria2c_downloader, "s3": awscli_downloader, } # plugins for plugin_name, plugin_fn in config.load_plugins(cfg, "file_download"): table[plugin_name] = plugin_fn setattr(cfg, "_downloaders", table) return table def _downloader( cfg: config.Loader, uri: str, ) -> Optional[Callable[..., Generator[Dict[str, Any], Dict[str, Any], None]]]: _load(cfg) colon = uri.find(":") if colon <= 0: return None scheme = uri[:colon] return getattr(cfg, "_downloaders").get(scheme, None) def able(cfg: config.Loader, uri: str) -> bool: """ Returns True if uri appears to be a URI we know how to download """ return _downloader(cfg, uri) is not None def run(cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs) -> str: """ Download the URI and return the local filename. kwargs are passed through to ``run_local_task``, so ``run_dir`` and ``logger_prefix`` may be useful in particular. """ from .error import RunFailed, DownloadFailed, Terminated, error_json from .task import run_local_task from .. 
import parse_document, values_from_json, values_to_json, Walker gen = _downloader(cfg, uri) assert gen try: with compose_coroutines([lambda kwargs: gen(cfg, logger, **kwargs)], {"uri": uri}) as cor: recv = next(cor) if "task_wdl" in recv: task_wdl, inputs = (recv[k] for k in ["task_wdl", "inputs"]) doc = parse_document(task_wdl, version="1.0") # pyre-ignore assert len(doc.tasks) == 1 and not doc.workflow doc.typecheck() Walker.SetParents()(doc) task = doc.tasks[0] inputs = values_from_json(inputs, task.available_inputs) # pyre-ignore subdir, outputs_env = run_local_task( cfg, task, inputs, run_id=("download-" + task.name), **kwargs ) recv = cor.send( {"outputs": values_to_json(outputs_env), "dir": subdir} # pyre-ignore ) ans = recv["outputs"]["file"] assert isinstance(ans, str) and os.path.isfile(ans) return ans except RunFailed as exn: if isinstance(exn.__cause__, Terminated): raise exn.__cause__ from None raise DownloadFailed(uri) from exn.__cause__ except Exception as exn: logger.debug(traceback.format_exc()) logger.error(_("downloader error", uri=uri, **error_json(exn))) raise DownloadFailed(uri) from exn def run_cached( cfg, logger: logging.Logger, cache: CallCache, uri: str, run_dir: str, **kwargs ) -> Tuple[bool, str]: """ Cached download logic: returns the file from the cache if available; otherwise, runs the download and puts it into the cache before returning """ cached = cache.get_download(uri, logger=logger) if cached: return True, cached if not cfg["download_cache"].get_bool("put") or not cache.download_path(uri): return False, run(cfg, logger, uri, run_dir=run_dir, **kwargs) # run the download within the cache directory run_dir = os.path.join(cfg["download_cache"]["dir"], "ops") filename = run(cfg, logger, uri, run_dir=run_dir, **kwargs) return False, cache.put_download(uri, os.path.realpath(filename), logger=logger) # WDL tasks for downloading a file based on its URI scheme def aria2c_downloader( cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs ) -> Generator[Dict[str, Any], Dict[str, Any], None]: wdl = r""" task aria2c { input { String uri Int connections = 10 } command <<< set -euxo pipefail mkdir __out cd __out aria2c -x ~{connections} -s ~{connections} \ --file-allocation=none --retry-wait=2 --stderr=true --enable-color=false \ "~{uri}" >>> output { File file = glob("__out/*")[0] } runtime { cpu: 4 memory: "1G" docker: "hobbsau/aria2" } } """ recv = yield {"task_wdl": wdl, "inputs": {"uri": uri}} yield recv # pyre-ignore def awscli_downloader( cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs ) -> Generator[Dict[str, Any], Dict[str, Any], None]: # get AWS credentials from boto3 (unless prevented by configuration) host_aws_credentials = None if cfg["download_awscli"].get_bool("host_credentials"): try: import boto3 # pyre-fixme b3creds = boto3.session.Session().get_credentials() host_aws_credentials = "\n".join( f"export {k}='{v}'" for (k, v) in { "AWS_ACCESS_KEY_ID": b3creds.access_key, "AWS_SECRET_ACCESS_KEY": b3creds.secret_key, "AWS_SESSION_TOKEN": b3creds.token, }.items() if v ) except Exception: pass inputs = {"uri": uri} with ExitStack() as cleanup: if host_aws_credentials: # write credentials to temp file that'll self-destruct afterwards aws_credentials_file = cleanup.enter_context( tempfile.NamedTemporaryFile( prefix=hashlib.sha256(host_aws_credentials.encode()).hexdigest(), delete=True, mode="w", ) ) print(host_aws_credentials, file=aws_credentials_file, flush=True) # make file group-readable to ensure it'll be usable if the docker 
image runs as non-root os.chmod(aws_credentials_file.name, os.stat(aws_credentials_file.name).st_mode | 0o40) inputs["aws_credentials"] = aws_credentials_file.name logger.getChild("awscli_downloader").info("loaded host AWS credentials") else: logger.getChild("awscli_downloader").info( "no AWS credentials available via host awscli/boto3; if needed, " "configure them and set [download_awscli] host_credentials=true. " "(On EC2: awscli might still assume role from instance metadata " "service.)" ) wdl = r""" task aws_s3_cp { input { String uri File? aws_credentials } command <<< set -euo pipefail if [ -n "~{aws_credentials}" ]; then source "~{aws_credentials}" fi args="" if ! aws sts get-caller-identity >&2 ; then # no credentials or instance role; add --no-sign-request to allow requests for # PUBLIC objects to proceed. args="--no-sign-request" fi mkdir __out cd __out aws s3 cp $args "~{uri}" . >>> output { File file = glob("__out/*")[0] } runtime { cpu: 4 memory: "1G" docker: "amazon/aws-cli" } } """ recv = yield { "task_wdl": wdl, "inputs": inputs, } yield recv # pyre-ignore def gsutil_downloader( cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs ) -> Generator[Dict[str, Any], Dict[str, Any], None]: """ Built-in downloader plugin for public gs:// URIs; registered by setup.cfg entry_points section TODO: adopt security credentials from runtime environment """ if uri == "gs://8675309": # hook for test coverage of exception handler raise RuntimeError("don't change your number") wdl = r""" task gsutil_cp { input { String uri } command <<< set -euxo pipefail mkdir __out/ gsutil -q cp "~{uri}" __out/ >>> output { File file = glob("__out/*")[0] } runtime { cpu: 4 memory: "1G" docker: "google/cloud-sdk:slim" } } """ yield (yield {"task_wdl": wdl, "inputs": {"uri": uri}}) # pyre-ignore
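# --- Illustrative sketch of a third-party downloader plugin ---
# It follows the contract described in the module docstring above: yield the WDL task
# source plus Cromwell-style inputs, then hand back what the runtime sends. The "demo"
# scheme, the busybox image and the wget-based task body are hypothetical; a real
# plugin would be registered under the "miniwdl.plugin.file_download" entry point
# group with its name set to the URI scheme it handles.
def demo_downloader(
    cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs
) -> Generator[Dict[str, Any], Dict[str, Any], None]:
    wdl = r"""
    task demo_download {
        input {
            String uri
        }
        command <<<
            set -eux
            mkdir __out
            cd __out
            wget "~{uri}"
        >>>
        output {
            File file = glob("__out/*")[0]
        }
        runtime {
            cpu: 1
            memory: "1G"
            docker: "busybox"
        }
    }
    """
    recv = yield {"task_wdl": wdl, "inputs": {"uri": uri}}
    # recv == {"outputs": ..., "dir": ...}; pass it back unchanged, as the built-ins do
    yield recv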
s = input("Please enter your own String : ") if(s == s[:: - 1]): print("Palindrome") else: print("Not a Palindrome string")
# -*- coding: utf-8 -*- """ module for mul and mulfix class: fund combination management """ import logging import pandas as pd from pyecharts import options as opts from pyecharts.charts import Pie, ThemeRiver from xalpha.cons import convert_date, myround, yesterdaydash, yesterdayobj from xalpha.evaluate import evaluate from xalpha.exceptions import FundTypeError, TradeBehaviorError from xalpha.record import record, irecord from xalpha.indicator import indicator from xalpha.info import cashinfo, fundinfo, mfundinfo, get_fund_holdings from xalpha.trade import ( bottleneck, trade, turnoverrate, vtradevolume, xirrcal, itrade, vtradecost, ) from xalpha.universal import get_fund_type, ttjjcode, get_rt, get_industry_fromxq import xalpha.universal as xu logger = logging.getLogger(__name__) class mul: """ multiple fund positions manage class :param fundtradeobj: list of trade obj which you want to analyse together :param status: the status table of trade, all code in this table would be considered. one must provide one of the two paramters, if both are offered, status will be overlooked 可以是场内记账单 DataFrame,也可以是 record 对象。 :param istatus: 场内交易账单,也可以是 irecord 对象。 若提供,则场内外交易联合统计展示。该选项只保证 ``combsummary`` 方法可正常使用,不保证 ``mul`` 类的其他方法可用。 :param property: Dict[fundcode, property_number]. property number 的解释: int. 1: 基金申购采取分位以后全舍而非四舍五入(这种基金是真实存在的==)。2:基金默认分红再投入(0 则是默认现金分红)。4:基金赎回按净值处理(暂时只支持货币基金,事实上无法精确支持按份额赎回的净值型基金)。将想要的性质数值相加即可,类似 *nix 上的 xwr 系统。 :param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init :param save: boolean, when open the save option, info classes automatically save the class to files :param path: string, the file path prefix of IO, or object or engine from sqlalchemy to connect sql database :param form: string, the format of IO, options including: 'csv','sql' """ def __init__( self, *fundtradeobj, status=None, istatus=None, property=None, fetch=False, save=False, path="", form="csv" ): if isinstance(status, record): if not property: property = getattr(status, "property", {}) status = status.status elif not property: property = {} self.is_in = False if fundtradeobj: for t in fundtradeobj: if isinstance(t, itrade): self.is_in = True break else: fundtradeobj = [] # warning: not a very good way to automatic generate these fund obj # because there might be some funds use round_down for share calculation, ie, label=2 must be given # unless you are sure corresponding funds are added to the droplist fundcodelist = [f.code for f in fundtradeobj] if status is not None: for code in status.columns: if code == "date": continue # r1, d2, v4 p = r+d+v if code in fundcodelist: continue p = property.get(code, 0) round_label = p % 2 dividend_label = ((p - round_label) / 2) % 2 value_label = ((p - round_label - dividend_label) / 4) % 2 try: fundtradeobj.append( trade( fundinfo( code, round_label=round_label, dividend_label=dividend_label, fetch=fetch, save=save, path=path, form=form, ), status, ) ) except FundTypeError: fundtradeobj.append( trade( mfundinfo( code, round_label=round_label, value_label=value_label, fetch=fetch, save=save, path=path, form=form, ), status, ) ) if istatus is not None: self.is_in = True if isinstance(istatus, irecord): istatus = istatus.status for code in istatus.code.unique(): if code not in fundcodelist and not code.startswith("#"): fundtradeobj.append(itrade(code, istatus)) self.fundtradeobj = tuple(fundtradeobj) self.totcftable = self._mergecftb() def tot(self, prop="基金现值", date=yesterdayobj()): """ sum of all the 
values from one prop of fund daily report, of coures many of the props make no sense to sum :param prop: string defined in the daily report dict, typical one is 'currentvalue' or 'originalpurchase' """ res = 0 for fund in self.fundtradeobj: res += fund.dailyreport().iloc[0][prop] return res def combsummary(self, date=yesterdayobj()): """ brief report table of every funds and the combination investment :param date: string or obj of date, show info of the date given :returns: empty dict if nothing is remaining that date dict of various data on the trade positions """ date = convert_date(date) columns = [ "基金名称", "基金代码", "当日净值", "单位成本", "持有份额", "基金现值", "基金总申购", "历史最大占用", "基金持有成本", "基金分红与赎回", "换手率", "基金收益总额", "投资收益率", ] summarydf = pd.DataFrame([], columns=columns) for fund in self.fundtradeobj: summarydf = summarydf.append( fund.dailyreport(date), ignore_index=True, sort=True ) tname = "总计" tcode = "total" tunitvalue = float("NaN") tunitcost = float("NaN") tholdshare = float("NaN") tcurrentvalue = summarydf["基金现值"].sum() tpurchase = summarydf["基金总申购"].sum() tbtnk = bottleneck(self.totcftable[self.totcftable["date"] <= date]) tcost = summarydf["基金持有成本"].sum() toutput = summarydf["基金分红与赎回"].sum() tturnover = turnoverrate(self.totcftable[self.totcftable["date"] <= date], date) # 计算的是总系统作为整体和外界的换手率,而非系统各成分之间的换手率 tearn = summarydf["基金收益总额"].sum() trate = round(tearn / tbtnk * 100, 4) trow = pd.DataFrame( [ [ tname, tcode, tunitvalue, tunitcost, tholdshare, tcurrentvalue, tpurchase, tbtnk, tcost, toutput, tturnover, tearn, trate, ] ], columns=columns, ) summarydf = summarydf.append(trow, ignore_index=True, sort=True) return summarydf[columns].sort_values(by="基金现值", ascending=False) summary = combsummary def _mergecftb(self): """ merge the different cftable for different funds into one table """ dtlist = [] for fund in self.fundtradeobj: dtlist2 = [] for _, row in fund.cftable.iterrows(): dtlist2.append((row["date"], row["cash"])) dtlist.extend(dtlist2) nndtlist = set([item[0] for item in dtlist]) nndtlist = sorted(list(nndtlist), key=lambda x: x) reslist = [] for date in nndtlist: reslist.append(sum([item[1] for item in dtlist if item[0] == date])) df = pd.DataFrame(data={"date": nndtlist, "cash": reslist}) df = df[df["cash"] != 0] df = df.reset_index(drop=True) return df def xirrrate(self, date=yesterdayobj(), startdate=None, guess=0.01): """ xirr rate evauation of the whole invest combination :param date: string or obj of datetime, the virtually sell-all date :param startdate: string or obj of datetime, the beginning date of calculation, default from first buy """ return xirrcal(self.totcftable, self.fundtradeobj, date, startdate, guess) def evaluation(self, start=None): """ give the evaluation object to analysis funds properties themselves instead of trades :returns: :class:`xalpha.evaluate.evaluate` object, with referenced funds the same as funds we invested """ if self.is_in: raise NotImplementedError() case = evaluate( *[fundtrade.aim for fundtrade in self.fundtradeobj], start=start ) return case def get_stock_holdings( self, year=None, season=None, date=yesterdayobj(), threhold=100 ): """ 获取整个基金组合的底层股票持仓总和和细节,组合穿透 :param year: 基于的基金季报年份 :param season: 基于的基金季报季度 :param date: 默认昨天 :param threhold: 默认100。小于100元的底层股票将不在最后的结果中展示 :return: pd.DataFrame column: name, code, value, ratio """ d = {} if year is None or season is None: rd = convert_date(date) - pd.Timedelta(days=120) if not year: year = rd.year if not season: season = int((rd.month - 0.1) / 3) + 1 logger.debug("use %s, %s for fund 
report" % (year, season)) for f in self.fundtradeobj: if isinstance(f, itrade): if f.get_type() == "股票": code = f.code elif f.get_type() == "场内基金": code = f.code[2:] else: continue else: code = f.code value = f.briefdailyreport(date).get("currentvalue", 0) if value > 0: if code.startswith("SH") or code.startswith("SZ"): stock = code d[stock] = d.get(stock, 0) + value elif code == "mf": continue else: df = get_fund_holdings(code, year, season) if df is None: continue for _, row in df.iterrows(): stock = row["code"] stock = ttjjcode(stock) d[stock] = d.get(stock, 0) + row["ratio"] / 100 * value # print("%s has %s contribution from %s" %(stock, row["ratio"] / 100 * value, f.name)) l = [] for code, value in sorted(d.items(), key=lambda item: -item[1]): if value >= threhold: try: name = get_rt(code)["name"] except: name = code l.append([name, code, value]) fdf = pd.DataFrame(l, columns=["name", "code", "value"]) fdf["ratio"] = fdf["value"] / fdf["value"].sum() return fdf def get_portfolio(self, date=yesterdayobj()): """ 获取基金组合底层资产大类配置的具体值 :param date: :return: Dict[str, float]. stock,bond,cash 对应总值的字典 """ d = {"stock": 0, "bond": 0, "cash": 0} date = convert_date(date) for f in self.fundtradeobj: value = f.briefdailyreport(date).get("currentvalue", 0) if value > 0: if isinstance(f, itrade): if f.get_type() == "股票": d["stock"] += value continue elif f.get_type() in ["可转债", "债券"]: d["bond"] += value continue elif f.get_type() == "货币基金": d["cash"] += value continue elif f.get_type() == "场内基金": code = f.code[2:] else: continue else: code = f.code if code == "mf": d["cash"] += value continue if get_fund_type(code) == "货币基金": d["cash"] += value continue df = xu.get_daily("pt-F" + code, end=date.strftime("%Y%m%d")) if df is None or len(df) == 0: logger.warning("empty portfolio info for %s" % code) row = df.iloc[-1] if row["bond_ratio"] + row["stock_ratio"] < 10: # 联接基金 d["stock"] += ( (100 - row["bond_ratio"] - row["cash_ratio"]) * value / 100 ) d["bond"] += row["bond_ratio"] * value / 100 d["cash"] += row["cash_ratio"] * value / 100 else: d["stock"] += row["stock_ratio"] * value / 100 d["bond"] += row["bond_ratio"] * value / 100 d["cash"] += row["cash_ratio"] * value / 100 return d get_portfolio_holdings = get_portfolio def get_industry(self, date=yesterdayobj()): """ 获取基金组合持仓的行业占比信息,底层为非 A 股持仓的暂不支持 :param date: :return: Dict """ # TODO: hard coded 一个字典来合并一些二级行业 d = {} date = convert_date(date) rd = date - pd.Timedelta(days=120) year = rd.year season = int((rd.month - 0.1) / 3) + 1 for f in self.fundtradeobj: value = f.briefdailyreport(date).get("currentvalue", 0) if value > 0: if isinstance(f, itrade): if f.get_type() == "股票": industry = get_industry_fromxq(f.code).get("industryname", "") if industry.strip(): d[industry] = d.get(industry, 0) + value continue elif f.get_type() in ["可转债", "债券", "货币基金"]: # 现在简化实现可转债暂时不按正股记行业 continue elif f.get_type() == "场内基金": code = f.code[2:] else: continue else: code = f.code if code == "mf": continue if get_fund_type(code) == "货币基金": continue ## 以下为持有股票的基金处理 ## fundinfo 有点浪费,不过简化实现暂时如此 fobj = fundinfo(code) industry_dict = fobj.get_industry_holdings(year=year, season=season) if industry_dict is None: continue ## 这里行业占比需要做个 scaling sv = sum([v for _, v in industry_dict.items()]) if sv < 1.0: # 只有极少数持仓存在行业信息 continue stock_ratio = fobj.get_portfolio_holdings(date.strftime("%Y%m%d"))[ "stock_ratio" ] scale = stock_ratio / sv print(scale) for k, v in industry_dict.items(): if k.strip(): d[k] = d.get(k, 0) + value * v / 100 * scale return d get_industry_holdings = 
get_industry def v_positions(self, date=yesterdayobj(), rendered=True): """ pie chart visualization of positions ratio in combination """ sdata = sorted( [ (fob.name, fob.briefdailyreport(date).get("currentvalue", 0)) for fob in self.fundtradeobj ], key=lambda x: x[1], reverse=True, ) pie = Pie() pie.add( series_name="总值占比", data_pair=sdata, label_opts=opts.LabelOpts(is_show=False, position="center"), ).set_global_opts( legend_opts=opts.LegendOpts( pos_left="left", type_="scroll", orient="vertical" ) ).set_series_opts( tooltip_opts=opts.TooltipOpts( trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)" ), ) if rendered: return pie.render_notebook() else: return pie def v_category_positions(self, date=yesterdayobj(), rendered=True): """ 资产分类扇形图,按大类资产求和绘制 :param date: :param rendered: bool. default true for notebook, for plain pyechart obj to return, set rendered=False :return: """ d = {} for f in self.fundtradeobj: if isinstance(f, itrade): t = f.get_type() if t == "场内基金": t = get_fund_type(f.code[2:]) elif f.code == "mf": t = "货币基金" else: t = get_fund_type(f.code) if t == "其他": logger.warning( "%s has category others which should be double checked" % f.code ) d[t] = d.get(t, 0) + f.briefdailyreport(date).get("currentvalue", 0) sdata = sorted([(k, round(v, 2)) for k, v in d.items()]) pie = Pie() pie.add( series_name="总值占比", data_pair=sdata, label_opts=opts.LabelOpts(is_show=False, position="center"), ).set_global_opts( legend_opts=opts.LegendOpts( pos_left="left", type_="scroll", orient="vertical" ) ).set_series_opts( tooltip_opts=opts.TooltipOpts( trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)" ), ) if rendered: return pie.render_notebook() else: return pie def v_positions_history(self, end=yesterdaydash(), rendered=True): """ river chart visulization of positions ratio history use text size to avoid legend overlap in some sense, eg. legend_text_size=8 """ start = self.totcftable.iloc[0].date times = pd.date_range(start, end) tdata = [] for date in times: sdata = sorted( [ (date, fob.briefdailyreport(date).get("currentvalue", 0), fob.name,) for fob in self.fundtradeobj ], key=lambda x: x[1], reverse=True, ) tdata.extend(sdata) tr = ThemeRiver() tr.add( series_name=[foj.name for foj in self.fundtradeobj], data=tdata, label_opts=opts.LabelOpts(is_show=False), singleaxis_opts=opts.SingleAxisOpts(type_="time", pos_bottom="10%"), ) if rendered: return tr.render_notebook() else: return tr def v_tradevolume(self, freq="D", rendered=True): """ visualization on trade summary of the funds combination :param freq: one character string, frequency label, now supporting D for date, W for week and M for month, namely the trade volume is shown based on the time unit :returns: ``pyecharts.Bar()`` """ return vtradevolume(self.totcftable, freq=freq, rendered=rendered) class mulfix(mul, indicator): """ introduce cash to make a closed investment system, where netvalue analysis can be applied namely the totcftable only has one row at the very beginning :param fundtradeobj: trade obj to be include :param status: status table, if no trade obj is provided, it will include all fund based on code in status table :param property: Dict[fundcode, property_number]. property number 的解释: int. 
1: 基金申购采取分位以后全舍而非四舍五入(这种基金是真实存在的==)。2:基金默认分红再投入(0 则是默认现金分红)。4:基金赎回按净值 :param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init :param save: boolean, when open the save option, info classes automatically save the class to files :param path: string, the file path prefix of IO, or object or engine from sqlalchemy to connect sql database :param form: string, the format of IO, options including: 'csv','sql' :param totmoney: positive float, the total money as the input at the beginning :param cashobj: cashinfo object, which is designed to balance the cash in and out """ def __init__( self, *fundtradeobj, status=None, istatus=None, property=None, fetch=False, save=False, path="", form="csv", totmoney=100000, cashobj=None ): super().__init__( *fundtradeobj, status=status, istatus=istatus, property=property, fetch=fetch, save=save, path=path, form=form ) if cashobj is None: cashobj = cashinfo() self.totmoney = totmoney nst = mulfix._vcash(totmoney, self.totcftable, cashobj) cashtrade = trade(cashobj, nst) # super().__init__(*self.fundtradeobj, cashtrade) self.cashobj = cashobj self.fundtradeobj = list(self.fundtradeobj) self.fundtradeobj.append(cashtrade) self.fundtradeobj = tuple(self.fundtradeobj) btnk = bottleneck(self.totcftable) if btnk > totmoney: raise TradeBehaviorError("the initial total cash is too low") self.totcftable = pd.DataFrame( data={"date": [nst.iloc[0].date], "cash": [-totmoney]} ) @staticmethod def _vcash(totmoney, totcftable, cashobj): """ return a virtue status table with a mf(cash) column based on the given tot money and cftable """ cashl = [] cashl.append(totmoney + totcftable.iloc[0].cash) for i in range(len(totcftable) - 1): date = totcftable.iloc[i + 1].date delta = totcftable.iloc[i + 1].cash if delta < 0: cashl.append( myround( delta / cashobj.price[cashobj.price["date"] <= date].iloc[-1].netvalue ) ) else: cashl.append(delta) datadict = {"date": totcftable.loc[:, "date"], "mf": cashl} return pd.DataFrame(data=datadict) def unitvalue(self, date=yesterdayobj()): """ :returns: float at unitvalue of the whole investment combination """ date = convert_date(date) res = 0 for fund in self.fundtradeobj: res += fund.briefdailyreport(date).get("currentvalue", 0) return res / self.totmoney def v_tradecost(self, threhold=0, date=yesterdayobj(), rendered=True): if getattr(self, "price", None) is None: raise ValueError("Please generate price table by ``bcmkset()`` first") cftable = self.fundtradeobj[-1].cftable[1:] cftable = cftable[abs(cftable["cash"]) > threhold] cftable["cash"] = -cftable["cash"] return vtradecost(self, cftable, end=date, rendered=rendered) class imul(mul): def __init__(self, *fundtradeobj, status=None, istatus=None): """ 对场内投资组合进行分析的类 :param fundtradeobj: itrade objects. :param status: 场内格式记账单,或 irecord 对象。 """ if not fundtradeobj: fundtradeobj = [] if status is None: status = istatus if isinstance(status, irecord): status = status.status fundcodelist = [f.code for f in fundtradeobj] if status is not None: for code in status.code.unique(): if code not in fundcodelist and not code.startswith("#"): fundtradeobj.append(itrade(code, status)) self.fundtradeobj = tuple(fundtradeobj) self.totcftable = self._mergecftb() self.is_in = True Mul = mul MulFix = mulfix IMul = imul
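# A minimal, self-contained sketch (not part of xalpha itself) of the idea behind
# _mergecftb above: cash flows from several trade objects are pooled, summed per
# date, and dates whose net flow is zero are dropped.  Only pandas is needed; the
# two small "cftable" frames below are made-up illustrations.
import pandas as pd

cftable_a = pd.DataFrame({"date": pd.to_datetime(["2021-01-04", "2021-02-01"]),
                          "cash": [-1000.0, -500.0]})
cftable_b = pd.DataFrame({"date": pd.to_datetime(["2021-02-01", "2021-03-01"]),
                          "cash": [500.0, 800.0]})

merged = (pd.concat([cftable_a, cftable_b])
          .groupby("date", as_index=False)["cash"]
          .sum())
merged = merged[merged["cash"] != 0].reset_index(drop=True)
print(merged)  # 2021-02-01 nets to zero and is dropped, as in _mergecftb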
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class to hold a library of OpDefs and use it to create Brain operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import six from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import op_def_pb2 from tensorflow.core.framework import tensor_pb2 from tensorflow.core.framework import tensor_shape_pb2 from tensorflow.core.framework import types_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import constant_op from tensorflow.python.platform import logging from tensorflow.python.util import compat def _Attr(op_def, name): for attr in op_def.attr: if attr.name == name: return attr raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" % (op_def.name, name)) def _AttrValue(attr_protos, name): if name in attr_protos: return attr_protos[name] raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." % (name, attr_protos)) def _SatisfiesTypeConstraint(dtype, attr_def): if attr_def.HasField("allowed_values"): allowed_list = attr_def.allowed_values.list.type if dtype not in allowed_list: raise TypeError( "DataType %s for attr '%s' not in list of allowed values: %s" % (dtypes.as_dtype(dtype).name, attr_def.name, ", ".join(dtypes.as_dtype(x).name for x in allowed_list))) def _IsListParameter(arg): if arg.number_attr: return True elif arg.type_list_attr: return True return False def _NumTypeFields(arg): num = 0 if arg.type != types_pb2.DT_INVALID: num += 1 if arg.type_attr: num += 1 if arg.type_list_attr: num += 1 return num def _IsListValue(v): return isinstance(v, (list, tuple)) def _Flatten(l): """Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5].""" # [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]] l_of_l = [x if _IsListValue(x) else [x] for x in l] # [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5] return [item for sublist in l_of_l for item in sublist] def _Restructure(l, structure): """Returns the elements of list l structured according to the given structure. A structure is represented by a list whose elements are either `None` or a non-negative integer. `None` corresponds to a single element in the output list, and an integer N corresponds to a nested list of length N. The function returns a data structure whose shape is given by `structure`, and whose elements are taken from `l`. If `structure` is a singleton, the function returns the single data structure implied by the 0th element of `structure`. For example: _Restructure(["foo", "bar", "baz", "qux"], [None, 2, None]) -> ["foo", ["bar", "baz"], "qux"] _Restructure(["foo"], [None]) -> "foo" _Restructure(["foo"], [1]) -> ["foo"] _Restructure([], [0]) -> [] Args: l: A list. 
structure: A list whose elements are either `None` or a non-negative integer. Returns: The elements of `l`, restructured according to `structure`. If `structure` is a list of length 1, this function returns the single data structure implied by `structure[0]`. """ result = [] current_index = 0 for element in structure: if element is None: result.append(l[current_index]) current_index += 1 else: result.append(l[current_index:current_index+element]) current_index += element if len(result) == 1: return result[0] else: return tuple(result) def _MakeFloat(v, arg_name): if not isinstance(v, compat.real_types): raise TypeError("Expected float for argument '%s' not %s." % (arg_name, repr(v))) return float(v) def _MakeInt(v, arg_name): if isinstance(v, six.string_types): raise TypeError("Expected int for argument '%s' not %s." % (arg_name, repr(v))) try: return int(v) except (ValueError, TypeError): raise TypeError("Expected int for argument '%s' not %s." % (arg_name, repr(v))) def _MakeStr(v, arg_name): if not isinstance(v, compat.bytes_or_text_types): raise TypeError("Expected string for argument '%s' not %s." % (arg_name, repr(v))) return compat.as_bytes(v) # Convert unicode strings to bytes. def _MakeBool(v, arg_name): if not isinstance(v, bool): raise TypeError("Expected bool for argument '%s' not %s." % (arg_name, repr(v))) return v def _MakeType(v, attr_def): try: v = dtypes.as_dtype(v) except TypeError: raise TypeError("Expected DataType for argument '%s' not %s." % (attr_def.name, repr(v))) i = v.as_datatype_enum _SatisfiesTypeConstraint(i, attr_def) return i def _MakeShape(v, arg_name): """Convert v into a TensorShapeProto.""" # Args: # v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape. # arg_name: String, for error messages. # Returns: # A TensorShapeProto. if isinstance(v, tensor_shape_pb2.TensorShapeProto): for d in v.dim: if d.name: logging.warning("Warning: TensorShapeProto with a named dimension: %s", str(v)) break return v return tensor_shape.as_shape(v).as_proto() def _MakeTensor(v, arg_name): """Ensure v is a TensorProto.""" if isinstance(v, tensor_pb2.TensorProto): return v raise TypeError( "Don't know how to convert %s to a TensorProto for argument '%s'" % (repr(v), arg_name)) class _OpInfo(object): """All per-Op state we would like to precompute/validate.""" def __init__(self, op_def): self.op_def = op_def # TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it # here, instead of these checks. for arg in list(op_def.input_arg) + list(op_def.output_arg): num_type_fields = _NumTypeFields(arg) if num_type_fields != 1: raise TypeError("Arg '%s' of '%s' must have one type field not %d" % (arg.name, op_def.name, num_type_fields)) if arg.type_attr: attr_type = _Attr(op_def, arg.type_attr).type if attr_type != "type": raise TypeError("Attr '%s' of '%s' used as a type_attr " "but has type %s" % (arg.type_attr, op_def.name, attr_type)) if arg.type_list_attr: attr_type = _Attr(op_def, arg.type_list_attr).type if attr_type != "list(type)": raise TypeError( "Attr '%s' of '%s' used as a type_list_attr but has type %s" % (arg.type_attr, op_def.name, attr_type)) if arg.number_attr: attr_type = _Attr(op_def, arg.number_attr).type if attr_type != "int": raise TypeError( "Attr '%s' of '%s' used as a number_attr but has type %s" % (arg.number_attr, op_def.name, attr_type)) # pylint: disable=g-doc-return-or-yield @contextlib.contextmanager def _MaybeColocateWith(inputs): """A context manager for (maybe) colocating with a list of input tensors. 
Args: inputs: A list of `Tensor` or `Operation` objects. Returns: A context manager. """ if not inputs: yield else: # NOTE(mrry): The `ops.colocate_with()` function accepts only a single # op or tensor, so we create one context manager per element in the list. with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]): yield # pylint: enable=g-doc-return-or-yield class OpDefLibrary(object): """Holds a collection of OpDefs, can add the corresponding Ops to a graph.""" def __init__(self): self._ops = {} def add_op(self, op_def): """Register an OpDef. May call apply_op with the name afterwards.""" if not isinstance(op_def, op_def_pb2.OpDef): raise TypeError("%s is %s, not an op_def_pb2.OpDef" % (op_def, type(op_def))) if not op_def.name: raise ValueError("%s missing name." % op_def) if op_def.name in self._ops: raise RuntimeError("Op name %s registered twice." % op_def.name) self._ops[op_def.name] = _OpInfo(op_def) def add_op_list(self, op_list): """Register the OpDefs from an OpList.""" if not isinstance(op_list, op_def_pb2.OpList): raise TypeError("%s is %s, not an op_def_pb2.OpList" % (op_list, type(op_list))) for op_def in op_list.op: self.add_op(op_def) def apply_op(self, op_type_name, name=None, **keywords): # pylint: disable=g-doc-args """Add a node invoking a registered Op to a graph. Config proto extensions must be provided via the 'ext' keyword argument. Example usage: # input1 and input2 can be Tensors or anything ops.convert_to_tensor() # will convert to a Tensor. op_def_library.apply_op("op", input1=input1, input2=input2) # Can specify a node name. op_def_library.apply_op("op", input1=input1, name="node_name") # Must use keyword arguments, with the names specified in the OpDef. op_def_library.apply_op("op", input_name=input, attr_name=attr) All attrs must either be inferred from an input or specified. (If inferred, the attr must not be specified.) If an attr has a default value specified in the Op's OpDef, then you may pass None as the value of that attr to get the default. Args: op_type_name: string. Must match the name field of a registered Op. name: string. Optional name of the created op. **keywords: input Tensor and attr arguments specified by name, and optional parameters to pass when constructing the Operation. Returns: The Tensor(s) representing the output of the operation, or the Operation itself if there are no outputs. Raises: RuntimeError: On some errors. TypeError: On some errors. ValueError: On some errors. """ op_info = self._ops.get(op_type_name, None) if op_info is None: raise RuntimeError("Unrecognized Op name " + op_type_name) op_def = op_info.op_def # Determine the graph context. try: # Need to flatten all the arguments into a list. # pylint: disable=protected-access g = ops._get_graph_from_inputs(_Flatten(keywords.values())) # pyline: enable=protected-access except AssertionError as e: raise RuntimeError( "Cannot determine graph for Op '%s' due to: %s" % (op_type_name, e.message)) # Default name if not specified. if name is None: name = op_type_name # Check for deprecation deprecation_version = op_def.deprecation.version if deprecation_version: producer = g.graph_def_versions.producer if producer >= deprecation_version: raise NotImplementedError( ("Op %s is not available in GraphDef version %d. " "It has been removed in version %d. %s.") % (op_type_name, producer, deprecation_version, op_def.deprecation.explanation)) # Requires that op_def has passed validation (using the C++ # ValidateOpDef() from ../framework/op_def_util.h). 
attrs = {} inputs = [] input_types = [] with g.as_default(), ops.name_scope(name) as scope: # Perform input type inference inferred_from = {} for input_arg in op_def.input_arg: input_name = input_arg.name if input_name in keywords: values = keywords.pop(input_name) elif input_name + "_" in keywords: # Handle the case where the name is a keyword or built-in # for Python so we use the name + _ instead. input_name += "_" values = keywords.pop(input_name) else: raise TypeError("No argument for input " + input_name) # Goals: # * Convert values to Tensors if it contains constants. # * Verify that values is a list if that matches the input_arg's # type. # * If the input_arg's type is determined by attrs, either set # those attrs and validate those attr values are legal (if # they have not yet been set) or validate the input matches # the type indicated by the attrs (if they have already been # inferred via an earlier input). # * If the input_arg has an explicit type, make sure the input # conforms. if _IsListParameter(input_arg): if not _IsListValue(values): raise TypeError( "Expected list for '%s' argument to '%s' Op, not %s." % (input_name, op_type_name, values)) # In cases where we expect all elements of the list to have the # same dtype, try to cast non-Tensor elements to that type. dtype = None if input_arg.type != types_pb2.DT_INVALID: dtype = input_arg.type elif input_arg.number_attr: if input_arg.type_attr in attrs: dtype = attrs[input_arg.type_attr] else: for t in values: if isinstance(t, ops.Tensor): dtype = t.dtype break try: if not input_arg.is_ref and dtype: dtype = dtypes.as_dtype(dtype).base_dtype values = ops.convert_n_to_tensor( values, name=input_arg.name, dtype=dtype if dtype else None, as_ref=input_arg.is_ref) except (TypeError, ValueError): assert dtype is not None, "Should not fail if dtype is None" assert input_arg.number_attr, "Should be number_attr case" # What types does the conversion function think values have? values = ops.convert_n_to_tensor(values, as_ref=input_arg.is_ref) observed = ", ".join(v.dtype.base_dtype.name for v in values) prefix = ( "Tensors in list passed to '%s' of '%s' Op have types [%s]" % (input_name, op_type_name, observed)) if input_arg.type != types_pb2.DT_INVALID: raise TypeError("%s that do not match expected type %s." % (prefix, dtype.name)) elif input_arg.type_attr in attrs: raise TypeError("%s that do not match type %s inferred from " "earlier arguments." % (prefix, dtype.name)) else: raise TypeError("%s that don't all match." % prefix) types = [x.dtype for x in values] inputs.extend(values) else: # In cases where we have an expected type, try to convert non-Tensor # arguments to that type. dtype = None if input_arg.type != types_pb2.DT_INVALID: dtype = input_arg.type elif input_arg.type_attr in attrs: dtype = attrs[input_arg.type_attr] try: values = ops.convert_to_tensor( values, name=input_arg.name, dtype=dtype, as_ref=input_arg.is_ref) except ValueError: # What type does convert_to_tensor think it has? observed = ops.convert_to_tensor(values, as_ref=input_arg.is_ref).dtype.name prefix = ("Input '%s' of '%s' Op has type %s that does not match" % (input_name, op_type_name, observed)) if input_arg.type != types_pb2.DT_INVALID: raise TypeError("%s expected type of %s." % (prefix, dtypes.as_dtype(input_arg.type).name)) else: raise TypeError( "%s type %s of argument '%s'." 
% (prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name, inferred_from[input_arg.type_attr])) types = [values.dtype] inputs.append(values) base_types = [x.base_dtype for x in types] if input_arg.number_attr: # <number-attr> * <type> or <number-attr> * <type-attr> if input_arg.number_attr in attrs: if len(values) != attrs[input_arg.number_attr]: raise ValueError( "List argument '%s' to '%s' Op with length %d must match " "length %d of argument '%s'." % (input_name, op_type_name, len(values), attrs[input_arg.number_attr], inferred_from[input_arg.number_attr])) else: attrs[input_arg.number_attr] = len(values) inferred_from[input_arg.number_attr] = input_name num_attr = _Attr(op_def, input_arg.number_attr) if num_attr.has_minimum and len(values) < num_attr.minimum: raise ValueError( "List argument '%s' to '%s' Op with length %d shorter " "than minimum length %d." % (input_name, op_type_name, len(values), num_attr.minimum)) # All tensors must have the same base type. if any([bt != base_types[0] for bt in base_types]): raise TypeError( "All tensors passed to '%s' of '%s' Op " "must have the same type." % (input_name, op_type_name)) if input_arg.type != types_pb2.DT_INVALID: # <number-attr> * <type> case if base_types and base_types[0] != input_arg.type: assert False, "Unreachable" elif input_arg.type_attr in attrs: # <number-attr> * <type-attr> case, where <type-attr> already # has an inferred value. if base_types and base_types[0] != attrs[input_arg.type_attr]: assert False, "Unreachable" else: # <number-attr> * <type-attr> case, where we are now setting # the <type-attr> based on this input if not base_types: raise TypeError( "Don't know how to infer type variable from empty input " "list passed to input '%s' of '%s' Op." % (input_name, op_type_name)) attrs[input_arg.type_attr] = base_types[0] inferred_from[input_arg.type_attr] = input_name type_attr = _Attr(op_def, input_arg.type_attr) _SatisfiesTypeConstraint(base_types[0], type_attr) elif input_arg.type_attr: # <type-attr> attr_value = base_types[0] if input_arg.type_attr in attrs: if attrs[input_arg.type_attr] != attr_value: assert False, "Unreachable" else: for base_type in base_types: _SatisfiesTypeConstraint(base_type, _Attr(op_def, input_arg.type_attr)) attrs[input_arg.type_attr] = attr_value inferred_from[input_arg.type_attr] = input_name elif input_arg.type_list_attr: # <type-list-attr> attr_value = base_types if input_arg.type_list_attr in attrs: if attrs[input_arg.type_list_attr] != attr_value: raise TypeError( "Input '%s' of '%s' Op has type list of %s that does not " "match type list %s of argument '%s'." 
% (input_name, op_type_name, ", ".join(dtypes.as_dtype(x).name for x in attr_value), ", ".join(dtypes.as_dtype(x).name for x in attrs[input_arg.type_list_attr]), inferred_from[input_arg.type_list_attr])) else: for base_type in base_types: _SatisfiesTypeConstraint(base_type, _Attr(op_def, input_arg.type_list_attr)) attrs[input_arg.type_list_attr] = attr_value inferred_from[input_arg.type_list_attr] = input_name else: # single Tensor with specified type if base_types[0] != input_arg.type: assert False, "Unreachable" if input_arg.is_ref: if not all(x.is_ref_dtype for x in types): raise TypeError( "Input '%s' of '%s' Op requires l-value input" % (input_name, op_type_name)) input_types.extend(types) else: input_types.extend(base_types) # Process remaining attrs for attr in op_def.attr: # Skip attrs that have already had their values inferred if attr.name in attrs: if attr.name in keywords: raise TypeError( "Should not specify value for inferred attr '%s'." % attr.name) continue if attr.name in keywords: attrs[attr.name] = keywords.pop(attr.name) elif attr.name + "_" in keywords: # Attrs whose names match Python keywords have an extra '_' # appended, so we must check for that as well. attrs[attr.name] = keywords.pop(attr.name + "_") else: raise TypeError("No argument for attr " + attr.name) # Convert attr values to AttrValue protos. attr_protos = {} for attr_def in op_def.attr: key = attr_def.name value = attrs[key] attr_value = attr_value_pb2.AttrValue() if attr_def.HasField("default_value") and value is None: attr_value.CopyFrom(attr_def.default_value) attr_protos[key] = attr_value continue if attr_def.type.startswith("list("): if not _IsListValue(value): raise TypeError("Expected list for attr " + key) if attr_def.has_minimum: if len(value) < attr_def.minimum: raise ValueError("Attr '%s' of '%s' Op passed list of length %d " "less than minimum %d." % (key, op_type_name, len(value), attr_def.minimum)) attr_value.list.SetInParent() if attr_def.type == "string": attr_value.s = _MakeStr(value, key) if attr_def.HasField("allowed_values"): if attr_value.s not in attr_def.allowed_values.list.s: raise ValueError( "Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." % (key, op_type_name, compat.as_text(attr_value.s), '", "'.join(map(compat.as_text, attr_def.allowed_values.list.s)))) elif attr_def.type == "list(string)": attr_value.list.s.extend([_MakeStr(x, key) for x in value]) if attr_def.HasField("allowed_values"): for x in attr_value.list.s: if x not in attr_def.allowed_values.list.s: raise ValueError( "Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." % (key, op_type_name, compat.as_text(x), '", "'.join(map(compat.as_text, attr_def.allowed_values.list.s)))) elif attr_def.type == "int": attr_value.i = _MakeInt(value, key) if attr_def.has_minimum: if attr_value.i < attr_def.minimum: raise ValueError( "Attr '%s' of '%s' Op passed %d less than minimum %d." 
% (key, op_type_name, attr_value.i, attr_def.minimum)) elif attr_def.type == "list(int)": attr_value.list.i.extend([_MakeInt(x, key) for x in value]) elif attr_def.type == "float": attr_value.f = _MakeFloat(value, key) elif attr_def.type == "list(float)": attr_value.list.f.extend([_MakeFloat(x, key) for x in value]) elif attr_def.type == "bool": attr_value.b = _MakeBool(value, key) elif attr_def.type == "list(bool)": attr_value.list.b.extend([_MakeBool(x, key) for x in value]) elif attr_def.type == "type": attr_value.type = _MakeType(value, attr_def) elif attr_def.type == "list(type)": attr_value.list.type.extend( [_MakeType(x, attr_def) for x in value]) elif attr_def.type == "shape": attr_value.shape.CopyFrom(_MakeShape(value, key)) elif attr_def.type == "list(shape)": attr_value.list.shape.extend( [_MakeShape(x, key) for x in value]) elif attr_def.type == "tensor": attr_value.tensor.CopyFrom(_MakeTensor(value, key)) elif attr_def.type == "list(tensor)": attr_value.list.tensor.extend( [_MakeTensor(x, key) for x in value]) elif attr_def.type == "func": if not isinstance(value, compat.bytes_or_text_types): raise TypeError("Expects a string for the func name") attr_value.func.name = value else: raise TypeError("Unrecognized Attr type " + attr_def.type) attr_protos[key] = attr_value del attrs # attrs is no longer authoritative, use attr_protos instead # Determine output types (possibly using attrs) output_types = [] output_structure = [] for arg in op_def.output_arg: types = [] if arg.number_attr: n = _AttrValue(attr_protos, arg.number_attr).i if arg.type_attr: types = [_AttrValue(attr_protos, arg.type_attr).type] * n else: types = [arg.type] * n output_structure.append(n) elif arg.type_attr: t = _AttrValue(attr_protos, arg.type_attr) types = [t.type] output_structure.append(None) elif arg.type_list_attr: t = _AttrValue(attr_protos, arg.type_list_attr) types = t.list.type output_structure.append(len(t.list.type)) else: types = [arg.type] output_structure.append(None) if arg.is_ref: types = [dtypes.as_dtype(x).as_ref for x in types] output_types.extend(types) if keywords: raise TypeError("apply_op() got unexpected keyword arguments: " + ", ".join(sorted(keywords.keys()))) # NOTE(mrry): We add an explicit colocation constraint between # the newly created op and any of its reference-typed inputs. must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs) if arg.is_ref] with _MaybeColocateWith(must_colocate_inputs): # Add Op to graph if output_structure: op = g.create_op(op_type_name, inputs, output_types, name=scope, input_types=input_types, attrs=attr_protos, op_def=op_def) outputs = op.outputs return _Restructure(ops.convert_n_to_tensor(outputs), output_structure) else: return g.create_op(op_type_name, inputs, output_types, name=scope, input_types=input_types, attrs=attr_protos, op_def=op_def)
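# Standalone illustration (re-declared here so it runs without TensorFlow) of the
# _Flatten/_Restructure helpers defined earlier in this module: apply_op() flattens
# keyword values to locate a graph, then restructures the op's flat output list back
# into the shape implied by the OpDef's output arguments.
def _flatten(l):
    l_of_l = [x if isinstance(x, (list, tuple)) else [x] for x in l]
    return [item for sublist in l_of_l for item in sublist]

def _restructure(l, structure):
    result, index = [], 0
    for element in structure:
        if element is None:
            result.append(l[index])
            index += 1
        else:
            result.append(l[index:index + element])
            index += element
    return result[0] if len(result) == 1 else tuple(result)

flat = _flatten(["foo", ["bar", "baz"], ["qux"]])
print(flat)                                  # ['foo', 'bar', 'baz', 'qux']
print(_restructure(flat, [None, 2, None]))   # ('foo', ['bar', 'baz'], 'qux')
print(_restructure(["foo"], [None]))         # foo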
"""Tests for the Freebox config flow.""" from unittest.mock import AsyncMock, patch from aiofreepybox.exceptions import ( AuthorizationError, HttpRequestError, InvalidTokenError, ) import pytest from homeassistant import data_entry_flow from homeassistant.components.freebox.const import DOMAIN from homeassistant.config_entries import SOURCE_DISCOVERY, SOURCE_IMPORT, SOURCE_USER from homeassistant.const import CONF_HOST, CONF_PORT from tests.common import MockConfigEntry HOST = "myrouter.freeboxos.fr" PORT = 1234 @pytest.fixture(name="connect") def mock_controller_connect(): """Mock a successful connection.""" with patch("homeassistant.components.freebox.router.Freepybox") as service_mock: service_mock.return_value.open = AsyncMock() service_mock.return_value.system.get_config = AsyncMock( return_value={ "mac": "abcd", "model_info": {"pretty_name": "Pretty Model"}, "firmware_version": "123", } ) service_mock.return_value.lan.get_hosts_list = AsyncMock() service_mock.return_value.connection.get_status = AsyncMock() service_mock.return_value.close = AsyncMock() yield service_mock async def test_user(hass): """Test user config.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "user" # test with all provided result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: HOST, CONF_PORT: PORT}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "link" async def test_import(hass): """Test import step.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_HOST: HOST, CONF_PORT: PORT}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "link" async def test_discovery(hass): """Test discovery step.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_DISCOVERY}, data={CONF_HOST: HOST, CONF_PORT: PORT}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "link" async def test_link(hass, connect): """Test linking.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: HOST, CONF_PORT: PORT}, ) result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["result"].unique_id == HOST assert result["title"] == HOST assert result["data"][CONF_HOST] == HOST assert result["data"][CONF_PORT] == PORT async def test_abort_if_already_setup(hass): """Test we abort if component is already setup.""" MockConfigEntry( domain=DOMAIN, data={CONF_HOST: HOST, CONF_PORT: PORT}, unique_id=HOST ).add_to_hass(hass) # Should fail, same HOST (import) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_HOST: HOST, CONF_PORT: PORT}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" # Should fail, same HOST (flow) result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: HOST, CONF_PORT: PORT}, ) assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT assert result["reason"] == "already_configured" async def test_on_link_failed(hass): """Test when we have errors during linking the router.""" result = await 
hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: HOST, CONF_PORT: PORT}, ) with patch( "homeassistant.components.freebox.router.Freepybox.open", side_effect=AuthorizationError(), ): result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {"base": "register_failed"} with patch( "homeassistant.components.freebox.router.Freepybox.open", side_effect=HttpRequestError(), ): result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {"base": "cannot_connect"} with patch( "homeassistant.components.freebox.router.Freepybox.open", side_effect=InvalidTokenError(), ): result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["errors"] == {"base": "unknown"}
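# Minimal, self-contained sketch (not Home Assistant code) of the mocking pattern the
# tests above rely on: the router's async open() call is replaced by an AsyncMock whose
# side_effect raises, so each error branch can be exercised without a real Freebox.
# FakeRouterError and link_router are hypothetical stand-ins for the real classes.
import asyncio
from unittest.mock import AsyncMock

class FakeRouterError(Exception):
    """Stand-in for AuthorizationError / HttpRequestError in the real tests."""

async def link_router(open_call):
    try:
        await open_call()
        return {"errors": {}}
    except FakeRouterError:
        return {"errors": {"base": "register_failed"}}

async def main():
    ok = AsyncMock(return_value=None)
    failing = AsyncMock(side_effect=FakeRouterError())
    assert (await link_router(ok))["errors"] == {}
    assert (await link_router(failing))["errors"] == {"base": "register_failed"}

asyncio.run(main())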
from random import randint
import playsound
from time import sleep
print('-=-' * 20)
print('Vou pensar em um número entre 0 e 5. Tente adivinhar... ')
print('-=-' * 20)
jogador = int(input('Em que número você pensou? '))
print('PROCESSANDO... ')
sleep(3)
computador = randint(0, 5)
if jogador == computador:
    print('PARABÉNS! Você acertou! Eu escolhi {} e você escolheu {} também! '.format(computador, jogador))
    playsound.playsound('ex028.mp3')
else:
    print('VOCÊ ERROU! Eu escolhi {} e você escolheu {}'.format(computador, jogador))
    playsound.playsound('errou.mp3')
print('Foi muito bom jogar com você!')
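# Hedged variant sketch, separate from the exercise above: the same guessing game with
# basic input validation, so a non-numeric answer does not raise ValueError.  The .mp3
# playback is left out on purpose; only the standard library is used.
from random import randint

while True:
    resposta = input('Em que número você pensou? (0 a 5) ')
    if resposta.strip().isdigit() and 0 <= int(resposta) <= 5:
        jogador = int(resposta)
        break
    print('Entrada inválida, tente novamente.')

computador = randint(0, 5)
if jogador == computador:
    print('PARABÉNS! Nós dois escolhemos {}.'.format(computador))
else:
    print('Você errou! Eu escolhi {} e você escolheu {}.'.format(computador, jogador))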
""" PI power 5V on pin GND on pin The GPIO mode is set to BCM H-Bridge Motor Driver Pin Configuration in1 -> BCM 05 (board pin 29 or GPIO 5) in2 -> BCM 06 (board pin 31 or GPIO 6) enable -> BCM 13 (board pin 33 or GPIO 13, PWM) PCA9685 (16-Channel Servo Driver) Pin Configuration SDA -> BCM 2 (board pin 3, GPIO 2) SCL -> BCM 3 (board pin 5, GPIO 3) VCC -> Board Pin 1 (3.3v) GND -> Board Pin 9 HC-SR04 (Sonar Distance Sensor) Trig -> BCM 23 (board pin 16 or GPIO 23) Echo -> BCM 24 (board pin 18 or GPIO 24) VCC -> Board Pin 17 (3.3v) GND -> Board Pin 20 """ from adafruit_servokit import ServoKit from gpiozero import Motor, PWMOutputDevice from time import sleep from enum import Enum class ServoCh(Enum): STEERING = 0 CAM_PAN = 1 CAM_TILT = 2 TRIGHT_HYDR = 4 TLEFT_HYDR = 5 BRIGHT_HYDR = 6 BLEFT_HYDR = 7 class PiCar: def __init__(self): self.motorDriver = HBridgeMotorDriver(in1=5, in2=6, enable=13) self.servoDiver = ServoDriver(sda=2, scl=3) def f(self): pass class HBridgeMotorDriver: def __init__(self, in1, in2, enable): self.in1 = in1 self.in2 = in2 self.enable = enable # this gpio is pwm self.pwmEnable = PWMOutputDevice(enable, frequency=100) self.motor = Motor(forward=in1, backward=in2) self.pwmEnable.on() self.currSpeed = 0.0 # def start(self, startPWMDutyCycle: float = 1.0): # self.pwmEnable.on() # self.pwmEnable.value = startPWMDutyCycle # # def stop(self): # self.pwmEnable.value = 0.0 # # self.pwmEnable.off() def slowStart(self, accelRate: int = 1, perSec: float = 1, speedFrom: float = 0): self.accelerate(rate=accelRate, perSec=perSec, speedFrom=speedFrom) self.pwmEnable.value = 0.0 def slowStop(self, decelRate: int = 1, perSec: float = 1, speedFrom: float = 100): self.decelerate(rate=decelRate, perSec=perSec, speedFrom=speedFrom) self.pwmEnable.value = 0.0 # self.pwmEnable.off() def accelerate(self, rate: int = 1, perSec: float = 1, speedFrom: float = 0, speedTo: float = 100): if speedFrom < 0 or speedTo < 0: # in physics its posible to have negative speed but lets keep it positive for now print("one of the speed is negative") return if speedTo > speedFrom: print("Cant accelerate to a speed less than the start speed, do you want to decelerate instead? ") print("ERROR: accelerate Speed From: {} -> Speed To: {}".format(speedFrom, speedTo)) return if rate < 0: print("Cant accelerate at a negative rate, , do you want to decelerate instead?") return if rate == 0: print("going constant speed") return if rate > 100: rate = 100 print("Accelerating at a rate of {} unit/sec".format(rate)) for currRate in range(int(speedFrom), 101, rate): dutyCycle = currRate / 100 self.pwmEnable.value = dutyCycle currSpeed = currRate / perSec print("Current Speed: {} unit/sec".format(currSpeed)) if currSpeed >= speedTo: print("Accelerating stopped, speed limit of {} unit/sec reached".format(speedTo)) break sleep(perSec) def decelerate(self, rate: int = 1, perSec: float = 1, speedFrom: float = 100, speedTo: float = 0): if speedFrom < 0 or speedTo < 0: # in physics its posible to have negative speed but lets keep it positive for now print("one of the speed is negative") return if speedTo > speedFrom: print("Cant decelerate to a speed higher than the start speed, do you want to accelerate instead? 
") print("ERROR: Decelerate Speed From: {} -> Speed To: {}".format(speedFrom, speedTo)) return if rate < 0: rate *= -1 if rate == 0: print("going constant speed") return if rate > 100: rate = 100 print("Decelerating at a rate of {} unit/sec".format(rate)) for r in range(int(speedFrom), 101, rate): currRate = speedFrom - r dutyCycle = currRate / 100 self.pwmEnable.value = dutyCycle currSpeed = currRate / perSec print("Current Speed: {} unit/sec".format(currSpeed)) if currSpeed <= speedTo: print("Accelerating stopped, speed limit of {} unit/sec reached".format(speedTo)) break sleep(perSec) def forward(self, pwmDutyCycle: float = 1.0): self.motor.forward() self.pwmEnable.value = pwmDutyCycle def backward(self, pwmDutyCycle: float = 1.0): # self.motor.backward(pwmDutyCycle) self.motor.backward() self.pwmEnable.value = pwmDutyCycle def halt(self): self.pwmEnable.off() class ServoDriver: def __init__(self, sda, scl): self.sda = sda self.scl = scl # self.vccPin = 17 # self.gndPin = 20 self.kit = ServoKit(channels=16) class DistanceSensor: pass if __name__ == "__main__": try: print("") except KeyboardInterrupt: print("Program Stopped via keyboard interrupt")
# Natural Language Toolkit: Zen Chatbot # # Copyright (C) 2001-2021 NLTK Project # Author: Amy Holland <amyrh@csse.unimelb.edu.au> # URL: <http://nltk.org/> # For license information, see LICENSE.TXT """ Zen Chatbot talks in gems of Zen wisdom. This is a sample conversation with Zen Chatbot: ZC: Welcome, my child. me: Good afternoon. ZC: Ask the question you have come to ask. me: How can I achieve enlightenment? ZC: How do you suppose? me: Through meditation. ZC: Form is emptiness, and emptiness form. me: How can I empty my mind of worldly troubles? ZC: Will an answer to that really help in your search for enlightenment? me: Yes. ZC: It is better to be right than to be certain. me: I seek truth and wisdom. ZC: The search for truth is a long journey. me: Are you sure? ZC: Maybe sure, maybe not sure. The chatbot structure is based on that of chat.eliza. Thus, it uses a translation table to convert from question to response i.e. "I am" --> "you are" Of course, since Zen Chatbot does not understand the meaning of any words, responses are very limited. Zen Chatbot will usually answer very vaguely, or respond to a question by asking a different question, in much the same way as Eliza. """ from nltk.chat.util import Chat, reflections # responses are matched top to bottom, so non-specific matches occur later # for each match, a list of possible responses is provided responses = ( # Zen Chatbot opens with the line "Welcome, my child." The usual # response will be a greeting problem: 'good' matches "good morning", # "good day" etc, but also "good grief!" and other sentences starting # with the word 'good' that may not be a greeting ( r"(hello(.*))|(good [a-zA-Z]+)", ( "The path to enlightenment is often difficult to see.", "Greetings. I sense your mind is troubled. Tell me of your troubles.", "Ask the question you have come to ask.", "Hello. Do you seek englightenment?", ), ), # "I need" and "I want" can be followed by a thing (eg 'help') # or an action (eg 'to see you') # # This is a problem with this style of response - # person: "I need you" # chatbot: "me can be achieved by hard work and dedication of the mind" # i.e. 'you' is not really a thing that can be mapped this way, so this # interpretation only makes sense for some inputs # ( r"i need (.*)", ( "%1 can be achieved by hard work and dedication of the mind.", "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.", "Focus your mind on%1, and you will find what you need.", ), ), ( r"i want (.*)", ( "Desires of the heart will distract you from the path to enlightenment.", "Will%1 help you attain enlightenment?", "Is%1 a desire of the mind, or of the heart?", ), ), # why questions are separated into three types: # "why..I" e.g. "why am I here?" "Why do I like cake?" # "why..you" e.g. "why are you here?" "Why won't you tell me?" # "why..." e.g. "Why is the sky blue?" # problems: # person: "Why can't you tell me?" # chatbot: "Are you sure I tell you?" # - this style works for positives (e.g. "why do you like cake?") # but does not work for negatives (e.g. "why don't you like cake?") (r"why (.*) i (.*)\?", ("You%1%2?", "Perhaps you only think you%1%2")), (r"why (.*) you(.*)\?", ("Why%1 you%2?", "%2 I%1", "Are you sure I%2?")), (r"why (.*)\?", ("I cannot tell you why%1.", "Why do you think %1?")), # e.g. "are you listening?", "are you a duck" ( r"are you (.*)\?", ("Maybe%1, maybe not%1.", "Whether I am%1 or not is God's business."), ), # e.g. "am I a duck?", "am I going to die?" 
( r"am i (.*)\?", ("Perhaps%1, perhaps not%1.", "Whether you are%1 or not is not for me to say."), ), # what questions, e.g. "what time is it?" # problems: # person: "What do you want?" # chatbot: "Seek truth, not what do me want." (r"what (.*)\?", ("Seek truth, not what%1.", "What%1 should not concern you.")), # how questions, e.g. "how do you do?" ( r"how (.*)\?", ( "How do you suppose?", "Will an answer to that really help in your search for enlightenment?", "Ask yourself not how, but why.", ), ), # can questions, e.g. "can you run?", "can you come over here please?" ( r"can you (.*)\?", ( "I probably can, but I may not.", "Maybe I can%1, and maybe I cannot.", "I can do all, and I can do nothing.", ), ), # can questions, e.g. "can I have some cake?", "can I know truth?" ( r"can i (.*)\?", ( "You can%1 if you believe you can%1, and have a pure spirit.", "Seek truth and you will know if you can%1.", ), ), # e.g. "It is raining" - implies the speaker is certain of a fact ( r"it is (.*)", ( "How can you be certain that%1, when you do not even know yourself?", "Whether it is%1 or not does not change the way the world is.", ), ), # e.g. "is there a doctor in the house?" ( r"is there (.*)\?", ("There is%1 if you believe there is.", "It is possible that there is%1."), ), # e.g. "is it possible?", "is this true?" (r"is(.*)\?", ("%1 is not relevant.", "Does this matter?")), # non-specific question ( r"(.*)\?", ( "Do you think %1?", "You seek the truth. Does the truth seek you?", "If you intentionally pursue the answers to your questions, the answers become hard to see.", "The answer to your question cannot be told. It must be experienced.", ), ), # expression of hate of form "I hate you" or "Kelly hates cheese" ( r"(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)", ( "Perhaps it is not about hating %2, but about hate from within.", "Weeds only grow when we dislike them", "Hate is a very strong emotion.", ), ), # statement containing the word 'truth' ( r"(.*) truth(.*)", ( "Seek truth, and truth will seek you.", "Remember, it is not the spoon which bends - only yourself.", "The search for truth is a long journey.", ), ), # desire to do an action # e.g. "I want to go shopping" ( r"i want to (.*)", ("You may %1 if your heart truly desires to.", "You may have to %1."), ), # desire for an object # e.g. "I want a pony" ( r"i want (.*)", ( "Does your heart truly desire %1?", "Is this a desire of the heart, or of the mind?", ), ), # e.g. "I can't wait" or "I can't do this" ( r"i can\'t (.*)", ( "What we can and can't do is a limitation of the mind.", "There are limitations of the body, and limitations of the mind.", "Have you tried to%1 with a clear mind?", ), ), # "I think.." indicates uncertainty. e.g. "I think so." # problem: exceptions... # e.g. "I think, therefore I am" ( r"i think (.*)", ( "Uncertainty in an uncertain world.", "Indeed, how can we be certain of anything in such uncertain times.", "Are you not, in fact, certain that%1?", ), ), # "I feel...emotions/sick/light-headed..." ( r"i feel (.*)", ( "Your body and your emotions are both symptoms of your mind." "What do you believe is the root of such feelings?", "Feeling%1 can be a sign of your state-of-mind.", ), ), # exclaimation mark indicating emotion # e.g. "Wow!" or "No!" ( r"(.*)!", ( "I sense that you are feeling emotional today.", "You need to calm your emotions.", ), ), # because [statement] # e.g. 
"because I said so" ( r"because (.*)", ( "Does knowning the reasons behind things help you to understand" " the things themselves?", "If%1, what else must be true?", ), ), # yes or no - raise an issue of certainty/correctness ( r"(yes)|(no)", ( "Is there certainty in an uncertain world?", "It is better to be right than to be certain.", ), ), # sentence containing word 'love' ( r"(.*)love(.*)", ( "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.", "Free love!", ), ), # sentence containing word 'understand' - r ( r"(.*)understand(.*)", ( "If you understand, things are just as they are;" " if you do not understand, things are just as they are.", "Imagination is more important than knowledge.", ), ), # 'I', 'me', 'my' - person is talking about themself. # this breaks down when words contain these - eg 'Thyme', 'Irish' ( r"(.*)(me )|( me)|(my)|(mine)|(i)(.*)", ( "'I', 'me', 'my'... these are selfish expressions.", "Have you ever considered that you might be a selfish person?", "Try to consider others, not just yourself.", "Think not just of yourself, but of others.", ), ), # 'you' starting a sentence # e.g. "you stink!" ( r"you (.*)", ("My path is not of concern to you.", "I am but one, and you but one more."), ), # say goodbye with some extra Zen wisdom. ( r"exit", ( "Farewell. The obstacle is the path.", "Farewell. Life is a journey, not a destination.", "Good bye. We are cups, constantly and quietly being filled." "\nThe trick is knowning how to tip ourselves over and let the beautiful stuff out.", ), ), # fall through case - # when stumped, respond with generic zen wisdom # ( r"(.*)", ( "When you're enlightened, every word is wisdom.", "Random talk is useless.", "The reverse side also has a reverse side.", "Form is emptiness, and emptiness is form.", "I pour out a cup of water. Is the cup empty?", ), ), ) zen_chatbot = Chat(responses, reflections) def zen_chat(): print("*" * 75) print("Zen Chatbot!".center(75)) print("*" * 75) print('"Look beyond mere words and letters - look into your mind"'.center(75)) print("* Talk your way to truth with Zen Chatbot.") print("* Type 'quit' when you have had enough.") print("*" * 75) print("Welcome, my child.") zen_chatbot.converse() def demo(): zen_chat() if __name__ == "__main__": demo()
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # InSilicoSeq documentation build configuration file, created by # sphinx-quickstart on Tue May 30 11:45:01 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) from iss.version import __version__ # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'InSilicoSeq' copyright = '2017, Hadrien Gourle' author = 'Hadrien Gourle' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__[:-2] # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. 
htmlhelp_basename = 'InSilicoSeqdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'InSilicoSeq.tex', 'InSilicoSeq Documentation', 'Hadrien Gourlé', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'insilicoseq', 'InSilicoSeq Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'InSilicoSeq', 'InSilicoSeq Documentation', author, 'InSilicoSeq', 'One line description of project.', 'Miscellaneous'), ]
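# Small, self-contained illustration of the version/release split configured above.
# The value of iss.version.__version__ is not shown here, so "1.6.0" is only an assumed
# example; note that the [:-2] slice drops exactly two characters, which matches
# "X.Y.Z"-style strings with a single-digit patch number.
example_version = "1.6.0"          # hypothetical __version__
release_string = example_version   # full version, including alpha/beta/rc tags
short_version = example_version[:-2]
print(short_version, release_string)   # -> 1.6 1.6.0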
from site_settings.models import GATEWAY_CAN_EDIT_SUBSCRIPTION, GATEWAY_CAN_TOGGLE_SUBSCRIPTION, GATEWAY_CAN_CANCEL_SUBSCRIPTION

API_CAPABILITIES = [GATEWAY_CAN_EDIT_SUBSCRIPTION, GATEWAY_CAN_CANCEL_SUBSCRIPTION]
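# Hedged usage sketch: other code in the project can test a gateway feature by membership
# in API_CAPABILITIES.  The constant below is purely hypothetical and only stands in for
# the imported GATEWAY_* values, whose concrete contents are not shown in this module.
HYPOTHETICAL_CAPABILITY = "cancel_subscription"

def gateway_supports(capability, capabilities=None):
    """Return True when the configured gateway advertises the given capability."""
    if capabilities is None:
        capabilities = [HYPOTHETICAL_CAPABILITY]
    return capability in capabilities

print(gateway_supports(HYPOTHETICAL_CAPABILITY))   # True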
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import glob import re import time from os.path import basename from subprocess import PIPE, Popen from sys import platform, stdout from llnl.util import tty from spack import * is_windows = platform == 'win32' if not is_windows: from fcntl import F_GETFL, F_SETFL, fcntl from os import O_NONBLOCK re_optline = re.compile(r'\s+[0-9]+\..*\((serial|smpar|dmpar|dm\+sm)\)\s+') re_paroptname = re.compile(r'\((serial|smpar|dmpar|dm\+sm)\)') re_paroptnum = re.compile(r'\s+([0-9]+)\.\s+\(') re_nestline = re.compile(r'\(([0-9]+=[^)0-9]+)+\)') re_nestoptnum = re.compile(r'([0-9]+)=') re_nestoptname = re.compile(r'=([^,)]+)') def setNonBlocking(fd): """ Set the given file descriptor to non-blocking Non-blocking pipes are not supported on windows """ flags = fcntl(fd, F_GETFL) | O_NONBLOCK fcntl(fd, F_SETFL, flags) def collect_platform_options(stdoutpipe): # Attempt to parse to collect options optiondict = {} for line in stdoutpipe.splitlines(): if re_optline.match(line): numbers = re_paroptnum.findall(line) entries = re_paroptname.findall(line) paropts = dict(zip(entries, numbers)) platline = re_optline.sub("", line).strip() optiondict[platline] = paropts return optiondict def collect_nesting_options(stdoutpipe): nestoptline = re_nestline.search(stdoutpipe)[0] nestoptnum = re_nestoptnum.findall(nestoptline) nestoptname = re_nestoptname.findall(nestoptline) nestoptname = [x.replace(" ", "_") for x in nestoptname] return dict(zip(nestoptname, nestoptnum)) class Wrf(Package): """The Weather Research and Forecasting (WRF) Model is a next-generation mesoscale numerical weather prediction system designed for both atmospheric research and operational forecasting applications. """ homepage = "https://www.mmm.ucar.edu/weather-research-and-forecasting-model" url = "https://github.com/wrf-model/WRF/archive/v4.2.tar.gz" maintainers = ["MichaelLaufer", "ptooley"] version("4.3.3", sha256='1b98b8673513f95716c7fc54e950dfebdb582516e22758cd94bc442bccfc0b86') version("4.3.2", sha256='2c682da0cd0fd13f57d5125eef331f9871ec6a43d860d13b0c94a07fa64348ec') version("4.3.1", sha256='6c9a69d05ee17d2c80b3699da173cfe6fdf65487db7587c8cc96bfa9ceafce87') version("4.2", sha256="c39a1464fd5c439134bbd39be632f7ce1afd9a82ad726737e37228c6a3d74706") version("4.0", sha256="9718f26ee48e6c348d8e28b8bc5e8ff20eafee151334b3959a11b7320999cf65") version("3.9.1.1", sha256="a04f5c425bedd262413ec88192a0f0896572cc38549de85ca120863c43df047a", url="https://github.com/wrf-model/WRF/archive/V3.9.1.1.tar.gz") resource(name='elec', url='https://master.dl.sourceforge.net/project/wrfelec/WRFV3911_elec.beta_release.01.tgz', sha256='eaaece04711a2883f39349f0857468b42af1a6f8d0985759ce5dfde4058316b4', when='@3.9.1.1+elec', destination='.' 
) variant( "build_type", default="dmpar", values=("serial", "smpar", "dmpar", "dm+sm"), ) variant( "nesting", default="basic", values=("no_nesting", "basic", "preset_moves", "vortex_following"), ) variant( "compile_type", default="em_real", values=( "em_real", "em_quarter_ss", "em_b_wave", "em_les", "em_heldsuarez", "em_tropical_cyclone", "em_hill2d_x", "em_squall2d_x", "em_squall2d_y", "em_grav2d_x", "em_seabreeze2d_x", "em_scm_xy", ), ) variant( "pnetcdf", default=True, description="Parallel IO support through Pnetcdf library", ) variant( "elec", default=False, description="Compile support for the storm electrification package" + "for the WRF-ARW" ) conflicts("@4.0:", when="+elec", msg="WRF_ELEC is only supported in V3.9.1.1") patch("patches/3.9/netcdf_backport.patch", when="@3.9.1.1") patch("patches/3.9/tirpc_detect.patch", when="@3.9.1.1") patch("patches/3.9/add_aarch64.patch", when="@3.9.1.1") patch("patches/3.9/force_flags.patch", when="@3.9.1.1 %gcc@10:") patch("patches/3.9/configure_aocc_2.3.patch", when="@3.9.1.1 %aocc@:2.4.0") patch("patches/3.9/configure_aocc_3.0.patch", when="@3.9.1.1 %aocc@3.0.0") patch("patches/3.9/configure_aocc_3.1.patch", when="@3.9.1.1 %aocc@3.1.0") patch("patches/3.9/fujitsu.patch", when="@3.9.1.1 %fj") patch("patches/3.9/add_elec_support.patch", when="@3.9.1.1+elec") patch("patches/3.9/add_elec_changes.patch", when="@3.9.1.1+elec") # These patches deal with netcdf & netcdf-fortran being two diff things # Patches are based on: # https://github.com/easybuilders/easybuild-easyconfigs/blob/master/easybuild/easyconfigs/w/WRF/WRF-3.5_netCDF-Fortran_separate_path.patch patch("patches/4.0/arch.Config.pl.patch", when="@4.0") patch("patches/4.0/arch.configure.defaults.patch", when="@4.0") patch("patches/4.0/arch.conf_tokens.patch", when="@4.0") patch("patches/4.0/arch.postamble.patch", when="@4.0") patch("patches/4.0/configure.patch", when="@4.0") patch("patches/4.0/external.io_netcdf.makefile.patch", when="@4.0") patch("patches/4.0/Makefile.patch", when="@4.0") patch("patches/4.0/tirpc_detect.patch", when="@4.0") patch("patches/4.0/add_aarch64.patch", when="@4.0") patch("patches/4.2/arch.Config.pl.patch", when="@4.2:") patch("patches/4.2/arch.configure.defaults.patch", when="@4.2") patch("patches/4.2/arch.conf_tokens.patch", when="@4.2:") patch("patches/4.2/arch.postamble.patch", when="@4.2") patch("patches/4.2/configure.patch", when="@4.2:") patch("patches/4.2/external.io_netcdf.makefile.patch", when="@4.2:") patch("patches/4.2/var.gen_be.Makefile.patch", when="@4.2:") patch("patches/4.2/Makefile.patch", when="@4.2") patch("patches/4.2/tirpc_detect.patch", when="@4.2") patch("patches/4.2/add_aarch64.patch", when="@4.2:") patch("patches/4.2/configure_aocc_2.3.patch", when="@4.2 %aocc@:2.4.0") patch("patches/4.2/configure_aocc_3.0.patch", when="@4.2: %aocc@3.0.0:3.2.0") patch("patches/4.2/hdf5_fix.patch", when="@4.2: %aocc") patch("patches/4.2/derf_fix.patch", when="@4.2 %aocc") # Various syntax fixes found by FPT tool patch("https://github.com/wrf-model/WRF/commit/6502d5d9c15f5f9a652dec244cc12434af737c3c.patch?full_index=1", sha256="c5162c23a132b377132924f8f1545313861c6cee5a627e9ebbdcf7b7b9d5726f", when="@4.2 %fj") patch("patches/4.2/configure_fujitsu.patch", when="@4 %fj") patch("patches/4.3/Makefile.patch", when="@4.3:") patch("patches/4.3/arch.postamble.patch", when="@4.3:") patch("patches/4.3/fujitsu.patch", when="@4.3: %fj") # Syntax errors in physics routines 
patch("https://github.com/wrf-model/WRF/commit/7c6fd575b7a8fe5715b07b38db160e606c302956.patch?full_index=1", sha256="1ce97f4fd09e440bdf00f67711b1c50439ac27595ea6796efbfb32e0b9a1f3e4", when="@4.3.1") patch("https://github.com/wrf-model/WRF/commit/238a7d219b7c8e285db28fe4f0c96ebe5068d91c.patch?full_index=1", sha256="27c7268f6c84b884d21e4afad0bab8554b06961cf4d6bfd7d0f5a457dcfdffb1", when="@4.3.1") depends_on("pkgconfig", type=("build")) depends_on("libtirpc") depends_on("mpi") # According to: # http://www2.mmm.ucar.edu/wrf/users/docs/user_guide_v4/v4.0/users_guide_chap2.html#_Required_Compilers_and_1 # Section: "Required/Optional Libraries to Download" depends_on("parallel-netcdf", when="+pnetcdf") depends_on("netcdf-c") depends_on("netcdf-fortran") depends_on("jasper") depends_on("libpng") depends_on("zlib") depends_on("perl") depends_on("jemalloc", when="%aocc") # not sure if +fortran is required, but seems like a good idea depends_on("hdf5+fortran+hl+mpi") # build script use csh depends_on("tcsh", type=("build")) # time is not installed on all systems b/c bash provides it # this fixes that for csh install scripts depends_on("time", type=("build")) depends_on("m4", type="build") depends_on("libtool", type="build") depends_on("boxmg4wrf", type="build", when="+elec") depends_on("tar", type="build", when="+elec") phases = ["configure", "build", "install"] def setup_run_environment(self, env): env.set("WRF_HOME", self.prefix) env.append_path("PATH", self.prefix.main) env.append_path("PATH", self.prefix.tools) def setup_build_environment(self, env): env.set("NETCDF", self.spec["netcdf-c"].prefix) if "+pnetcdf" in self.spec: env.set("PNETCDF", self.spec["parallel-netcdf"].prefix) # This gets used via the applied patch files env.set("NETCDFF", self.spec["netcdf-fortran"].prefix) env.set("PHDF5", self.spec["hdf5"].prefix) env.set("JASPERINC", self.spec["jasper"].prefix.include) env.set("JASPERLIB", self.spec["jasper"].prefix.lib) if self.spec.satisfies("%gcc@10:"): args = "-w -O2 -fallow-argument-mismatch -fallow-invalid-boz" env.set("FCFLAGS", args) env.set("FFLAGS", args) if self.spec.satisfies("%aocc"): env.set("WRFIO_NCD_LARGE_FILE_SUPPORT", 1) env.set("HDF5", self.spec["hdf5"].prefix) env.prepend_path('PATH', ancestor(self.compiler.cc)) if self.spec.satisfies("+elec"): env.set("WRF_ELEC", 1) env.set("BOXMGLIBDIR", self.spec["boxmg4wrf"].prefix) def patch(self): # Let's not assume csh is intalled in bin files = glob.glob("*.csh") filter_file("^#!/bin/csh -f", "#!/usr/bin/env csh", *files) filter_file("^#!/bin/csh", "#!/usr/bin/env csh", *files) def answer_configure_question(self, outputbuf): # Platform options question: if "Please select from among the following" in outputbuf: options = collect_platform_options(outputbuf) comp_pair = "%s/%s" % ( basename(self.compiler.fc).split("-")[0], basename(self.compiler.cc).split("-")[0], ) compiler_matches = dict( (x, y) for x, y in options.items() if comp_pair in x.lower() ) if len(compiler_matches) > 1: tty.warn("Found multiple potential build options") try: compiler_key = min(compiler_matches.keys(), key=len) tty.warn("Selected build option %s." % compiler_key) return ( "%s\n" % compiler_matches[compiler_key][ self.spec.variants["build_type"].value ] ) except KeyError: InstallError( "build_type %s unsupported for %s compilers" % (self.spec.variants["build_type"].value, comp_pair) ) if "Compile for nesting?" 
in outputbuf: options = collect_nesting_options(outputbuf) try: return "%s\n" % options[self.spec.variants["nesting"].value] except KeyError: InstallError("Failed to parse correct nesting option") def do_configure_fixup(self): # Fix mpi compiler wrapper aliases # In version 4.2 the file to be patched is called # configure.defaults, while in earlier versions # it's configure_new.defaults if self.spec.satisfies("@3.9.1.1"): config = FileFilter(join_path('arch', 'configure_new.defaults')) else: config = FileFilter(join_path('arch', 'configure.defaults')) if self.spec.satisfies("@3.9.1.1 %gcc"): config.filter(r'^DM_FC.*mpif90 -f90=\$\(SFC\)', 'DM_FC = {0}'.format(self.spec['mpi'].mpifc)) config.filter(r'^DM_CC.*mpicc -cc=\$\(SCC\)', 'DM_CC = {0}'.format(self.spec['mpi'].mpicc)) if self.spec.satisfies("%aocc"): config.filter( '^DM_FC.*mpif90 -DMPI2SUPPORT', 'DM_FC = {0}'.format(self.spec['mpi'].mpifc + ' -DMPI2_SUPPORT') ) config.filter( '^DM_.CC*mpicc -DMPI2SUPPORT', 'DM_CC = {0}'.format(self.spec['mpi'].mpicc) + ' -DMPI2_SUPPORT' ) if self.spec.satisfies("@4.2: %intel"): config.filter('^DM_FC.*mpif90', 'DM_FC = {0}'.format(self.spec['mpi'].mpifc)) config.filter('^DM_CC.*mpicc', 'DM_CC = {0}'.format(self.spec['mpi'].mpicc)) @run_before('configure') def untar(self): tar = which('tar') tar('-xvf', 'WRFV3911_elec/elec.tgz') def configure(self, spec, prefix): # Remove broken default options... self.do_configure_fixup() if self.spec.compiler.name not in ["intel", "gcc", "aocc", "fj"]: raise InstallError( "Compiler %s not currently supported for WRF build." % self.spec.compiler.name ) p = Popen("./configure", stdin=PIPE, stdout=PIPE, stderr=PIPE) if not is_windows: setNonBlocking(p.stdout) setNonBlocking(p.stderr) # Because of WRFs custom configure scripts that require interactive # input we need to parse and respond to questions. The details can # vary somewhat with the exact version, so try to detect and fail # gracefully on unexpected questions. stallcounter = 0 outputbuf = "" while True: line = p.stderr.readline().decode() if not line: line = p.stdout.readline().decode() if not line: if p.poll() is not None: returncode = p.returncode break if stallcounter > 300: raise InstallError( "Output stalled for 30s, presumably an " "undetected question." 
) time.sleep(0.1) # Try to do a bit of rate limiting stallcounter += 1 continue stdout.write(line) stallcounter = 0 outputbuf += line if ( "Enter selection" in outputbuf or "Compile for nesting" in outputbuf ): answer = self.answer_configure_question(outputbuf) p.stdin.write(answer.encode()) p.stdin.flush() outputbuf = "" if returncode != 0: raise InstallError("Configure failed - unknown error") @run_after("configure") def patch_for_libmvec(self): if self.spec.satisfies("@3.9.1.1 %aocc"): fp = self.package_dir + "/patches/3.9/aocc_lmvec.patch" which('patch')('-s', '-p1', '-i', '{0}'.format(fp), '-d', '.') def run_compile_script(self): csh_bin = self.spec["tcsh"].prefix.bin.csh csh = Executable(csh_bin) if self.spec.satisfies("+elec"): num_jobs = str(1) else: # num of compile jobs capped at 20 in wrf num_jobs = str(min(int(make_jobs), 10)) # Now run the compile script and track the output to check for # failure/success We need to do this because upstream use `make -i -k` # and the custom compile script will always return zero regardless of # success or failure result_buf = csh( "./compile", "-j", num_jobs, self.spec.variants["compile_type"].value, output=str, error=str ) print(result_buf) if "Executables successfully built" in result_buf: return True return False def build(self, spec, prefix): result = self.run_compile_script() if not result: tty.warn( "Compilation failed first time (WRF idiosyncrasies?) " "- trying again..." ) result = self.run_compile_script() if not result: raise InstallError( "Compile failed. Check the output log for details." ) def install(self, spec, prefix): # Save all install files as many are needed for WPS and WRF runs install_tree(".", prefix)
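A minimal, self-contained sketch of what collect_platform_options() extracts from WRF's interactive ./configure menu. The sample menu line is illustrative only (option numbers and compiler labels vary by WRF version); the regexes are copied from the helpers above.

import re

re_optline = re.compile(r'\s+[0-9]+\..*\((serial|smpar|dmpar|dm\+sm)\)\s+')
re_paroptname = re.compile(r'\((serial|smpar|dmpar|dm\+sm)\)')
re_paroptnum = re.compile(r'\s+([0-9]+)\.\s+\(')

# Illustrative line in the format printed by WRF's configure menu.
sample = "  32. (serial)  33. (smpar)  34. (dmpar)  35. (dm+sm)   GNU (gfortran/gcc)"

options = {}
for line in sample.splitlines():
    if re_optline.match(line):
        numbers = re_paroptnum.findall(line)    # ['32', '33', '34', '35']
        entries = re_paroptname.findall(line)   # ['serial', 'smpar', 'dmpar', 'dm+sm']
        platline = re_optline.sub("", line).strip()
        options[platline] = dict(zip(entries, numbers))

print(options)
# {'GNU (gfortran/gcc)': {'serial': '32', 'smpar': '33', 'dmpar': '34', 'dm+sm': '35'}}

answer_configure_question() then replies to the configure prompt with options[platline][build_type] followed by a newline.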
# Copyright 2019 The ROBEL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Configuration for a tracker component group.""" from typing import Iterable, Optional import numpy as np from transforms3d.euler import euler2mat, quat2euler from transforms3d.quaternions import quat2mat from robel.simulation.sim_scene import SimScene class TrackerGroupConfig: """Group configuration for a TrackerComponent.""" def __init__(self, sim_scene: SimScene, element_name: Optional[str] = None, element_type: Optional[str] = None, qpos_indices: Optional[Iterable[int]] = None, qvel_indices: Optional[Iterable[int]] = None, sim_observation_noise: Optional[float] = None): """Initializes a group configuration for a TrackerComponent. Args: sim_scene: The simulation, used for validation purposes. element_name: The name of the element to use for tracking in simulation. element_type: The type of the element as defined in the XML. Should be one of `site`, `body`, `geom`, or `joint`. If this is `joint`, `qpos_indices` and `qvel_indices` should be provided. qpos_indices: The indices into `MjData.qpos` to read for the joint element position and rotation. qvel_indices: The indices into `MjData.qvel` to read for the joint element velocity. This defaults to `qpos_indices`. sim_observation_noise: The range of the observation noise (in meters) to apply to the state in simulation. """ self.element_type = element_type if self.element_type not in ['site', 'body', 'geom', 'joint']: raise ValueError('Unknown element type %s' % self.element_type) self.element_name = element_name self.element_id = None self.element_attr = None self.qpos_indices = None self.qvel_indices = None self._is_euler = False if self.element_type == 'joint': if qpos_indices is None: raise ValueError('Must provided qpos_indices for joints.') # Ensure that the qpos indices are valid. nq = sim_scene.model.nq assert all(-nq <= i < nq for i in qpos_indices), \ 'All qpos indices must be in [-{}, {}]'.format(nq, nq - 1) self.qpos_indices = np.array(qpos_indices, dtype=int) if len(self.qpos_indices) == 6: self._is_euler = True elif len(self.qpos_indices) != 7: raise ValueError('qpos_indices must be 6 or 7 elements.') if qvel_indices is None: if not self._is_euler: raise ValueError( 'qvel_indices must be provided for free joints.') qvel_indices = qpos_indices # Ensure that the qvel indices are valid. 
nv = sim_scene.model.nv assert all(-nv <= i < nv for i in qvel_indices), \ 'All qvel indices must be in [-{}, {}]'.format(nv, nv - 1) self.qvel_indices = np.array(qvel_indices, dtype=int) else: self.element_attr = (lambda obj, attr_name: getattr( obj, self.element_type + '_' + attr_name)) self.element_id = self.element_attr(sim_scene.model, 'name2id')( element_name) self.sim_observation_noise = sim_observation_noise def get_pos(self, sim_scene: SimScene) -> np.ndarray: """Returns the cartesian position of the element.""" if self.qpos_indices is not None: return sim_scene.data.qpos[self.qpos_indices[:3]] return self.element_attr(sim_scene.data, 'xpos')[self.element_id, :] def get_rot(self, sim_scene: SimScene) -> np.ndarray: """Returns the (3x3) rotation matrix of the element.""" if self.qpos_indices is not None: qpos = sim_scene.data.qpos[self.qpos_indices[3:]] if self._is_euler: return euler2mat(*qpos, axes='rxyz') return quat2mat(qpos) return self.element_attr(sim_scene.data, 'xmat')[self.element_id].reshape((3, 3)) def get_vel(self, sim_scene: SimScene) -> np.ndarray: """Returns the cartesian velocity of the element.""" if self.qvel_indices is not None: return sim_scene.data.qvel[self.qvel_indices[:3]] raise NotImplementedError('Cartesian velocity is not supported for ' + self.element_type) def get_angular_vel(self, sim_scene: SimScene) -> np.ndarray: """Returns the angular velocity (x, y, z) of the element.""" if self.qvel_indices is not None: return sim_scene.data.qvel[self.qvel_indices[3:]] raise NotImplementedError('Angular velocity is not supported for ' + self.element_type) def set_pos(self, sim_scene: SimScene, pos: np.ndarray): """Sets the cartesian position of the element.""" if self.qpos_indices is not None: sim_scene.data.qpos[self.qpos_indices[:len(pos)]] = pos return self.element_attr(sim_scene.model, 'pos')[self.element_id, :len(pos)] = pos def set_rot_quat(self, sim_scene: SimScene, quat: np.ndarray): """Sets the cartesian position of the element.""" if self.qpos_indices is not None: qpos = quat if self._is_euler: qpos = quat2euler(quat, axes='rxyz') sim_scene.data.qpos[self.qpos_indices[3:]] = qpos return self.element_attr(sim_scene.model, 'quat')[self.element_id, :] = quat
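A hedged sketch of how TrackerGroupConfig (defined above) might be instantiated for its two flavors: a named sim element versus a joint addressed by qpos/qvel indices. The model file, site name, and index ranges are hypothetical, and SimScene.create() accepting a model handle is an assumption about the ROBEL API.

from robel.simulation.sim_scene import SimScene

sim_scene = SimScene.create('my_robot.xml')  # hypothetical MJCF model (assumed API)

# Track a named site defined in the model; pose is read from site_xpos/site_xmat.
site_config = TrackerGroupConfig(
    sim_scene, element_name='end_effector', element_type='site')

# Track a free joint through its state slices: 7 qpos values (xyz + wxyz
# quaternion) and 6 qvel values (linear + angular velocity).
joint_config = TrackerGroupConfig(
    sim_scene,
    element_type='joint',
    qpos_indices=range(0, 7),
    qvel_indices=range(0, 6),
)

print(joint_config.get_pos(sim_scene))       # qpos[0:3]
print(site_config.get_rot(sim_scene).shape)  # (3, 3) rotation matrix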
import string import numpy as np from numpy.testing import assert_array_equal from pandas import DataFrame, MultiIndex, Series from shapely.geometry import LinearRing, LineString, MultiPoint, Point, Polygon from shapely.geometry.collection import GeometryCollection from shapely.ops import unary_union from geopandas import GeoDataFrame, GeoSeries from geopandas.base import GeoPandasBase from geopandas.tests.util import assert_geoseries_equal, geom_almost_equals, geom_equals from pandas.testing import assert_frame_equal, assert_series_equal import pytest def assert_array_dtype_equal(a, b, *args, **kwargs): a = np.asanyarray(a) b = np.asanyarray(b) assert a.dtype == b.dtype assert_array_equal(a, b, *args, **kwargs) class TestGeomMethods: def setup_method(self): self.t1 = Polygon([(0, 0), (1, 0), (1, 1)]) self.t2 = Polygon([(0, 0), (1, 1), (0, 1)]) self.t3 = Polygon([(2, 0), (3, 0), (3, 1)]) self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]) self.inner_sq = Polygon( [(0.25, 0.25), (0.75, 0.25), (0.75, 0.75), (0.25, 0.75)] ) self.nested_squares = Polygon(self.sq.boundary, [self.inner_sq.boundary]) self.p0 = Point(5, 5) self.p3d = Point(5, 5, 5) self.g0 = GeoSeries( [ self.t1, self.t2, self.sq, self.inner_sq, self.nested_squares, self.p0, None, ] ) self.g1 = GeoSeries([self.t1, self.sq]) self.g2 = GeoSeries([self.sq, self.t1]) self.g3 = GeoSeries([self.t1, self.t2]) self.g3.crs = "epsg:4326" self.g4 = GeoSeries([self.t2, self.t1]) self.g4.crs = "epsg:4326" self.g_3d = GeoSeries([self.p0, self.p3d]) self.na = GeoSeries([self.t1, self.t2, Polygon()]) self.na_none = GeoSeries([self.t1, None]) self.a1 = self.g1.copy() self.a1.index = ["A", "B"] self.a2 = self.g2.copy() self.a2.index = ["B", "C"] self.esb = Point(-73.9847, 40.7484) self.sol = Point(-74.0446, 40.6893) self.landmarks = GeoSeries([self.esb, self.sol], crs="epsg:4326") self.l1 = LineString([(0, 0), (0, 1), (1, 1)]) self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)]) self.g5 = GeoSeries([self.l1, self.l2]) self.g6 = GeoSeries([self.p0, self.t3]) self.empty = GeoSeries([]) self.all_none = GeoSeries([None, None]) self.empty_poly = Polygon() # Crossed lines self.l3 = LineString([(0, 0), (1, 1)]) self.l4 = LineString([(0, 1), (1, 0)]) self.crossed_lines = GeoSeries([self.l3, self.l4]) # Placeholder for testing, will just drop in different geometries # when needed self.gdf1 = GeoDataFrame( {"geometry": self.g1, "col0": [1.0, 2.0], "col1": ["geo", "pandas"]} ) self.gdf2 = GeoDataFrame( {"geometry": self.g1, "col3": [4, 5], "col4": ["rand", "string"]} ) self.gdf3 = GeoDataFrame( {"geometry": self.g3, "col3": [4, 5], "col4": ["rand", "string"]} ) def _test_unary_real(self, op, expected, a): """ Tests for 'area', 'length', 'is_valid', etc. """ fcmp = assert_series_equal self._test_unary(op, expected, a, fcmp) def _test_unary_topological(self, op, expected, a): if isinstance(expected, GeoPandasBase): fcmp = assert_geoseries_equal else: def fcmp(a, b): assert a.equals(b) self._test_unary(op, expected, a, fcmp) def _test_binary_topological(self, op, expected, a, b, *args, **kwargs): """ Tests for 'intersection', 'union', 'symmetric_difference', etc. 
""" if isinstance(expected, GeoPandasBase): fcmp = assert_geoseries_equal else: def fcmp(a, b): assert geom_equals(a, b) if isinstance(b, GeoPandasBase): right_df = True else: right_df = False self._binary_op_test(op, expected, a, b, fcmp, True, right_df, *args, **kwargs) def _test_binary_real(self, op, expected, a, b, *args, **kwargs): fcmp = assert_series_equal self._binary_op_test(op, expected, a, b, fcmp, True, False, *args, **kwargs) def _test_binary_operator(self, op, expected, a, b): """ The operators only have GeoSeries on the left, but can have GeoSeries or GeoDataFrame on the right. If GeoDataFrame is on the left, geometry column is used. """ if isinstance(expected, GeoPandasBase): fcmp = assert_geoseries_equal else: def fcmp(a, b): assert geom_equals(a, b) if isinstance(b, GeoPandasBase): right_df = True else: right_df = False self._binary_op_test(op, expected, a, b, fcmp, False, right_df) def _binary_op_test( self, op, expected, left, right, fcmp, left_df, right_df, *args, **kwargs ): """ This is a helper to call a function on GeoSeries and GeoDataFrame arguments. For example, 'intersection' is a member of both GeoSeries and GeoDataFrame and can take either GeoSeries or GeoDataFrame inputs. This function has the ability to test all four combinations of input types. Parameters ---------- expected : str The operation to be tested. e.g., 'intersection' left: GeoSeries right: GeoSeries fcmp: function Called with the result of the operation and expected. It should assert if the result is incorrect left_df: bool If the left input should also be called with a GeoDataFrame right_df: bool Indicates whether the right input should be called with a GeoDataFrame """ def _make_gdf(s): n = len(s) col1 = string.ascii_lowercase[:n] col2 = range(n) return GeoDataFrame( {"geometry": s.values, "col1": col1, "col2": col2}, index=s.index, crs=s.crs, ) # Test GeoSeries.op(GeoSeries) result = getattr(left, op)(right, *args, **kwargs) fcmp(result, expected) if left_df: # Test GeoDataFrame.op(GeoSeries) gdf_left = _make_gdf(left) result = getattr(gdf_left, op)(right, *args, **kwargs) fcmp(result, expected) if right_df: # Test GeoSeries.op(GeoDataFrame) gdf_right = _make_gdf(right) result = getattr(left, op)(gdf_right, *args, **kwargs) fcmp(result, expected) if left_df: # Test GeoDataFrame.op(GeoDataFrame) result = getattr(gdf_left, op)(gdf_right, *args, **kwargs) fcmp(result, expected) def _test_unary(self, op, expected, a, fcmp): # GeoSeries, (GeoSeries or geometry) result = getattr(a, op) fcmp(result, expected) # GeoDataFrame, (GeoSeries or geometry) gdf = self.gdf1.set_geometry(a) result = getattr(gdf, op) fcmp(result, expected) # TODO reenable for all operations once we use pyproj > 2 # def test_crs_warning(self): # # operations on geometries should warn for different CRS # no_crs_g3 = self.g3.copy() # no_crs_g3.crs = None # with pytest.warns(UserWarning): # self._test_binary_topological('intersection', self.g3, # self.g3, no_crs_g3) def test_intersection(self): self._test_binary_topological("intersection", self.t1, self.g1, self.g2) with pytest.warns(UserWarning, match="The indices .+ different"): self._test_binary_topological( "intersection", self.all_none, self.g1, self.empty ) def test_union_series(self): self._test_binary_topological("union", self.sq, self.g1, self.g2) def test_union_polygon(self): self._test_binary_topological("union", self.sq, self.g1, self.t2) def test_symmetric_difference_series(self): self._test_binary_topological("symmetric_difference", self.sq, self.g3, self.g4) def 
test_symmetric_difference_poly(self): expected = GeoSeries([GeometryCollection(), self.sq], crs=self.g3.crs) self._test_binary_topological( "symmetric_difference", expected, self.g3, self.t1 ) def test_difference_series(self): expected = GeoSeries([GeometryCollection(), self.t2]) self._test_binary_topological("difference", expected, self.g1, self.g2) def test_difference_poly(self): expected = GeoSeries([self.t1, self.t1]) self._test_binary_topological("difference", expected, self.g1, self.t2) def test_geo_op_empty_result(self): l1 = LineString([(0, 0), (1, 1)]) l2 = LineString([(2, 2), (3, 3)]) expected = GeoSeries([GeometryCollection()]) # binary geo resulting in empty geometry result = GeoSeries([l1]).intersection(l2) assert_geoseries_equal(result, expected) # binary geo empty result with right GeoSeries result = GeoSeries([l1]).intersection(GeoSeries([l2])) assert_geoseries_equal(result, expected) # unary geo resulting in emtpy geometry result = GeoSeries([GeometryCollection()]).convex_hull assert_geoseries_equal(result, expected) def test_boundary(self): l1 = LineString([(0, 0), (1, 0), (1, 1), (0, 0)]) l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]) expected = GeoSeries([l1, l2], index=self.g1.index, crs=self.g1.crs) self._test_unary_topological("boundary", expected, self.g1) def test_area(self): expected = Series(np.array([0.5, 1.0]), index=self.g1.index) self._test_unary_real("area", expected, self.g1) expected = Series(np.array([0.5, np.nan]), index=self.na_none.index) self._test_unary_real("area", expected, self.na_none) def test_bounds(self): # Set columns to get the order right expected = DataFrame( { "minx": [0.0, 0.0], "miny": [0.0, 0.0], "maxx": [1.0, 1.0], "maxy": [1.0, 1.0], }, index=self.g1.index, columns=["minx", "miny", "maxx", "maxy"], ) result = self.g1.bounds assert_frame_equal(expected, result) gdf = self.gdf1.set_geometry(self.g1) result = gdf.bounds assert_frame_equal(expected, result) def test_bounds_empty(self): # test bounds of empty GeoSeries # https://github.com/geopandas/geopandas/issues/1195 s = GeoSeries([]) result = s.bounds expected = DataFrame( columns=["minx", "miny", "maxx", "maxy"], index=s.index, dtype="float64" ) assert_frame_equal(result, expected) def test_unary_union(self): p1 = self.t1 p2 = Polygon([(2, 0), (3, 0), (3, 1)]) expected = unary_union([p1, p2]) g = GeoSeries([p1, p2]) self._test_unary_topological("unary_union", expected, g) def test_contains(self): expected = [True, False, True, False, False, False, False] assert_array_dtype_equal(expected, self.g0.contains(self.t1)) def test_length(self): expected = Series(np.array([2 + np.sqrt(2), 4]), index=self.g1.index) self._test_unary_real("length", expected, self.g1) expected = Series(np.array([2 + np.sqrt(2), np.nan]), index=self.na_none.index) self._test_unary_real("length", expected, self.na_none) def test_crosses(self): expected = [False, False, False, False, False, False, False] assert_array_dtype_equal(expected, self.g0.crosses(self.t1)) expected = [False, True] assert_array_dtype_equal(expected, self.crossed_lines.crosses(self.l3)) def test_disjoint(self): expected = [False, False, False, False, False, True, False] assert_array_dtype_equal(expected, self.g0.disjoint(self.t1)) def test_relate(self): expected = Series( [ "212101212", "212101212", "212FF1FF2", "2FFF1FFF2", "FF2F112F2", "FF0FFF212", None, ], index=self.g0.index, ) assert_array_dtype_equal(expected, self.g0.relate(self.inner_sq)) expected = Series(["FF0FFF212", None], index=self.g6.index) 
assert_array_dtype_equal(expected, self.g6.relate(self.na_none)) def test_distance(self): expected = Series( np.array([np.sqrt((5 - 1) ** 2 + (5 - 1) ** 2), np.nan]), self.na_none.index ) assert_array_dtype_equal(expected, self.na_none.distance(self.p0)) expected = Series(np.array([np.sqrt(4 ** 2 + 4 ** 2), np.nan]), self.g6.index) assert_array_dtype_equal(expected, self.g6.distance(self.na_none)) def test_intersects(self): expected = [True, True, True, True, True, False, False] assert_array_dtype_equal(expected, self.g0.intersects(self.t1)) expected = [True, False] assert_array_dtype_equal(expected, self.na_none.intersects(self.t2)) expected = np.array([], dtype=bool) assert_array_dtype_equal(expected, self.empty.intersects(self.t1)) expected = np.array([], dtype=bool) assert_array_dtype_equal(expected, self.empty.intersects(self.empty_poly)) expected = [False] * 7 assert_array_dtype_equal(expected, self.g0.intersects(self.empty_poly)) def test_overlaps(self): expected = [True, True, False, False, False, False, False] assert_array_dtype_equal(expected, self.g0.overlaps(self.inner_sq)) expected = [False, False] assert_array_dtype_equal(expected, self.g4.overlaps(self.t1)) def test_touches(self): expected = [False, True, False, False, False, False, False] assert_array_dtype_equal(expected, self.g0.touches(self.t1)) def test_within(self): expected = [True, False, False, False, False, False, False] assert_array_dtype_equal(expected, self.g0.within(self.t1)) expected = [True, True, True, True, True, False, False] assert_array_dtype_equal(expected, self.g0.within(self.sq)) def test_is_valid(self): expected = Series(np.array([True] * len(self.g1)), self.g1.index) self._test_unary_real("is_valid", expected, self.g1) def test_is_empty(self): expected = Series(np.array([False] * len(self.g1)), self.g1.index) self._test_unary_real("is_empty", expected, self.g1) def test_is_ring(self): expected = Series(np.array([True] * len(self.g1)), self.g1.index) self._test_unary_real("is_ring", expected, self.g1) def test_is_simple(self): expected = Series(np.array([True] * len(self.g1)), self.g1.index) self._test_unary_real("is_simple", expected, self.g1) def test_has_z(self): expected = Series([False, True], self.g_3d.index) self._test_unary_real("has_z", expected, self.g_3d) def test_xy_points(self): expected_x = [-73.9847, -74.0446] expected_y = [40.7484, 40.6893] assert_array_dtype_equal(expected_x, self.landmarks.geometry.x) assert_array_dtype_equal(expected_y, self.landmarks.geometry.y) def test_xy_polygons(self): # accessing x attribute in polygon geoseries should raise an error with pytest.raises(ValueError): _ = self.gdf1.geometry.x # and same for accessing y attribute in polygon geoseries with pytest.raises(ValueError): _ = self.gdf1.geometry.y def test_centroid(self): polygon = Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)]) point = Point(0, 0) polygons = GeoSeries([polygon for i in range(3)]) points = GeoSeries([point for i in range(3)]) assert_geoseries_equal(polygons.centroid, points) def test_convex_hull(self): # the convex hull of a square should be the same as the square squares = GeoSeries([self.sq for i in range(3)]) assert_geoseries_equal(squares, squares.convex_hull) def test_exterior(self): exp_exterior = GeoSeries([LinearRing(p.boundary) for p in self.g3]) for expected, computed in zip(exp_exterior, self.g3.exterior): assert computed.equals(expected) def test_interiors(self): original = GeoSeries([self.t1, self.nested_squares]) # This is a polygon with no interior. 
expected = [] assert original.interiors[0] == expected # This is a polygon with an interior. expected = LinearRing(self.inner_sq.boundary) assert original.interiors[1][0].equals(expected) def test_interpolate(self): expected = GeoSeries([Point(0.5, 1.0), Point(0.75, 1.0)]) self._test_binary_topological( "interpolate", expected, self.g5, 0.75, normalized=True ) expected = GeoSeries([Point(0.5, 1.0), Point(1.0, 0.5)]) self._test_binary_topological("interpolate", expected, self.g5, 1.5) def test_interpolate_distance_array(self): expected = GeoSeries([Point(0.0, 0.75), Point(1.0, 0.5)]) self._test_binary_topological( "interpolate", expected, self.g5, np.array([0.75, 1.5]) ) expected = GeoSeries([Point(0.5, 1.0), Point(0.0, 1.0)]) self._test_binary_topological( "interpolate", expected, self.g5, np.array([0.75, 1.5]), normalized=True ) def test_interpolate_distance_wrong_length(self): distances = np.array([1, 2, 3]) with pytest.raises(ValueError): self.g5.interpolate(distances) def test_interpolate_distance_wrong_index(self): distances = Series([1, 2], index=[99, 98]) with pytest.raises(ValueError): self.g5.interpolate(distances) def test_project(self): expected = Series([2.0, 1.5], index=self.g5.index) p = Point(1.0, 0.5) self._test_binary_real("project", expected, self.g5, p) expected = Series([1.0, 0.5], index=self.g5.index) self._test_binary_real("project", expected, self.g5, p, normalized=True) def test_affine_transform(self): # 45 degree reflection matrix matrix = [0, 1, 1, 0, 0, 0] expected = self.g4 res = self.g3.affine_transform(matrix) assert_geoseries_equal(expected, res) def test_translate_tuple(self): trans = self.sol.x - self.esb.x, self.sol.y - self.esb.y assert self.landmarks.translate(*trans)[0].equals(self.sol) res = self.gdf1.set_geometry(self.landmarks).translate(*trans)[0] assert res.equals(self.sol) def test_rotate(self): angle = 98 expected = self.g4 o = Point(0, 0) res = self.g4.rotate(angle, origin=o).rotate(-angle, origin=o) assert geom_almost_equals(self.g4, res) res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0, 0)) assert geom_almost_equals(expected, res.rotate(-angle, origin=o)) def test_scale(self): expected = self.g4 scale = 2.0, 1.0 inv = tuple(1.0 / i for i in scale) o = Point(0, 0) res = self.g4.scale(*scale, origin=o).scale(*inv, origin=o) assert geom_almost_equals(expected, res) res = self.gdf1.set_geometry(self.g4).scale(*scale, origin=o) res = res.scale(*inv, origin=o) assert geom_almost_equals(expected, res) def test_skew(self): expected = self.g4 skew = 45.0 o = Point(0, 0) # Test xs res = self.g4.skew(xs=skew, origin=o).skew(xs=-skew, origin=o) assert geom_almost_equals(expected, res) res = self.gdf1.set_geometry(self.g4).skew(xs=skew, origin=o) res = res.skew(xs=-skew, origin=o) assert geom_almost_equals(expected, res) # Test ys res = self.g4.skew(ys=skew, origin=o).skew(ys=-skew, origin=o) assert geom_almost_equals(expected, res) res = self.gdf1.set_geometry(self.g4).skew(ys=skew, origin=o) res = res.skew(ys=-skew, origin=o) assert geom_almost_equals(expected, res) def test_buffer(self): original = GeoSeries([Point(0, 0)]) expected = GeoSeries([Polygon(((5, 0), (0, -5), (-5, 0), (0, 5), (5, 0)))]) calculated = original.buffer(5, resolution=1) assert geom_almost_equals(expected, calculated) def test_buffer_args(self): args = dict(cap_style=3, join_style=2, mitre_limit=2.5) calculated_series = self.g0.buffer(10, **args) for original, calculated in zip(self.g0, calculated_series): if original is None: assert calculated is None else: 
expected = original.buffer(10, **args) assert calculated.equals(expected) def test_buffer_distance_array(self): original = GeoSeries([self.p0, self.p0]) expected = GeoSeries( [ Polygon(((6, 5), (5, 4), (4, 5), (5, 6), (6, 5))), Polygon(((10, 5), (5, 0), (0, 5), (5, 10), (10, 5))), ] ) calculated = original.buffer(np.array([1, 5]), resolution=1) assert_geoseries_equal(calculated, expected, check_less_precise=True) def test_buffer_distance_wrong_length(self): original = GeoSeries([self.p0, self.p0]) distances = np.array([1, 2, 3]) with pytest.raises(ValueError): original.buffer(distances) def test_buffer_distance_wrong_index(self): original = GeoSeries([self.p0, self.p0], index=[0, 1]) distances = Series(data=[1, 2], index=[99, 98]) with pytest.raises(ValueError): original.buffer(distances) def test_buffer_empty_none(self): p = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)]) s = GeoSeries([p, GeometryCollection(), None]) result = s.buffer(0) assert_geoseries_equal(result, s) result = s.buffer(np.array([0, 0, 0])) assert_geoseries_equal(result, s) def test_envelope(self): e = self.g3.envelope assert np.all(e.geom_equals(self.sq)) assert isinstance(e, GeoSeries) assert self.g3.crs == e.crs def test_total_bounds(self): bbox = self.sol.x, self.sol.y, self.esb.x, self.esb.y assert isinstance(self.landmarks.total_bounds, np.ndarray) assert tuple(self.landmarks.total_bounds) == bbox df = GeoDataFrame( {"geometry": self.landmarks, "col1": range(len(self.landmarks))} ) assert tuple(df.total_bounds) == bbox def test_explode_geoseries(self): s = GeoSeries( [MultiPoint([(0, 0), (1, 1)]), MultiPoint([(2, 2), (3, 3), (4, 4)])] ) s.index.name = "test_index_name" expected_index_name = ["test_index_name", None] index = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 2)] expected = GeoSeries( [Point(0, 0), Point(1, 1), Point(2, 2), Point(3, 3), Point(4, 4)], index=MultiIndex.from_tuples(index, names=expected_index_name), ) assert_geoseries_equal(expected, s.explode()) @pytest.mark.parametrize("index_name", [None, "test"]) def test_explode_geodataframe(self, index_name): s = GeoSeries([MultiPoint([Point(1, 2), Point(2, 3)]), Point(5, 5)]) df = GeoDataFrame({"col": [1, 2], "geometry": s}) df.index.name = index_name test_df = df.explode() expected_s = GeoSeries([Point(1, 2), Point(2, 3), Point(5, 5)]) expected_df = GeoDataFrame({"col": [1, 1, 2], "geometry": expected_s}) expected_index = MultiIndex( [[0, 1], [0, 1]], # levels [[0, 0, 1], [0, 1, 0]], # labels/codes names=[index_name, None], ) expected_df = expected_df.set_index(expected_index) assert_frame_equal(test_df, expected_df) # # Test '&', '|', '^', and '-' # def test_intersection_operator(self): with pytest.warns(DeprecationWarning): self._test_binary_operator("__and__", self.t1, self.g1, self.g2) with pytest.warns(DeprecationWarning): self._test_binary_operator("__and__", self.t1, self.gdf1, self.g2) def test_union_operator(self): with pytest.warns(DeprecationWarning): self._test_binary_operator("__or__", self.sq, self.g1, self.g2) with pytest.warns(DeprecationWarning): self._test_binary_operator("__or__", self.sq, self.gdf1, self.g2) def test_union_operator_polygon(self): with pytest.warns(DeprecationWarning): self._test_binary_operator("__or__", self.sq, self.g1, self.t2) with pytest.warns(DeprecationWarning): self._test_binary_operator("__or__", self.sq, self.gdf1, self.t2) def test_symmetric_difference_operator(self): with pytest.warns(DeprecationWarning): self._test_binary_operator("__xor__", self.sq, self.g3, self.g4) with pytest.warns(DeprecationWarning): 
self._test_binary_operator("__xor__", self.sq, self.gdf3, self.g4) def test_difference_series2(self): expected = GeoSeries([GeometryCollection(), self.t2]) with pytest.warns(DeprecationWarning): self._test_binary_operator("__sub__", expected, self.g1, self.g2) with pytest.warns(DeprecationWarning): self._test_binary_operator("__sub__", expected, self.gdf1, self.g2) def test_difference_poly2(self): expected = GeoSeries([self.t1, self.t1]) with pytest.warns(DeprecationWarning): self._test_binary_operator("__sub__", expected, self.g1, self.t2) with pytest.warns(DeprecationWarning): self._test_binary_operator("__sub__", expected, self.gdf1, self.t2)
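A standalone illustration of the behavior the fixtures above are built to test: binary geometry methods on a GeoSeries apply element-wise and align by index.

from shapely.geometry import Polygon
from geopandas import GeoSeries

t1 = Polygon([(0, 0), (1, 0), (1, 1)])          # triangle, as in self.t1
sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])  # unit square, as in self.sq

g1 = GeoSeries([t1, sq])
g2 = GeoSeries([sq, t1])

# Mirrors test_intersection and test_union_series: both rows intersect to the
# triangle and union to the square.
print(g1.intersection(g2).geom_equals(GeoSeries([t1, t1])).all())  # True
print(g1.union(g2).geom_equals(GeoSeries([sq, sq])).all())         # True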
from flask import Blueprint, render_template

from src.extensions import db
from src.models import Donated

# coding=utf-8 # Copyright 2020 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow Policies API.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc from typing import Optional, Text, Sequence import six import tensorflow as tf import tensorflow_probability as tfp from tf_agents.distributions import reparameterized_sampling from tf_agents.specs import tensor_spec from tf_agents.trajectories import policy_step from tf_agents.trajectories import time_step as ts from tf_agents.trajectories import trajectory from tf_agents.typing import types from tf_agents.utils import common from tf_agents.utils import nest_utils tfd = tfp.distributions @six.add_metaclass(abc.ABCMeta) class TFPolicy(tf.Module): """Abstract base class for TF Policies. The Policy represents a mapping from `time_steps` recieved from the environment to `actions` that can be applied to the environment. Agents expose two policies. A `policy` meant for deployment and evaluation, and a `collect_policy` for collecting data from the environment. The `collect_policy` is usually stochastic for exploring the environment better and may log auxilliary information such as log probabilities required for training as well. `Policy` objects can also be created directly by the users without using an `Agent`. The main methods of TFPolicy are: * `action`: Maps a `time_step` from the environment to an action. * `distribution`: Maps a `time_step` to a distribution over actions. * `get_initial_state`: Generates the initial state for stateful policies, e.g. RNN/LSTM policies. Example usage: ``` env = SomeTFEnvironment() policy = TFRandomPolicy(env.time_step_spec(), env.action_spec()) # Or policy = agent.policy or agent.collect_policy policy_state = policy.get_initial_state(env.batch_size) time_step = env.reset() while not time_step.is_last(): policy_step = policy.action(time_step, policy_state) time_step = env.step(policy_step.action) policy_state = policy_step.state # policy_step.info may contain side info for logging, such as action log # probabilities. ``` Policies can be saved to disk as SavedModels (see policy_saver.py and policy_loader.py) or as TF Checkpoints. A `PyTFEagerPolicy` can be used to wrap a `TFPolicy` so that it works with `PyEnvironment`s. **NOTE**: For API consistency, subclasses are not allowed to override public methods of `TFPolicy` class. Instead, they may implement the protected methods including `_get_initial_state`, `_action`, and `_distribution`. This public-calls-private convention allowed this base class to do things like properly add `spec` and shape checks, which provide users an easier experience when debugging their environments and networks. For researchers, and those developing new Policies, the `TFPolicy` base class constructor also accept a `validate_args` parameter. If `False`, this disables all spec structure, dtype, and shape checks in the public methods of these classes. 
It allows algorithm developers to iterate and try different input and output structures without worrying about overly restrictive requirements, or input and output states being in a certain format. However, *disabling argument validation* can make it very hard to identify structural input or algorithmic errors; and should not be done for final, or production-ready, Policies. In addition to having implementations that may disagree with specs, this mean that the resulting Policy may no longer interact well with other parts of TF-Agents. Examples include impedance mismatches with Actor/Learner APIs, replay buffers, and the model export functionality in `PolicySaver. """ # TODO(b/127327645) Remove this attribute. # This attribute allows subclasses to back out of automatic tf.function # attribute inside TF1 (for autodeps). _enable_functions = True def __init__( self, time_step_spec: ts.TimeStep, action_spec: types.NestedTensorSpec, policy_state_spec: types.NestedTensorSpec = (), info_spec: types.NestedTensorSpec = (), clip: bool = True, emit_log_probability: bool = False, automatic_state_reset: bool = True, observation_and_action_constraint_splitter: Optional[ types.Splitter] = None, validate_args: bool = True, name: Optional[Text] = None): """Initialization of TFPolicy class. Args: time_step_spec: A `TimeStep` spec of the expected time_steps. Usually provided by the user to the subclass. action_spec: A nest of BoundedTensorSpec representing the actions. Usually provided by the user to the subclass. policy_state_spec: A nest of TensorSpec representing the policy_state. Provided by the subclass, not directly by the user. info_spec: A nest of TensorSpec representing the policy info. Provided by the subclass, not directly by the user. clip: Whether to clip actions to spec before returning them. Default True. Most policy-based algorithms (PCL, PPO, REINFORCE) use unclipped continuous actions for training. emit_log_probability: Emit log-probabilities of actions, if supported. If True, policy_step.info will have CommonFields.LOG_PROBABILITY set. Please consult utility methods provided in policy_step for setting and retrieving these. When working with custom policies, either provide a dictionary info_spec or a namedtuple with the field 'log_probability'. automatic_state_reset: If `True`, then `get_initial_policy_state` is used to clear state in `action()` and `distribution()` for for time steps where `time_step.is_first()`. observation_and_action_constraint_splitter: A function used to process observations with action constraints. These constraints can indicate, for example, a mask of valid/invalid actions for a given state of the environment. The function takes in a full observation and returns a tuple consisting of 1) the part of the observation intended as input to the network and 2) the constraint. An example `observation_and_action_constraint_splitter` could be as simple as: ``` def observation_and_action_constraint_splitter(observation): return observation['network_input'], observation['constraint'] ``` *Note*: when using `observation_and_action_constraint_splitter`, make sure the provided `q_network` is compatible with the network-specific half of the output of the `observation_and_action_constraint_splitter`. In particular, `observation_and_action_constraint_splitter` will be called on the observation before passing to the network. If `observation_and_action_constraint_splitter` is None, action constraints are not applied. validate_args: Python bool. 
Whether to verify inputs to, and outputs of, functions like `action` and `distribution` against spec structures, dtypes, and shapes. Research code may prefer to set this value to `False` to allow iterating on input and output structures without being hamstrung by overly rigid checking (at the cost of harder-to-debug errors). See also `TFAgent.validate_args`. name: A name for this module. Defaults to the class name. """ super(TFPolicy, self).__init__(name=name) common.check_tf1_allowed() common.tf_agents_gauge.get_cell('TFAPolicy').set(True) common.assert_members_are_not_overridden(base_cls=TFPolicy, instance=self) if not isinstance(time_step_spec, ts.TimeStep): raise ValueError( 'The `time_step_spec` must be an instance of `TimeStep`, but is `{}`.' .format(type(time_step_spec))) self._time_step_spec = tensor_spec.from_spec(time_step_spec) self._action_spec = tensor_spec.from_spec(action_spec) self._policy_state_spec = tensor_spec.from_spec(policy_state_spec) self._emit_log_probability = emit_log_probability self._validate_args = validate_args if emit_log_probability: log_probability_spec = tensor_spec.BoundedTensorSpec( shape=(), dtype=tf.float32, maximum=0, minimum=-float('inf'), name='log_probability') log_probability_spec = tf.nest.map_structure( lambda _: log_probability_spec, action_spec) info_spec = policy_step.set_log_probability( info_spec, log_probability_spec) # pytype: disable=wrong-arg-types self._info_spec = tensor_spec.from_spec(info_spec) self._setup_specs() self._clip = clip self._action_fn = common.function_in_tf1(experimental_relax_shapes=False)( self._action) self._automatic_state_reset = automatic_state_reset self._observation_and_action_constraint_splitter = ( observation_and_action_constraint_splitter) def _setup_specs(self): self._policy_step_spec = policy_step.PolicyStep( action=self._action_spec, state=self._policy_state_spec, info=self._info_spec) self._trajectory_spec = trajectory.from_transition(self._time_step_spec, self._policy_step_spec, self._time_step_spec) def variables(self) -> Sequence[tf.Variable]: """Returns the list of Variables that belong to the policy.""" # Ignore self._variables() in favor of using tf.Module's tracking. return super(TFPolicy, self).variables @property def observation_and_action_constraint_splitter(self) -> types.Splitter: return self._observation_and_action_constraint_splitter @property def validate_args(self) -> bool: """Whether `action` & `distribution` validate input and output args.""" return self._validate_args def get_initial_state(self, batch_size: Optional[types.Int]) -> types.NestedTensor: """Returns an initial state usable by the policy. Args: batch_size: Tensor or constant: size of the batch dimension. Can be None in which case no dimensions gets added. Returns: A nested object of type `policy_state` containing properly initialized Tensors. """ return self._get_initial_state(batch_size) def _maybe_reset_state(self, time_step, policy_state): if policy_state is (): # pylint: disable=literal-comparison return policy_state batch_size = tf.compat.dimension_value(time_step.discount.shape[0]) if batch_size is None: batch_size = tf.shape(time_step.discount)[0] # Make sure we call this with a kwarg as it may be wrapped in tf.function # which would expect a tensor if it was not a kwarg. 
zero_state = self.get_initial_state(batch_size=batch_size) condition = time_step.is_first() # When experience is a sequence we only reset automatically for the first # time_step in the sequence as we can't easily generalize how the policy is # unrolled over the sequence. if nest_utils.get_outer_rank(time_step, self._time_step_spec) > 1: condition = time_step.is_first()[:, 0, ...] return nest_utils.where(condition, zero_state, policy_state) def action(self, time_step: ts.TimeStep, policy_state: types.NestedTensor = (), seed: Optional[types.Seed] = None) -> policy_step.PolicyStep: """Generates next action given the time_step and policy_state. Args: time_step: A `TimeStep` tuple corresponding to `time_step_spec()`. policy_state: A Tensor, or a nested dict, list or tuple of Tensors representing the previous policy_state. seed: Seed to use if action performs sampling (optional). Returns: A `PolicyStep` named tuple containing: `action`: An action Tensor matching the `action_spec`. `state`: A policy state tensor to be fed into the next call to action. `info`: Optional side information such as action log probabilities. Raises: RuntimeError: If subclass __init__ didn't call super().__init__. ValueError or TypeError: If `validate_args is True` and inputs or outputs do not match `time_step_spec`, `policy_state_spec`, or `policy_step_spec`. """ if self._enable_functions and getattr(self, '_action_fn', None) is None: raise RuntimeError( 'Cannot find _action_fn. Did %s.__init__ call super?' % type(self).__name__) if self._enable_functions: action_fn = self._action_fn else: action_fn = self._action if self._validate_args: time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step) policy_state = nest_utils.prune_extra_keys( self._policy_state_spec, policy_state) nest_utils.assert_same_structure( time_step, self._time_step_spec, message='time_step and time_step_spec structures do not match') # TODO(b/158804957): Use literal comparison because in some strange cases # (tf.function? autograph?) the expression "x not in (None, (), [])" gets # converted to a tensor. 
if not (policy_state is None or policy_state is () or policy_state is []): # pylint: disable=literal-comparison nest_utils.assert_same_structure( policy_state, self._policy_state_spec, message=('policy_state and policy_state_spec ' 'structures do not match')) if self._automatic_state_reset: policy_state = self._maybe_reset_state(time_step, policy_state) step = action_fn(time_step=time_step, policy_state=policy_state, seed=seed) def clip_action(action, action_spec): if isinstance(action_spec, tensor_spec.BoundedTensorSpec): return common.clip_to_spec(action, action_spec) return action if self._validate_args: nest_utils.assert_same_structure( step.action, self._action_spec, message='action and action_spec structures do not match') if self._clip: clipped_actions = tf.nest.map_structure(clip_action, step.action, self._action_spec) step = step._replace(action=clipped_actions) if self._validate_args: nest_utils.assert_same_structure( step, self._policy_step_spec, message='action output and policy_step_spec structures do not match') def compare_to_spec(value, spec): return value.dtype.is_compatible_with(spec.dtype) compatibility = [ compare_to_spec(v, s) for (v, s) in zip(tf.nest.flatten(step.action), tf.nest.flatten(self.action_spec))] if not all(compatibility): get_dtype = lambda x: x.dtype action_dtypes = tf.nest.map_structure(get_dtype, step.action) spec_dtypes = tf.nest.map_structure(get_dtype, self.action_spec) raise TypeError('Policy produced an action with a dtype that doesn\'t ' 'match its action_spec. Got action:\n %s\n with ' 'action_spec:\n %s' % (action_dtypes, spec_dtypes)) return step def distribution( self, time_step: ts.TimeStep, policy_state: types.NestedTensor = () ) -> policy_step.PolicyStep: """Generates the distribution over next actions given the time_step. Args: time_step: A `TimeStep` tuple corresponding to `time_step_spec()`. policy_state: A Tensor, or a nested dict, list or tuple of Tensors representing the previous policy_state. Returns: A `PolicyStep` named tuple containing: `action`: A tf.distribution capturing the distribution of next actions. `state`: A policy state tensor for the next call to distribution. `info`: Optional side information such as action log probabilities. Raises: ValueError or TypeError: If `validate_args is True` and inputs or outputs do not match `time_step_spec`, `policy_state_spec`, or `policy_step_spec`. """ if self._validate_args: time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step) policy_state = nest_utils.prune_extra_keys( self._policy_state_spec, policy_state) nest_utils.assert_same_structure( time_step, self._time_step_spec, message='time_step and time_step_spec structures do not match') nest_utils.assert_same_structure( policy_state, self._policy_state_spec, message='policy_state and policy_state_spec structures do not match') if self._automatic_state_reset: policy_state = self._maybe_reset_state(time_step, policy_state) step = self._distribution(time_step=time_step, policy_state=policy_state) if self.emit_log_probability: # This here is set only for compatibility with info_spec in constructor. 
info = policy_step.set_log_probability( step.info, tf.nest.map_structure( lambda _: tf.constant(0., dtype=tf.float32), policy_step.get_log_probability(self._info_spec))) step = step._replace(info=info) if self._validate_args: nest_utils.assert_same_structure( step, self._policy_step_spec, message=('distribution output and policy_step_spec structures ' 'do not match')) return step def update(self, policy, tau: float = 1.0, tau_non_trainable: Optional[float] = None, sort_variables_by_name: bool = False) -> tf.Operation: """Update the current policy with another policy. This would include copying the variables from the other policy. Args: policy: Another policy it can update from. tau: A float scalar in [0, 1]. When tau is 1.0 (the default), we do a hard update. This is used for trainable variables. tau_non_trainable: A float scalar in [0, 1] for non_trainable variables. If None, will copy from tau. sort_variables_by_name: A bool, when True would sort the variables by name before doing the update. Returns: An TF op to do the update. """ if self.variables(): return common.soft_variables_update( policy.variables(), self.variables(), tau=tau, tau_non_trainable=tau_non_trainable, sort_variables_by_name=sort_variables_by_name) else: return tf.no_op() @property def emit_log_probability(self) -> bool: """Whether this policy instance emits log probabilities or not.""" return self._emit_log_probability @property def time_step_spec(self) -> ts.TimeStep: """Describes the `TimeStep` tensors returned by `step()`. Returns: A `TimeStep` namedtuple with `TensorSpec` objects instead of Tensors, which describe the shape, dtype and name of each tensor returned by `step()`. """ return self._time_step_spec @property def action_spec(self) -> types.NestedTensorSpec: """Describes the TensorSpecs of the Tensors expected by `step(action)`. `action` can be a single Tensor, or a nested dict, list or tuple of Tensors. Returns: An single BoundedTensorSpec, or a nested dict, list or tuple of `BoundedTensorSpec` objects, which describe the shape and dtype of each Tensor expected by `step()`. """ return self._action_spec @property def policy_state_spec(self) -> types.NestedTensorSpec: """Describes the Tensors expected by `step(_, policy_state)`. `policy_state` can be an empty tuple, a single Tensor, or a nested dict, list or tuple of Tensors. Returns: An single TensorSpec, or a nested dict, list or tuple of `TensorSpec` objects, which describe the shape and dtype of each Tensor expected by `step(_, policy_state)`. """ return self._policy_state_spec @property def info_spec(self) -> types.NestedTensorSpec: """Describes the Tensors emitted as info by `action` and `distribution`. `info` can be an empty tuple, a single Tensor, or a nested dict, list or tuple of Tensors. Returns: An single TensorSpec, or a nested dict, list or tuple of `TensorSpec` objects, which describe the shape and dtype of each Tensor expected by `step(_, policy_state)`. """ return self._info_spec @property def policy_step_spec(self) -> policy_step.PolicyStep: """Describes the output of `action()`. Returns: A nest of TensorSpec which describe the shape and dtype of each Tensor emitted by `action()`. """ return self._policy_step_spec # TODO(kbanoop, ebrevdo): Should this be collect_data_spec to mirror agents? @property def trajectory_spec(self) -> trajectory.Trajectory: """Describes the Tensors written when using this policy with an environment. 
Returns: A `Trajectory` containing all tensor specs associated with the observation_spec, action_spec, policy_state_spec, and info_spec of this policy. """ return self._trajectory_spec @property def collect_data_spec(self) -> trajectory.Trajectory: """Describes the Tensors written when using this policy with an environment. Returns: A nest of TensorSpec which describe the shape and dtype of each Tensor required to train the agent which generated this policy. """ return self._trajectory_spec # Subclasses MAY optionally override _action. def _action(self, time_step: ts.TimeStep, policy_state: types.NestedTensor, seed: Optional[types.Seed] = None) -> policy_step.PolicyStep: """Implementation of `action`. Args: time_step: A `TimeStep` tuple corresponding to `time_step_spec()`. policy_state: A Tensor, or a nested dict, list or tuple of Tensors representing the previous policy_state. seed: Seed to use if action performs sampling (optional). Returns: A `PolicyStep` named tuple containing: `action`: An action Tensor matching the `action_spec`. `state`: A policy state tensor to be fed into the next call to action. `info`: Optional side information such as action log probabilities. """ seed_stream = tfp.util.SeedStream(seed=seed, salt='tf_agents_tf_policy') distribution_step = self._distribution(time_step, policy_state) # pytype: disable=wrong-arg-types actions = tf.nest.map_structure( lambda d: reparameterized_sampling.sample(d, seed=seed_stream()), distribution_step.action) info = distribution_step.info if self.emit_log_probability: try: log_probability = tf.nest.map_structure(lambda a, d: d.log_prob(a), actions, distribution_step.action) info = policy_step.set_log_probability(info, log_probability) except: raise TypeError('%s does not support emitting log-probabilities.' % type(self).__name__) return distribution_step._replace(action=actions, info=info) ## Subclasses MUST implement these. def _distribution( self, time_step: ts.TimeStep, policy_state: types.NestedTensorSpec) -> policy_step.PolicyStep: """Implementation of `distribution`. Args: time_step: A `TimeStep` tuple corresponding to `time_step_spec()`. policy_state: A Tensor, or a nested dict, list or tuple of Tensors representing the previous policy_state. Returns: A `PolicyStep` named tuple containing: `action`: A (optionally nested) of tfp.distribution.Distribution capturing the distribution of next actions. `state`: A policy state tensor for the next call to distribution. `info`: Optional side information such as action log probabilities. """ raise NotImplementedError() # Subclasses MAY optionally overwrite _get_initial_state. def _get_initial_state(self, batch_size: int) -> types.NestedTensor: """Returns the initial state of the policy network. Args: batch_size: A constant or Tensor holding the batch size. Can be None, in which case the state will not have a batch dimension added. Returns: A nest of zero tensors matching the spec of the policy network state. """ return tensor_spec.zero_spec_nest( self._policy_state_spec, outer_dims=None if batch_size is None else [batch_size])
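As the class docstring says, concrete policies implement the protected hooks rather than overriding the public methods. Below is a minimal sketch (not an official TF-Agents policy) of a subclass whose _distribution() samples uniformly from a scalar bounded action spec; the public action() wrapper supplies the spec validation, clipping, and state handling shown above.

import tensorflow as tf
import tensorflow_probability as tfp

from tf_agents.policies import tf_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts


class UniformTFPolicy(tf_policy.TFPolicy):
  """Toy policy sampling actions uniformly within a BoundedTensorSpec."""

  def _distribution(self, time_step, policy_state):
    def uniform(spec):
      return tfp.distributions.Uniform(
          low=tf.cast(spec.minimum, spec.dtype),
          high=tf.cast(spec.maximum, spec.dtype))

    distributions = tf.nest.map_structure(uniform, self.action_spec)
    return policy_step.PolicyStep(distributions, policy_state, ())


observation_spec = tensor_spec.TensorSpec([2], tf.float32)
action_spec = tensor_spec.BoundedTensorSpec((), tf.float32, minimum=-1.0, maximum=1.0)
policy = UniformTFPolicy(ts.time_step_spec(observation_spec), action_spec)

step = policy.action(ts.restart(tf.zeros([2], tf.float32)))
print(step.action)  # scalar float32 in [-1, 1]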
'''
Module to manage FreeBSD kernel modules
'''

import os


def __virtual__():
    '''
    Only runs on FreeBSD systems
    '''
    return 'kmod' if __grains__['kernel'] == 'FreeBSD' else False


def _new_mods(pre_mods, post_mods):
    '''
    Return a list of the new modules; pass a kldstat dict from before
    kldload was run and one from after it has run
    '''
    pre = set()
    post = set()
    for mod in pre_mods:
        pre.add(mod['module'])
    for mod in post_mods:
        post.add(mod['module'])
    return list(post - pre)


def _rm_mods(pre_mods, post_mods):
    '''
    Return a list of the removed modules; pass a kldstat dict from before
    kldunload was run and one from after it has run
    '''
    pre = set()
    post = set()
    for mod in pre_mods:
        pre.add(mod['module'])
    for mod in post_mods:
        post.add(mod['module'])
    return list(pre - post)


def available():
    '''
    Return a list of all available kernel modules

    CLI Example::

        salt '*' kmod.available
    '''
    ret = []
    for path in __salt__['cmd.run']('ls /boot/kernel | grep .ko$').split('\n'):
        bpath = os.path.basename(path)
        comps = bpath.split('.')
        if 'ko' in comps:
            # This is a kernel module, return it without the .ko extension
            ret.append('.'.join(comps[:comps.index('ko')]))
    return ret


def check_available(mod):
    '''
    Check to see if the specified kernel module is available

    CLI Example::

        salt '*' kmod.check_available kvm
    '''
    return mod in available()


def lsmod():
    '''
    Return a dict containing information about currently loaded modules

    CLI Example::

        salt '*' kmod.lsmod
    '''
    ret = []
    for line in __salt__['cmd.run']('kldstat').split('\n'):
        comps = line.split()
        if not len(comps) > 2:
            continue
        if comps[0] == 'Module':
            continue
        mdat = {}
        mdat['module'] = comps[0]
        mdat['size'] = comps[1]
        mdat['depcount'] = comps[2]
        if len(comps) > 3:
            mdat['deps'] = comps[3].split(',')
        else:
            mdat['deps'] = []
        ret.append(mdat)
    return ret


def load(mod):
    '''
    Load the specified kernel module

    CLI Example::

        salt '*' kmod.load kvm
    '''
    pre_mods = lsmod()
    __salt__['cmd.run_all']('kldload {0}'.format(mod))
    post_mods = lsmod()
    return _new_mods(pre_mods, post_mods)


def remove(mod):
    '''
    Remove the specified kernel module

    CLI Example::

        salt '*' kmod.remove kvm
    '''
    pre_mods = lsmod()
    __salt__['cmd.run_all']('kldunload {0}'.format(mod))
    post_mods = lsmod()
    return _rm_mods(pre_mods, post_mods)
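A quick, self-contained illustration of the _new_mods/_rm_mods helpers above; the module names are arbitrary examples, and only the 'module' key that the helpers read is populated.

before = [{'module': 'kernel'}, {'module': 'zfs'}]
after = [{'module': 'kernel'}, {'module': 'zfs'}, {'module': 'if_bridge'}]

print(_new_mods(before, after))  # ['if_bridge'], what load() reports after kldload
print(_rm_mods(after, before))   # ['if_bridge'], what remove() reports after kldunload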
# from app import app

# if __name__ == "__main__":
#     app.run()
from . import base, fields, mixins


class File(base.TelegramObject, mixins.Downloadable):
    """
    This object represents a file ready to be downloaded.

    The file can be downloaded via the link
    https://api.telegram.org/file/bot<token>/<file_path>.
    It is guaranteed that the link will be valid for at least 1 hour.
    When the link expires, a new one can be requested by calling getFile.

    Maximum file size to download is 20 MB

    https://core.telegram.org/bots/api#file
    """
    file_id: base.String = fields.Field()
    file_unique_id: base.String = fields.Field()
    file_size: base.Integer = fields.Field()
    file_path: base.String = fields.Field()
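The docstring above gives the download URL format; the snippet below simply assembles it with placeholder values (the token and file_path are not real).

# Placeholder-only illustration of the getFile download link format.
BOT_TOKEN = "123456:ABC-EXAMPLE"        # hypothetical bot token
file_path = "photos/file_0.jpg"         # hypothetical value of File.file_path

download_url = f"https://api.telegram.org/file/bot{BOT_TOKEN}/{file_path}"
print(download_url)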
import json import logging import random import warnings from collections import namedtuple from pyramid.settings import asbool import ujson from kinto.core.decorators import deprecate_kwargs from . import generators class Missing: """Dummy value to represent a value that is completely absent from an object. Handling these correctly is important for pagination. """ pass MISSING = Missing() logger = logging.getLogger(__name__) Filter = namedtuple("Filter", ["field", "value", "operator"]) """Filtering properties.""" Sort = namedtuple("Sort", ["field", "direction"]) """Sorting properties.""" DEFAULT_ID_FIELD = "id" DEFAULT_MODIFIED_FIELD = "last_modified" DEFAULT_DELETED_FIELD = "deleted" _HEARTBEAT_DELETE_RATE = 0.6 _HEARTBEAT_RESOURCE_NAME = "__heartbeat__" _HEART_PARENT_ID = _HEARTBEAT_RESOURCE_NAME _HEARTBEAT_OBJECT = {"__heartbeat__": True} class StorageBase: """Storage abstraction used by resource views. It is meant to be instantiated at application startup. Any operation may raise a `HTTPServiceUnavailable` error if an error occurs with the underlying service. Configuration can be changed to choose which storage backend will persist the objects. :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPServiceUnavailable` """ id_generator = generators.UUID4() """Id generator used when no one is provided for create.""" def __init__(self, strict_json=True): """initialize json (de)serializer to be the strict, slow json or ujson""" if strict_json: self.json = json else: self.json = ujson def initialize_schema(self, dry_run=False): """Create every necessary objects (like tables or indices) in the backend. This is executed when the ``kinto migrate`` command is run. :param bool dry_run: simulate instead of executing the operations. """ raise NotImplementedError def flush(self, auth=None): """Remove **every** object from this storage. """ raise NotImplementedError def resource_timestamp(self, resource_name, parent_id, auth=None): """Get the highest timestamp of every objects in this `resource_name` for this `parent_id`. .. note:: This should take deleted objects into account. :param str resource_name: the resource name. :param str parent_id: the resource parent. :returns: the latest timestamp of the resource. :rtype: int """ raise NotImplementedError def create( self, resource_name, parent_id, obj, id_generator=None, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None, ): """Create the specified `obj` in this `resource_name` for this `parent_id`. Assign the id to the object, using the attribute :attr:`kinto.core.resource.model.Model.id_field`. .. note:: This will update the resource timestamp. :raises: :exc:`kinto.core.storage.exceptions.UnicityError` :param str resource_name: the resource name. :param str parent_id: the resource parent. :param dict obj: the object to create. :returns: the newly created object. :rtype: dict """ raise NotImplementedError def get( self, resource_name, parent_id, object_id, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None, ): """Retrieve the object with specified `object_id`, or raise error if not found. :raises: :exc:`kinto.core.storage.exceptions.ObjectNotFoundError` :param str resource_name: the resource name. :param str parent_id: the resource parent. :param str object_id: unique identifier of the object :returns: the stored object. 
:rtype: dict """ raise NotImplementedError def update( self, resource_name, parent_id, object_id, obj, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None, ): """Overwrite the `obj` with the specified `object_id`. If the specified id is not found, the object is created with the specified id. .. note:: This will update the resource timestamp. :param str resource_name: the resource name. :param str parent_id: the resource parent. :param str object_id: unique identifier of the object :param dict obj: the object to update or create. :returns: the updated object. :rtype: dict """ raise NotImplementedError def delete( self, resource_name, parent_id, object_id, id_field=DEFAULT_ID_FIELD, with_deleted=True, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None, last_modified=None, ): """Delete the object with specified `object_id`, and raise error if not found. Deleted objects must be removed from the database, but their ids and timestamps of deletion must be tracked for synchronization purposes. (See :meth:`kinto.core.storage.StorageBase.get_all`) .. note:: This will update the resource timestamp. :raises: :exc:`kinto.core.storage.exceptions.ObjectNotFoundError` :param str resource_name: the resource name. :param str parent_id: the resource parent. :param str object_id: unique identifier of the object :param bool with_deleted: track deleted object with a tombstone :returns: the deleted object, with minimal set of attributes. :rtype: dict """ raise NotImplementedError def delete_all( self, resource_name, parent_id, filters=None, sorting=None, pagination_rules=None, limit=None, id_field=DEFAULT_ID_FIELD, with_deleted=True, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None, ): """Delete all objects in this `resource_name` for this `parent_id`. :param str resource_name: the resource name. :param str parent_id: the resource parent. :param filters: Optionnally filter the objects to delete. :type filters: list of :class:`kinto.core.storage.Filter` :param sorting: Optionnally sort the objects by attribute. Each sort instruction in this list refers to a field and a direction (negative means descending). All sort instructions are cumulative. :type sorting: list of :class:`kinto.core.storage.Sort` :param pagination_rules: Optionnally paginate the deletion of objects. This list of rules aims to reduce the set of objects to the current page. A rule is a list of filters (see `filters` parameter), and all rules are combined using *OR*. :type pagination_rules: list of list of :class:`kinto.core.storage.Filter` :param int limit: Optionnally limit the number of objects to be deleted. :param bool with_deleted: track deleted objects with a tombstone :returns: the list of deleted objects, with minimal set of attributes. :rtype: list """ raise NotImplementedError def purge_deleted( self, resource_name, parent_id, before=None, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None, ): """Delete all deleted object tombstones in this `resource_name` for this `parent_id`. :param str resource_name: the resource name. :param str parent_id: the resource parent. :param int before: Optionnal timestamp to limit deletion (exclusive) :returns: The number of deleted objects. 
:rtype: int """ raise NotImplementedError @deprecate_kwargs({"collection_id": "resource_name"}) def get_all(self, *args, **kwargs): """Legacy method to support code that relied on the old API where the storage's get_all() would return a tuple of (<list of objects paginated>, <count of all>). Since then, we're being more explicit and expecting the client to deliberately decide if they need a paginated list or a count. This method exists solely to make the transition easier. """ warnings.warn("Use either self.list_all() or self.count_all()", DeprecationWarning) list_ = self.list_all(*args, **kwargs) kwargs.pop("pagination_rules", None) kwargs.pop("limit", None) kwargs.pop("sorting", None) kwargs.pop("include_deleted", None) count = self.count_all(*args, **kwargs) return (list_, count) def list_all( self, resource_name, parent_id, filters=None, sorting=None, pagination_rules=None, limit=None, include_deleted=False, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None, ): """Retrieve all objects in this `resource_name` for this `parent_id`. :param str resource_name: the resource name. :param str parent_id: the resource parent, possibly containing a wildcard '*'. (This can happen when implementing "administrator" operations on a Resource, for example, like ``kinto.plugins.accounts``.) :param filters: Optionally filter the objects by their attribute. Each filter in this list is a tuple of a field, a value and a comparison (see `kinto.core.utils.COMPARISON`). All filters are combined using *AND*. :type filters: list of :class:`kinto.core.storage.Filter` :param sorting: Optionnally sort the objects by attribute. Each sort instruction in this list refers to a field and a direction (negative means descending). All sort instructions are cumulative. :type sorting: list of :class:`kinto.core.storage.Sort` :param pagination_rules: Optionnally paginate the list of objects. This list of rules aims to reduce the set of objects to the current page. A rule is a list of filters (see `filters` parameter), and all rules are combined using *OR*. :type pagination_rules: list of list of :class:`kinto.core.storage.Filter` :param int limit: Optionnally limit the number of objects to be retrieved. :param bool include_deleted: Optionnally include the deleted objects that match the filters. :returns: the limited list of objects of matching objects in the resource (deleted ones excluded). :rtype: list """ raise NotImplementedError def count_all( self, resource_name, parent_id, filters=None, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None, ): """Return a count of all objects in this `resource_name` for this `parent_id`. :param str resource_name: the resource name. :param str parent_id: the parent resource, possibly containing a wildcard '*'. (This can happen when implementing "administrator" operations on a UserResource, for example.) :param filters: Optionally filter the objects by their attribute. Each filter in this list is a tuple of a field, a value and a comparison (see `kinto.core.utils.COMPARISON`). All filters are combined using *AND*. :type filters: list of :class:`kinto.core.storage.Filter` :returns: the total number of matching objects in the resource (deleted ones excluded). :rtype: int """ raise NotImplementedError def collection_timestamp(self, collection_id, parent_id, auth=None): message = "`collection_timestamp()` is deprecated, use `resource_timestamp()` instead." 
warnings.warn(message, DeprecationWarning) return self.resource_timestamp(resource_name=collection_id, parent_id=parent_id, auth=auth) def heartbeat(backend): def ping(request): """Test that storage is operational. :param request: current request object :type request: :class:`~pyramid:pyramid.request.Request` :returns: ``True`` is everything is ok, ``False`` otherwise. :rtype: bool """ try: auth = request.headers.get("Authorization") storage_kw = dict( resource_name=_HEARTBEAT_RESOURCE_NAME, parent_id=_HEART_PARENT_ID, auth=auth ) if asbool(request.registry.settings.get("readonly")): # Do not try to write in readonly mode. backend.get_all(**storage_kw) else: if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE: backend.delete_all(**storage_kw) backend.purge_deleted(**storage_kw) # Kinto/kinto#985 else: backend.create(obj=_HEARTBEAT_OBJECT, **storage_kw) return True except Exception: logger.exception("Heartbeat Error") return False return ping
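The abstract methods above only document the storage contract. The sketch below is a minimal, illustration-only in-memory backend for two of them (create and get); it is not kinto's real implementation, and it raises a plain KeyError instead of kinto's storage exceptions.

# Illustration-only in-memory backend for part of the StorageBase contract.
import time
import uuid


class InMemoryStorage:
    def __init__(self):
        self._objects = {}      # (resource_name, parent_id) -> {object_id: obj}
        self._timestamps = {}   # (resource_name, parent_id) -> int (milliseconds)

    def _bump(self, key):
        # Each write refreshes the resource timestamp, as the docstrings require.
        self._timestamps[key] = int(time.time() * 1000)
        return self._timestamps[key]

    def create(self, resource_name, parent_id, obj, id_field="id",
               modified_field="last_modified"):
        key = (resource_name, parent_id)
        obj = dict(obj)
        obj.setdefault(id_field, uuid.uuid4().hex)
        obj[modified_field] = self._bump(key)
        self._objects.setdefault(key, {})[obj[id_field]] = obj
        return obj

    def get(self, resource_name, parent_id, object_id):
        # Real backends raise ObjectNotFoundError; this sketch lets KeyError through.
        return self._objects[(resource_name, parent_id)][object_id]


storage = InMemoryStorage()
created = storage.create("record", "bucket/abc", {"title": "hello"})
assert storage.get("record", "bucket/abc", created["id"])["title"] == "hello"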
import os import click from movie import Movie from scan import Scan from helper import Helper @click.command() @click.option('--endings', default='mp4, mkv', help='File-endings that are accepted as valid movie-files. ' + 'Default: [.mkv, .mp4]' ) @click.option('--size_limit', default="1500", help='Smaller files are excluded from search (in MegaBytes). ' + "Default: 1500") @click.argument('search_path', required=True) def main(endings, size_limit, search_path): # initiate global function variables movie_list = [] longest_title = 0 # initiate options & arguments from cli movie_endings = tuple(endings.split(", ")) movie_size_limit = int(size_limit) * 1024 * 1024 # MegaBytes # initiate needed objects scanner = Scan(movie_endings, movie_size_limit) helper = Helper() # look for all available files inside directory recursively for root, subs, files in os.walk(search_path): # do available files match a movie-file? for file in files: # is movie file? bool_movie = scanner.is_movie(file) if not bool_movie: continue # is large enough? movie_path = os.path.join(root, file) movie_folder = os.path.basename(root) bool_large = scanner.is_large(movie_path) if not bool_large: continue # is movie file and large enough, try to extract a valid movie name extracted_data = scanner.extract_file_data(file, movie_folder) # if movie has valid data, create a new movie object if -1 in extracted_data: print("Problem with: " + extracted_data[0] + " " + str(extracted_data[1])) else: # data valid, create object and append it movie_object = Movie( extracted_data[0], extracted_data[1], movie_path, root ) movie_list.append(movie_object) # does the current movie have the longest title? if longest_title < len(movie_object.title): longest_title = len(movie_object.title) result_str = 'Movies counted: {number}'.format(number=len(movie_list)) print(result_str) # try to fetch imdb rating for each movie-object for movie in movie_list: movie.fetch_rating() # is current movie in top 250 movie.imdb_top = helper.is_imdb_top(movie) # sort movies by their rating and print them print("") movie_list.sort(key=lambda x: x.rating, reverse=True) for movie in movie_list: movie.print_data(longest_title) if __name__ == '__main__': main()
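A hedged sketch of exercising the CLI above with click's test runner. The module name "scan_cli" is only an assumption about how the file above might be saved; the options and the SEARCH_PATH argument are the ones declared on main().

# Hypothetical invocation of the click CLI defined above.
from click.testing import CliRunner

from scan_cli import main  # hypothetical module name for the file above

runner = CliRunner()
result = runner.invoke(
    main, ["--endings", "mp4, mkv", "--size_limit", "2000", "/media/movies"]
)
print(result.output)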
import math


class Player:
    def __init__(self):
        pass
        # self.most_common = lambda: self.numbers.index(max(self.numbers)) + 1

    def initcards(self, num1, num2, num3, num4, num_all):
        self.numbers = [num1, num2, num3, num4]
        self.num_all = num_all
        self.common = self.numbers.index(max(self.numbers)) + 1

    def guess(self):
        prob = self.num_all / 4
        ceil = math.ceil(prob)
        floor = math.floor(prob)
        prob = floor if abs(ceil - prob) > abs(floor - prob) else ceil
        return {self.common: prob + max(self.numbers)}

    def play(self):
        guess_answer = self.guess()
        return guess_answer


def play_one_round(cart_list, num_all):
    player = Player()
    player.initcards(cart_list.count(1), cart_list.count(2),
                     cart_list.count(3), cart_list.count(4), num_all)
    try:
        player_guess = player.play()
        print(player_guess)
    except Exception:
        print('something wrong, please try again')
        l, num_all = get_input()
        play_one_round(l, num_all)


def get_input():
    l = input('list of my cards: ').split()
    num_all = int(input('number of all cards: '))
    l = list(map(int, l))
    return l, num_all


if __name__ == '__main__':
    l, num_all = get_input()
    play_one_round(l, num_all)
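A worked example of `guess()` for one sample hand, reproducing the rounding and the most-common-card choice outside the class; the numbers are made up.

# Worked example (illustrative): counts [2, 1, 0, 1] for cards 1-4, 16 cards in play.
import math

numbers = [2, 1, 0, 1]
num_all = 16
common = numbers.index(max(numbers)) + 1   # card 1 is the most common in hand
prob = num_all / 4                         # 4.0
ceil, floor = math.ceil(prob), math.floor(prob)
prob = floor if abs(ceil - prob) > abs(floor - prob) else ceil   # rounds to 4
assert {common: prob + max(numbers)} == {1: 6}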
# coding: utf-8 """ Pure Storage FlashBlade REST 1.3 Python SDK Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/). OpenAPI spec version: 1.3 Contact: info@purestorage.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class FileSystemsApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def create_file_systems(self, file_system, **kwargs): """ Create a new file system This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_file_systems(file_system, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param FileSystem file_system: the attribute map used to create the file system (required) :return: FileSystemResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_file_systems_with_http_info(file_system, **kwargs) else: (data) = self.create_file_systems_with_http_info(file_system, **kwargs) return data def create_file_systems_with_http_info(self, file_system, **kwargs): """ Create a new file system This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_file_systems_with_http_info(file_system, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param FileSystem file_system: the attribute map used to create the file system (required) :return: FileSystemResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['file_system'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_file_systems" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'file_system' is set if ('file_system' not in params) or (params['file_system'] is None): raise ValueError("Missing the required parameter `file_system` when calling `create_file_systems`") collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'file_system' in params: body_params = params['file_system'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['AuthTokenHeader'] return self.api_client.call_api('/1.3/file-systems', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='FileSystemResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_file_systems(self, name, **kwargs): """ Delete a file system This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_file_systems(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: name of the file system to be deleted (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_file_systems_with_http_info(name, **kwargs) else: (data) = self.delete_file_systems_with_http_info(name, **kwargs) return data def delete_file_systems_with_http_info(self, name, **kwargs): """ Delete a file system This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_file_systems_with_http_info(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: name of the file system to be deleted (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['name'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_file_systems" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `delete_file_systems`") collection_formats = {} path_params = {} query_params = [] if 'name' in params: query_params.append(('name', params['name'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['AuthTokenHeader'] return self.api_client.call_api('/1.3/file-systems', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_file_systems(self, **kwargs): """ List file systems This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_file_systems(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters. :param str filter: The filter to be used for query. :param str sort: The way to order the results. :param int start: start :param int limit: limit, should be >= 0 :param str token: token :param bool total: Return a total object in addition to the other results. :param bool total_only: Return only the total object. :return: FileSystemResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.list_file_systems_with_http_info(**kwargs) else: (data) = self.list_file_systems_with_http_info(**kwargs) return data def list_file_systems_with_http_info(self, **kwargs): """ List file systems This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_file_systems_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters. :param str filter: The filter to be used for query. :param str sort: The way to order the results. 
:param int start: start :param int limit: limit, should be >= 0 :param str token: token :param bool total: Return a total object in addition to the other results. :param bool total_only: Return only the total object. :return: FileSystemResponse If the method is called asynchronously, returns the request thread. """ all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token', 'total', 'total_only'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_file_systems" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'filter' in params: query_params.append(('filter', params['filter'])) if 'sort' in params: query_params.append(('sort', params['sort'])) if 'start' in params: query_params.append(('start', params['start'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'token' in params: query_params.append(('token', params['token'])) if 'total' in params: query_params.append(('total', params['total'])) if 'total_only' in params: query_params.append(('total_only', params['total_only'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['AuthTokenHeader'] return self.api_client.call_api('/1.3/file-systems', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='FileSystemResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def update_file_systems(self, name, attributes, **kwargs): """ Update an existing file system This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_file_systems(name, attributes, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: the name of the file system to be updated (required) :param FileSystem attributes: the new attributes, only modifiable fields could be used. (required) :return: FileSystemResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_file_systems_with_http_info(name, attributes, **kwargs) else: (data) = self.update_file_systems_with_http_info(name, attributes, **kwargs) return data def update_file_systems_with_http_info(self, name, attributes, **kwargs): """ Update an existing file system This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_file_systems_with_http_info(name, attributes, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: the name of the file system to be updated (required) :param FileSystem attributes: the new attributes, only modifiable fields could be used. (required) :return: FileSystemResponse If the method is called asynchronously, returns the request thread. """ all_params = ['name', 'attributes'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method update_file_systems" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'name' is set if ('name' not in params) or (params['name'] is None): raise ValueError("Missing the required parameter `name` when calling `update_file_systems`") # verify the required parameter 'attributes' is set if ('attributes' not in params) or (params['attributes'] is None): raise ValueError("Missing the required parameter `attributes` when calling `update_file_systems`") collection_formats = {} path_params = {} query_params = [] if 'name' in params: query_params.append(('name', params['name'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'attributes' in params: body_params = params['attributes'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['AuthTokenHeader'] return self.api_client.call_api('/1.3/file-systems', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='FileSystemResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
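A hedged usage sketch of the generated client above, mirroring the doctest-style examples in its own docstrings. It assumes an already authenticated ApiClient instance, and it assumes the returned FileSystemResponse exposes the listed file systems on an `items` attribute.

# >>> fs_api = FileSystemsApi(api_client)          # api_client: authenticated ApiClient (assumption)
# >>> response = fs_api.list_file_systems(sort='name', limit=10)
# >>> for file_system in response.items:           # `items` attribute is an assumption
# ...     print(file_system.name)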
output = ""
with open('test.txt') as fp:
    for line in fp:
        line = line.strip()
        line = line.replace(" ", "")
        output += line
print(output)
from plumber import Behavior from plumber import PlumbingCollision from plumber import default from plumber import finalize from plumber import override from plumber import plumb from plumber import plumber from plumber import plumbifexists from plumber import plumbing from plumber.behavior import behaviormetaclass from plumber.compat import add_metaclass from plumber.instructions import Instruction from plumber.instructions import _implements from plumber.instructions import payload from plumber.instructions import plumb_str from plumber.plumber import searchnameinbases from zope.interface import Interface from zope.interface import implementer from zope.interface.interface import InterfaceClass import inspect import sys if sys.version_info < (2, 7): # pragma: no cover import unittest2 as unittest else: # pragma: no cover import unittest class TestInstructions(unittest.TestCase): def test_payload(self): class Foo: pass self.assertTrue(payload(Instruction(Instruction(Foo))) is Foo) def test_plumb_str(self): leftdoc = """Left head __plbnext__ Left tail """ rightdoc = """Right head __plbnext__ Right tail """ self.assertEqual(plumb_str(leftdoc, rightdoc).split('\n'), [ 'Left head', '', ' Right head', '', ' __plbnext__', '', ' Right tail', '', ' Left tail', ' ' ]) leftdoc = """Left tail """ rightdoc = """Right tail """ self.assertEqual(plumb_str(leftdoc, rightdoc).split('\n'), [ 'Right tail', '', 'Left tail', ' ' ]) class A: pass self.assertTrue(plumb_str(A, None) is A) self.assertTrue(plumb_str(None, A) is A) self.assertTrue(plumb_str(None, None) is None) def test_instruction(self): class Foo: pass self.assertTrue(Instruction(Foo).item is Foo) self.assertTrue(Instruction(Foo).__name__ is None) self.assertTrue(Instruction(Foo, name='foo').__name__ == 'foo') self.assertRaises( NotImplementedError, lambda: Instruction(None) + 1 ) self.assertRaises( NotImplementedError, lambda: Instruction(None)(None) ) def test_default(self): # First default wins from left to right def1 = default(1) self.assertTrue(def1 + def1 is def1) def2 = default(2) self.assertTrue(def1 + def2 is def1) self.assertTrue(def2 + def1 is def2) # Override wins over default ext3 = override(3) self.assertTrue(def1 + ext3 is ext3) # Finalize wins over default fin4 = finalize(4) self.assertTrue(def1 + fin4 is fin4) # Adding with something else than default/override, raises # ``PlumbingCollision`` err = None try: def1 + Instruction('foo') except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__class__.__name__, 'default') self.assertEqual(err.left.payload, 1) self.assertEqual(err.right.__class__.__name__, 'Instruction') self.assertEqual(err.right.payload, 'foo') def test_override(self): # First override wins against following equal overrides and arbitrary # defaults ext1 = override(1) self.assertTrue(ext1 + ext1 is ext1) self.assertTrue(ext1 + override(1) is ext1) self.assertTrue(ext1 + override(2) is ext1) self.assertTrue(ext1 + default(2) is ext1) fin3 = finalize(3) self.assertTrue(ext1 + fin3 is fin3) # Everything except default/override collides err = None try: ext1 + Instruction(1) except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__class__.__name__, 'override') self.assertEqual(err.left.payload, 1) self.assertEqual(err.right.__class__.__name__, 'Instruction') self.assertEqual(err.right.payload, 1) def test_finalize(self): # First override wins against following equal overrides and arbitrary # defaults fin1 = finalize(1) self.assertTrue(fin1 + fin1 is fin1) self.assertTrue(fin1 + 
finalize(1) is fin1) self.assertTrue(fin1 + default(2) is fin1) self.assertTrue(fin1 + override(2) is fin1) # Two unequal finalize collide err = None try: fin1 + finalize(2) except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__class__.__name__, 'finalize') self.assertEqual(err.left.payload, 1) self.assertEqual(err.right.__class__.__name__, 'finalize') self.assertEqual(err.right.payload, 2) # Everything except default/override collides try: fin1 + Instruction(1) except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__class__.__name__, 'finalize') self.assertEqual(err.left.payload, 1) self.assertEqual(err.right.__class__.__name__, 'Instruction') self.assertEqual(err.right.payload, 1) def test_plumb(self): plb1 = plumb(1) self.assertTrue(plb1 + plumb(1) is plb1) err = None try: plb1 + Instruction(1) except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__class__.__name__, 'plumb') self.assertEqual(err.left.payload, 1) self.assertEqual(err.right.__class__.__name__, 'Instruction') self.assertEqual(err.right.payload, 1) try: func_a = lambda x: None prop_b = property(lambda x: None) plumb(func_a) + plumb(prop_b) except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__class__.__name__, 'plumb') self.assertEqual(err.left.payload, func_a) self.assertEqual(err.right.__class__.__name__, 'plumb') self.assertEqual(err.right.payload, prop_b) try: plumb(1) + plumb(2) except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__class__.__name__, 'plumb') self.assertEqual(err.left.payload, 1) self.assertEqual(err.right.__class__.__name__, 'plumb') self.assertEqual(err.right.payload, 2) def test_implements(self): # classImplements interfaces foo = _implements(('foo',)) self.assertTrue(foo == foo) self.assertTrue(foo + foo is foo) self.assertTrue(foo == _implements(('foo',))) self.assertTrue(foo != _implements(('bar',))) self.assertTrue( _implements(('foo', 'bar')) == _implements(('bar', 'foo')) ) self.assertTrue(foo + _implements(('foo',)) is foo) bar = _implements(('bar',)) foobar = foo + bar self.assertEqual(foobar.__class__.__name__, '_implements') self.assertEqual(foobar.__name__, '__interfaces__') self.assertEqual(foobar.payload, ('bar', 'foo')) self.assertTrue(foo + bar == bar + foo) err = None try: foo + Instruction("bar") except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__class__.__name__, '_implements') self.assertEqual(err.left.__name__, '__interfaces__') self.assertEqual(err.left.payload, ('foo',)) self.assertEqual(err.right.__class__.__name__, 'Instruction') self.assertEqual(err.right.payload, 'bar') class TestBehavior(unittest.TestCase): def test_behaviormetaclass(self): @add_metaclass(behaviormetaclass) class A(object): pass self.assertEqual( getattr(A, '__plumbing_instructions__', 'No behavior'), 'No behavior' ) @add_metaclass(behaviormetaclass) class B(Behavior): pass self.assertEqual( getattr(B, '__plumbing_instructions__', None) and 'Behavior', 'Behavior' ) class TestPlumber(unittest.TestCase): def test_searchnameinbases(self): class A(object): foo = 1 class B(A): pass self.assertTrue(searchnameinbases('foo', (B,))) self.assertFalse(searchnameinbases('bar', (B,))) class TestGlobalMetaclass(unittest.TestCase): @unittest.skipIf( sys.version_info[0] >= 3, '__metaclass__ attribute on module leven only works in python 2') def test_global_metaclass(self): from plumber.tests import globalmetaclass as gm # A zope.interface.Interface is not affected by the global # 
``__metaclass__``. self.assertEqual(gm.IBehavior1.__class__, InterfaceClass) # A global meta-class declaration makes all classes at least new-style # classes, even when not subclassing subclasses self.assertEqual(gm.Foo.__class__, plumber) self.assertTrue(issubclass(gm.Foo, object)) # If subclassing object, the global metaclass declaration is ignored:: self.assertEqual(gm.ClassMaybeUsingAPlumbing.__class__, type) self.assertEqual(gm.ClassReallyUsingAPlumbing.__class__, plumber) self.assertTrue(issubclass(gm.ClassReallyUsingAPlumbing, object)) self.assertTrue( gm.IBehavior1.implementedBy(gm.ClassReallyUsingAPlumbing) ) self.assertEqual(gm.BCClassReallyUsingAPlumbing.__class__, plumber) self.assertTrue(issubclass(gm.BCClassReallyUsingAPlumbing, object)) self.assertTrue( gm.IBehavior1.implementedBy(gm.BCClassReallyUsingAPlumbing) ) class TestMetaclassHooks(unittest.TestCase): def test_metaclasshook(self): class IBehaviorInterface(Interface): pass @plumber.metaclasshook def test_metclass_hook(cls, name, bases, dct): if not IBehaviorInterface.implementedBy(cls): return cls.hooked = True self.assertTrue(test_metclass_hook in plumber.__metaclass_hooks__) @implementer(IBehaviorInterface) class MetaclassConsideredBehavior(Behavior): pass @plumbing(MetaclassConsideredBehavior) class Plumbing(object): pass self.assertTrue(Plumbing.hooked) class BehaviorIgnoredByMetaclassHook(Behavior): pass @plumbing(BehaviorIgnoredByMetaclassHook) class Plumbing2(object): pass self.assertRaises(AttributeError, lambda: Plumbing2.hooked) plumber.__metaclass_hooks__.remove(test_metclass_hook) class TestPlumberBasics(unittest.TestCase): def test_basics(self): class Behavior1(Behavior): a = default(True) @default def foo(self): return 42 class Behavior2(Behavior): @default @property def bar(self): return 17 Base = dict @plumbing(Behavior1, Behavior2) class Plumbing(Base): def foobar(self): return 5 plb = Plumbing() self.assertTrue(plb.a) self.assertEqual(plb.foo(), 42) self.assertEqual(plb.bar, 17) self.assertEqual(plb.foobar(), 5) plb['a'] = 1 self.assertEqual(plb['a'], 1) class Sub(Plumbing): a = 'Sub' self.assertEqual(Sub.a, 'Sub') self.assertEqual(Sub().foo(), 42) self.assertEqual(Sub().bar, 17) self.assertEqual(Sub().foobar(), 5) stacks = Plumbing.__plumbing_stacks__ self.assertEqual(len(stacks['history']), 5) stages = stacks['stages'] self.assertEqual(sorted(list(stages.keys())), ['stage1', 'stage2']) stage_1 = stages['stage1'] self.assertEqual(sorted(list(stage_1.keys())), ['a', 'bar', 'foo']) stage_2 = stages['stage2'] self.assertEqual(sorted(list(stage_2.keys())), ['__interfaces__']) @unittest.skipIf( sys.version_info[0] >= 3, '__metaclass__ property only works in python 2') def test_bc_plumbing_py2(self): class Behavior1(Behavior): a = default(True) class BCPlumbing(object): __metaclass__ = plumber __plumbing__ = Behavior1 plb = BCPlumbing() self.assertTrue(plb.a) class TestPlumberStage1(unittest.TestCase): def test_finalize_instruction(self): class Behavior1(Behavior): N = finalize('Behavior1') class Behavior2(Behavior): M = finalize('Behavior2') class Base(object): K = 'Base' @plumbing(Behavior1, Behavior2) class Plumbing(Base): L = 'Plumbing' res = list() for x in ['K', 'L', 'M', 'N']: res.append("%s from %s" % (x, getattr(Plumbing, x))) self.assertEqual(res, [ 'K from Base', 'L from Plumbing', 'M from Behavior2', 'N from Behavior1', ]) def test_finalize_collisions(self): err = None class Behavior1(Behavior): O = finalize(False) try: @plumbing(Behavior1) class Plumbing(object): O = True except 
PlumbingCollision as e: err = e finally: self.assertEqual(err.left, 'Plumbing class') self.assertEqual(err.right.__parent__.__name__, 'Behavior1') self.assertEqual(err.right.__class__.__name__, 'finalize') self.assertEqual(err.right.__name__, 'O') self.assertFalse(err.right.payload) class Behavior2(Behavior): P = finalize(False) try: @plumbing(Behavior2) class Plumbing(object): P = True except PlumbingCollision as e: err = e finally: self.assertEqual(err.left, 'Plumbing class') self.assertEqual(err.right.__parent__.__name__, 'Behavior2') self.assertEqual(err.right.__class__.__name__, 'finalize') self.assertEqual(err.right.__name__, 'P') self.assertFalse(err.right.payload) class Behavior3(Behavior): Q = finalize(False) class Behavior4(Behavior): Q = finalize(True) try: @plumbing(Behavior3, Behavior4) class Plumbing(object): pass except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__parent__.__name__, 'Behavior3') self.assertEqual(err.left.__class__.__name__, 'finalize') self.assertEqual(err.left.__name__, 'Q') self.assertFalse(err.left.payload) self.assertEqual(err.right.__parent__.__name__, 'Behavior4') self.assertEqual(err.right.__class__.__name__, 'finalize') self.assertEqual(err.right.__name__, 'Q') self.assertTrue(err.right.payload) def test_override_instruction(self): class Behavior1(Behavior): K = override('Behavior1') M = override('Behavior1') class Behavior2(Behavior): K = override('Behavior2') L = override('Behavior2') M = override('Behavior2') class Base(object): K = 'Base' L = 'Base' M = 'Base' @plumbing(Behavior1, Behavior2) class Plumbing(Base): K = 'Plumbing' res = list() for x in ['K', 'L', 'M']: res.append("%s from %s" % (x, getattr(Plumbing, x))) self.assertEqual(res, [ 'K from Plumbing', 'L from Behavior2', 'M from Behavior1' ]) def test_default_instruction(self): class Behavior1(Behavior): N = default('Behavior1') class Behavior2(Behavior): K = default('Behavior2') L = default('Behavior2') M = default('Behavior2') N = default('Behavior2') class Base(object): K = 'Base' L = 'Base' @plumbing(Behavior1, Behavior2) class Plumbing(Base): L = 'Plumbing' res = list() for x in ['K', 'L', 'M', 'N']: res.append("%s from %s" % (x, getattr(Plumbing, x))) self.assertEqual(res, [ 'K from Base', 'L from Plumbing', 'M from Behavior2', 'N from Behavior1' ]) def test_finalize_wins_over_override(self): class Behavior1(Behavior): K = override('Behavior1') L = finalize('Behavior1') class Behavior2(Behavior): K = finalize('Behavior2') L = override('Behavior2') class Base(object): K = 'Base' L = 'Base' @plumbing(Behavior1, Behavior2) class Plumbing(Base): pass res = list() for x in ['K', 'L']: res.append("%s from %s" % (x, getattr(Plumbing, x))) self.assertEqual(res, [ 'K from Behavior2', 'L from Behavior1' ]) def test_finalize_wins_over_default(self): class Behavior1(Behavior): K = default('Behavior1') L = finalize('Behavior1') class Behavior2(Behavior): K = finalize('Behavior2') L = default('Behavior2') class Base(object): K = 'Base' L = 'Base' @plumbing(Behavior1, Behavior2) class Plumbing(Base): pass res = list() for x in ['K', 'L']: res.append("%s from %s" % (x, getattr(Plumbing, x))) self.assertEqual(res, [ 'K from Behavior2', 'L from Behavior1' ]) def test_override_wins_over_default(self): class Behavior1(Behavior): K = default('Behavior1') L = override('Behavior1') class Behavior2(Behavior): K = override('Behavior2') L = default('Behavior2') class Base(object): K = 'Base' L = 'Base' @plumbing(Behavior1, Behavior2) class Plumbing(Base): pass res = list() for x in 
['K', 'L']: res.append("%s from %s" % (x, getattr(Plumbing, x))) self.assertEqual(res, [ 'K from Behavior2', 'L from Behavior1' ]) def test_subclassing_behaviors(self): class Behavior1(Behavior): J = default('Behavior1') K = default('Behavior1') M = override('Behavior1') class Behavior2(Behavior1): # overrides ``J`` of ``Behavior1`` J = default('Behavior2') L = default('Behavior2') # this one wins, even if ``M`` on superclass is ``override`` # instruction due to ordinary inheritance behavior. M = default('Behavior2') @plumbing(Behavior2) class Plumbing(object): pass plb = Plumbing() self.assertEqual(plb.J, 'Behavior2') self.assertEqual(plb.K, 'Behavior1') self.assertEqual(plb.L, 'Behavior2') self.assertEqual(plb.M, 'Behavior2') class TestPlumberStage2(unittest.TestCase): def test_method_pipelines(self): res = list() class Behavior1(Behavior): @plumb def __getitem__(_next, self, key): res.append("Behavior1 start") key = key.lower() ret = _next(self, key) res.append("Behavior1 stop") return ret class Behavior2(Behavior): @plumb def __getitem__(_next, self, key): res.append("Behavior2 start") ret = 2 * _next(self, key) res.append("Behavior2 stop") return ret Base = dict @plumbing(Behavior1, Behavior2) class Plumbing(Base): pass plb = Plumbing() plb['abc'] = 6 self.assertEqual(plb['AbC'], 12) self.assertEqual(res, [ 'Behavior1 start', 'Behavior2 start', 'Behavior2 stop', 'Behavior1 stop' ]) def test_endpoint_not_exists(self): err = None class Behavior1(Behavior): @plumb def foo(_next, self): pass # pragma: no cover try: @plumbing(Behavior1) class Plumbing(object): pass except AttributeError as e: err = e finally: self.assertEqual( str(err), 'type object \'Plumbing\' has no attribute \'foo\'' ) def test_plumb_if_exists(self): class Behavior1(Behavior): @plumbifexists def foo(_next, self): pass # pragma: no cover @plumbifexists def bar(_next, self): return 2 * _next(self) @plumbing(Behavior1) class Plumbing(object): def bar(self): return 6 self.assertFalse(hasattr(Plumbing, 'foo')) self.assertEqual(Plumbing().bar(), 12) def test_property_pipelines(self): class Behavior1(Behavior): @plumb @property def foo(_next, self): return 2 * _next(self) @plumbing(Behavior1) class Plumbing1(object): @property def foo(self): return 3 plb = Plumbing1() self.assertEqual(plb.foo, 6) class Behavior2(Behavior): @plumb @property def foo(_next, self): return 2 * _next(self) class Behavior3(Behavior): def set_foo(self, value): self._foo = value foo = plumb(property( None, override(set_foo), )) @plumbing(Behavior2, Behavior3) class Plumbing2(object): @property def foo(self): return self._foo plb = Plumbing2() plb.foo = 4 self.assertEqual(plb.foo, 8) def test_subclassing_behaviors(self): class Behavior1(Behavior): @plumb def foo(_next, self): return 'Behavior1 ' + _next(self) @plumb def bar(_next, self): return 'Behavior1 ' + _next(self) class Behavior2(Behavior1): @plumb def foo(_next, self): return 'Behavior2 ' + _next(self) @plumbing(Behavior2) class Plumbing(object): def foo(self): return 'foo' def bar(self): return 'bar' plb = Plumbing() self.assertEqual(plb.foo(), 'Behavior2 Behavior1 foo') self.assertEqual(plb.bar(), 'Behavior1 bar') def test_mixing_properties_and_methods(self): err = None class Behavior1(Behavior): @plumb def foo(_next, self): return _next(self) # pragma: no cover try: @plumbing(Behavior1) class Plumbing(object): @property def foo(self): return 5 # pragma: no cover except PlumbingCollision as e: err = e finally: self.assertEqual(err.left.__parent__.__name__, 'Behavior1') 
self.assertEqual(err.left.__class__.__name__, 'plumb') self.assertEqual(err.left.__name__, 'foo') self.assertEqual(err.left.payload.__name__, 'foo') self.assertEqual(err.right.__name__, 'Plumbing') self.assertTrue(inspect.isclass(err.right)) def test_docstrings_joined(self): class P1(Behavior): """P1 """ @plumb def foo(self): """P1.foo """ bar = plumb(property(None, None, None, "P1.bar")) class P2(Behavior): @override def foo(self): """P2.foo """ bar = plumb(property(None, None, None, "P2.bar")) @plumbing(P1, P2) class Plumbing(object): """Plumbing """ bar = property(None, None, None, "Plumbing.bar") self.assertEqual(Plumbing.__doc__.strip(), 'Plumbing\n\nP1') self.assertEqual(Plumbing.foo.__doc__.strip(), 'P2.foo\n\nP1.foo') self.assertEqual( Plumbing.bar.__doc__.strip(), 'Plumbing.bar\n\nP2.bar\n\nP1.bar' ) def test_slots(self): class P1(Behavior): @default def somewhing_which_writes_to_foo(self, foo_val): self.foo = foo_val @plumbing(P1) class WithSlots(object): __slots__ = 'foo' self.assertEqual( type(WithSlots.__dict__['foo']).__name__, 'member_descriptor' ) ob = WithSlots() ob.somewhing_which_writes_to_foo('foo') self.assertEqual(ob.foo, 'foo') def test_zope_interface(self): class IBase(Interface): pass @implementer(IBase) class Base(object): pass self.assertTrue(IBase.implementedBy(Base)) class IBehavior1(Interface): pass @implementer(IBehavior1) class Behavior1(Behavior): blub = 1 class IBehavior2Base(Interface): pass @implementer(IBehavior2Base) class Behavior2Base(Behavior): pass class IBehavior2(Interface): pass @implementer(IBehavior2) class Behavior2(Behavior2Base): pass self.assertTrue(IBehavior1.implementedBy(Behavior1)) self.assertTrue(IBehavior2Base.implementedBy(Behavior2Base)) self.assertTrue(IBehavior2Base.implementedBy(Behavior2)) self.assertTrue(IBehavior2.implementedBy(Behavior2)) class IPlumbingClass(Interface): pass @implementer(IPlumbingClass) @plumbing(Behavior1, Behavior2) class PlumbingClass(Base): pass self.assertTrue(IPlumbingClass.implementedBy(PlumbingClass)) self.assertTrue(IBase.implementedBy(PlumbingClass)) self.assertTrue(IBehavior1.implementedBy(PlumbingClass)) self.assertTrue(IBehavior2.implementedBy(PlumbingClass)) self.assertTrue(IBehavior2Base.implementedBy(PlumbingClass)) plb = PlumbingClass() self.assertTrue(IPlumbingClass.providedBy(plb)) self.assertTrue(IBase.providedBy(plb)) self.assertTrue(IBehavior1.providedBy(plb)) self.assertTrue(IBehavior2.providedBy(plb)) self.assertTrue(IBehavior2Base.providedBy(plb)) if __name__ == '__main__': unittest.main() # pragma: no cover
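The behavior/plumbing pattern exercised throughout these tests boils down to a short standalone example. The sketch below uses only names already imported at the top of the test module (Behavior, default, plumb, plumbing); the class and attribute names are made up for illustration.

# Condensed illustration of the pattern checked by test_basics and
# test_method_pipelines above: `default` supplies an attribute unless the
# class defines it, and `plumb` wraps the class's own method in a pipeline.
from plumber import Behavior, default, plumb, plumbing


class Doubling(Behavior):
    greeting = default('hello')

    @plumb
    def value(_next, self):
        return 2 * _next(self)


@plumbing(Doubling)
class Example(object):
    def value(self):
        return 21


ex = Example()
assert ex.greeting == 'hello'   # supplied by the behavior's `default`
assert ex.value() == 42         # behavior pipeline wraps Example.value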
r""""Contains definitions of the methods used by the _DataLoaderIter workers to collate samples fetched from dataset into Tensor(s). These **needs** to be in global scope since Py2 doesn't support serializing static methods. """ import torch import re from torch._six import container_abcs, string_classes, int_classes _use_shared_memory = False r"""Whether to use shared memory in default_collate""" np_str_obj_array_pattern = re.compile(r'[SaUO]') error_msg_fmt = "batch must contain tensors, numbers, dicts or lists; found {}" numpy_type_map = { 'float64': torch.DoubleTensor, 'float32': torch.FloatTensor, 'float16': torch.HalfTensor, 'int64': torch.LongTensor, 'int32': torch.IntTensor, 'int16': torch.ShortTensor, 'int8': torch.CharTensor, 'uint8': torch.ByteTensor, } def default_collate(batch): r"""Puts each data field into a tensor with outer dimension batch size""" elem_type = type(batch[0]) if isinstance(batch[0], torch.Tensor): out = None if _use_shared_memory: # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = batch[0].storage()._new_shared(numel) out = batch[0].new(storage) return torch.stack(batch, 0, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': elem = batch[0] if elem_type.__name__ == 'ndarray': # array of string classes and object if np_str_obj_array_pattern.search(elem.dtype.str) is not None: raise TypeError(error_msg_fmt.format(elem.dtype)) return default_collate([torch.from_numpy(b) for b in batch]) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith('float') else int return numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], float): return torch.tensor(batch, dtype=torch.float32) elif isinstance(batch[0], int_classes): return torch.tensor(batch) elif isinstance(batch[0], string_classes): return batch elif isinstance(batch[0], container_abcs.Mapping): return {key: default_collate([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple return type(batch[0])(*(default_collate(samples) for samples in zip(*batch))) elif isinstance(batch[0], container_abcs.Sequence): transposed = zip(*batch) return [default_collate(samples) for samples in transposed] raise TypeError((error_msg_fmt.format(type(batch[0]))))
from replit import clear
from art import logo

print(logo)

bids = {}
bidding_finished = False


def find_highest_bidder(bidding_record):
    highest_bid = 0
    winner = ""
    for bidder in bidding_record:
        bid_amount = bidding_record[bidder]
        if bid_amount > highest_bid:
            highest_bid = bid_amount
            winner = bidder
    print(f"The winner is {winner} with a bid of ${highest_bid}")


while not bidding_finished:
    name = input("What is your name?: ")
    price = int(input("What is your bid?: $"))
    bids[name] = price
    should_continue = input("Are there any other bidders? Type 'yes' or 'no'.\n")
    if should_continue == "no":
        bidding_finished = True
        find_highest_bidder(bids)
    elif should_continue == "yes":
        clear()
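For comparison, the same winner could be picked with `max()` and a key function; this compact variant is a sketch, not part of the original program.

# Sketch of an equivalent winner selection over the same bids dict.
def find_highest_bidder_compact(bidding_record):
    winner = max(bidding_record, key=bidding_record.get)
    print(f"The winner is {winner} with a bid of ${bidding_record[winner]}")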
from lightning_plus.api_basebone.drf.routers import SimpleRouter

from .upload import views as upload_views

router = SimpleRouter(custom_base_name="basebone-app")
router.register("upload", upload_views.UploadViewSet)

urlpatterns = router.urls
from rllab.envs.base import Step from rllab.misc.overrides import overrides from rllab.envs.mujoco.mujoco_env import MujocoEnv import numpy as np from rllab.core.serializable import Serializable from rllab.misc import logger from rllab.misc import autoargs from contextlib import contextmanager class SwimmerEnv(MujocoEnv, Serializable): FILE = 'swimmer.xml' @autoargs.arg('ctrl_cost_coeff', type=float, help='cost coefficient for controls') def __init__( self, ctrl_cost_coeff=1e-2, *args, **kwargs): self.ctrl_cost_coeff = ctrl_cost_coeff super(SwimmerEnv, self).__init__(*args, **kwargs) Serializable.quick_init(self, locals()) def get_current_obs(self): return np.concatenate([ self.model.data.qpos.flat, self.model.data.qvel.flat, self.get_body_com("torso").flat, ]).reshape(-1) def step(self, action): self.forward_dynamics(action) next_obs = self.get_current_obs() lb, ub = self.action_bounds scaling = (ub - lb) * 0.5 ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum( np.square(action / scaling)) forward_reward = self.get_body_comvel("torso")[0] reward = forward_reward - ctrl_cost done = False return Step(next_obs, reward, done) # @overrides # def reset_mujoco(self, init_state=None): # super(SwimmerEnv, self).reset_mujoco(init) # if init_state is not None: # idx = self.model.body_names.index("torso") # self.model.data.com_subtree[idx][0] = init_state[0] # self.model.data.com_subtree[idx][1] = init_state[1] @overrides # ignoring the goal def reset(self, *args, **kwargs): return super(SwimmerEnv, self).reset(*args, **kwargs) # passing in keyword arguments @overrides def log_diagnostics(self, paths): if len(paths) > 0: progs = [ path["observations"][-1][-3] - path["observations"][0][-3] for path in paths ] logger.record_tabular('AverageForwardProgress', np.mean(progs)) logger.record_tabular('MaxForwardProgress', np.max(progs)) logger.record_tabular('MinForwardProgress', np.min(progs)) logger.record_tabular('StdForwardProgress', np.std(progs)) else: logger.record_tabular('AverageForwardProgress', np.nan) logger.record_tabular('MaxForwardProgress', np.nan) logger.record_tabular('MinForwardProgress', np.nan) logger.record_tabular('StdForwardProgress', np.nan) @contextmanager def set_kill_outside(self): self.kill_outside = True try: yield finally: self.kill_outside = False
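The reward assembled in `step()` is a forward-velocity term minus a scaled control cost. The numpy-only snippet below (with made-up bounds, action, and velocity) spells out that arithmetic outside of rllab/MuJoCo.

# Illustrative reward arithmetic for step() above; all values are made up.
import numpy as np

ctrl_cost_coeff = 1e-2
lb, ub = np.array([-1.0, -1.0]), np.array([1.0, 1.0])   # hypothetical action bounds
action = np.array([0.5, -0.25])                          # hypothetical action
forward_velocity = 0.8                                   # hypothetical torso x-velocity

scaling = (ub - lb) * 0.5
ctrl_cost = 0.5 * ctrl_cost_coeff * np.sum(np.square(action / scaling))
reward = forward_velocity - ctrl_cost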
import inspect import logging from datetime import timedelta from typing import Any, Tuple import pytest import prefect from prefect.core import Edge, Flow, Parameter, Task from prefect.engine.cache_validators import all_inputs, duration_only, never_use from prefect.engine.results import PrefectResult, LocalResult from prefect.utilities.configuration import set_temporary_config from prefect.configuration import process_task_defaults from prefect.utilities.tasks import task class AddTask(Task): def run(self, x, y=1): return x + y class TestCreateTask: """Test various Task constructors""" def test_create_task_with_no_args(self): """Tasks can be created with no arguments""" assert Task() def test_create_task_is_not_auto_generated(self): assert Task().auto_generated is False def test_create_task_with_name(self): t1 = Task() assert t1.name == "Task" t2 = Task(name="test") assert t2.name == "test" def test_create_task_with_cache_key(self): t1 = Task() assert t1.cache_key is None t2 = Task(cache_key="test") assert t2.cache_key == "test" def test_create_task_with_slug(self): t1 = Task() assert t1.slug is None t2 = Task(slug="test") assert t2.slug == "test" def test_create_task_with_max_retries(self): t1 = Task() assert t1.max_retries == 0 t2 = Task(max_retries=5, retry_delay=timedelta(0)) assert t2.max_retries == 5 with set_temporary_config({"tasks.defaults.max_retries": 3}) as config: # Cover type casting of task defaults process_task_defaults(config) t3 = Task(retry_delay=timedelta(0)) assert t3.max_retries == 3 def test_create_task_with_retry_delay(self): t1 = Task(retry_delay=timedelta(seconds=30), max_retries=1) assert t1.retry_delay == timedelta(seconds=30) with set_temporary_config({"tasks.defaults.retry_delay": 3}) as config: # Cover type casting of task defaults process_task_defaults(config) t2 = Task(max_retries=1) assert t2.retry_delay == timedelta(seconds=3) def test_create_task_with_max_retries_and_no_retry_delay(self): with pytest.raises(ValueError): Task(max_retries=1, retry_delay=None) def test_create_task_with_retry_delay_and_no_max_retries(self): with pytest.raises( ValueError, match="A `max_retries` argument greater than 0 must be provided if specifying a retry delay", ): Task(retry_delay=timedelta(seconds=30)) @pytest.mark.parametrize("max_retries", [None, 0, False]) def test_create_task_with_retry_delay_and_invalid_max_retries(self, max_retries): with pytest.raises( ValueError, match="A `max_retries` argument greater than 0 must be provided if specifying a retry delay", ): Task(retry_delay=timedelta(seconds=30), max_retries=max_retries) def test_create_task_with_max_retry_override_to_0(self): with set_temporary_config( {"tasks.defaults.max_retries": 3, "tasks.defaults.retry_delay": 3} ) as config: process_task_defaults(config) t = Task(max_retries=0, retry_delay=None) assert t.max_retries == 0 assert t.retry_delay is None # max_retries set to 0 will not pull retry_delay from the config process_task_defaults(config) t = Task(max_retries=0) assert t.max_retries == 0 assert t.retry_delay is None def test_create_task_with_timeout(self): t1 = Task() assert t1.timeout is None with pytest.raises(TypeError): Task(timeout=0.5) t3 = Task(timeout=1) assert t3.timeout == 1 with set_temporary_config({"tasks.defaults.timeout": 3}) as config: # Cover type casting of task defaults process_task_defaults(config) t4 = Task() assert t4.timeout == 3 t4 = Task(timeout=timedelta(seconds=2)) assert t4.timeout == 2 with pytest.warns(UserWarning): t5 = Task(timeout=timedelta(seconds=3, milliseconds=1, 
microseconds=1)) assert t5.timeout == 3 def test_create_task_with_trigger(self): t1 = Task() assert t1.trigger is prefect.triggers.all_successful t2 = Task(trigger=prefect.triggers.all_failed) assert t2.trigger == prefect.triggers.all_failed def test_create_task_without_state_handler(self): assert Task().state_handlers == [] @pytest.mark.parametrize("handlers", [[lambda *a: 1], [lambda *a: 1, lambda *a: 2]]) def test_create_task_with_state_handler(self, handlers): assert Task(state_handlers=handlers).state_handlers == handlers def test_create_task_with_on_failure(self): t = Task(on_failure=lambda *args: None) assert len(t.state_handlers) == 1 def test_create_task_illegal_handler(self): with pytest.raises(TypeError): Task(state_handlers=lambda *a: 1) def test_class_instantiation_rejects_varargs(self): with pytest.raises(ValueError): class VarArgsTask(Task): def run(self, x, *y): pass def test_class_instantiation_rejects_mapped_kwarg(self): with pytest.raises(ValueError): class MappedTasks(Task): def run(self, x, mapped): pass with pytest.raises(ValueError): class MappedTasks(Task): def run(self, x, mapped=None): pass def test_class_instantiation_rejects_mapped_kwarg_decorator(self): with pytest.raises(ValueError): @task def run(x, mapped): pass with pytest.raises(ValueError): @task def run(x, mapped=None): pass def test_class_instantiation_rejects_upstream_tasks_kwarg(self): with pytest.raises(ValueError): class UpstreamTasks(Task): def run(self, x, upstream_tasks): pass with pytest.raises(ValueError): class UpstreamTasks(Task): def run(self, x, upstream_tasks=None): pass def test_class_instantiation_rejects_upstream_tasks_kwarg_decorator(self): with pytest.raises(ValueError): @task def run(x, upstream_tasks): pass with pytest.raises(ValueError): @task def run(x, upstream_tasks=None): pass def test_class_instantiation_rejects_flow_kwarg(self): with pytest.raises(ValueError): class FlowTasks(Task): def run(self, x, flow): pass with pytest.raises(ValueError): class FlowTasks(Task): def run(self, x, flow=None): pass def test_class_instantiation_rejects_flow_kwarg_decorator(self): with pytest.raises(ValueError): @task def run(x, flow): pass with pytest.raises(ValueError): @task def run(x, flow=None): pass def test_class_instantiation_rejects_task_args_kwarg(self): with pytest.raises(ValueError): class TaskArgs(Task): def run(self, x, task_args): pass with pytest.raises(ValueError): class TaskArgs(Task): def run(self, x, task_args=None): pass def test_class_instantiation_rejects_task_args_kwarg_decorator(self): with pytest.raises(ValueError): @task def run(x, task_args): pass with pytest.raises(ValueError): @task def run(x, task_args=None): pass def test_class_instantiation_raises_helpful_warning_for_unsupported_callables(self): with pytest.raises(ValueError, match="This function can not be inspected"): task(zip) def test_task_signature_generation(self): class Test(Task): def run(self, x: int, y: bool, z: int = 1, **kwargs): pass t = Test() sig = inspect.signature(t) # signature is a superset of the `run` method for k, p in inspect.signature(t.run).parameters.items(): assert sig.parameters[k] == p # extra kwonly args to __call__ also in sig assert set(sig.parameters).issuperset( {"mapped", "task_args", "upstream_tasks", "flow"} ) assert sig.return_annotation == "Task" # doesn't override class signature class_sig = inspect.signature(Test) assert "name" in class_sig.parameters def test_create_task_with_and_without_cache_for(self): t1 = Task() assert t1.cache_validator is never_use t2 = 
Task(cache_for=timedelta(days=1)) assert t2.cache_validator is duration_only t3 = Task(cache_for=timedelta(days=1), cache_validator=all_inputs) assert t3.cache_validator is all_inputs def test_bad_cache_kwarg_combo(self): with pytest.warns(UserWarning, match=".*Task will not be cached.*"): Task(cache_validator=all_inputs) def test_create_task_with_and_without_result(self): t1 = Task() assert t1.result is None t2 = Task(result=PrefectResult()) assert isinstance(t2.result, PrefectResult) def test_create_parameter_uses_prefect_result(self): p = Parameter("p") assert isinstance(p.result, PrefectResult) def test_create_task_with_and_without_checkpoint(self): t = Task() assert t.checkpoint is None s = Task(checkpoint=True) assert s.checkpoint is True def test_create_task_with_and_without_log_stdout(self): t = Task() assert t.log_stdout is False s = Task(log_stdout=True) assert s.log_stdout is True def test_create_task_with_task_run_name(self): t1 = Task() assert t1.task_run_name is None t2 = Task(task_run_name="test") assert t2.task_run_name == "test" t2 = Task(task_run_name=lambda: 42) assert t2.task_run_name() == 42 def test_task_has_logger(): t = Task() assert isinstance(t.logger, logging.Logger) assert t.logger.name == "prefect.Task" def test_task_has_logger_with_informative_name(): t = Task(name="foo") assert isinstance(t.logger, logging.Logger) assert t.logger.name == "prefect.foo" def test_task_produces_no_result(): t = Task() assert t.run() is None def test_task_is_not_iterable(): t = Task() with pytest.raises(TypeError): list(t) def test_tags_are_added_when_arguments_are_bound(): t1 = AddTask(tags=["math"]) t2 = AddTask(tags=["math"]) with prefect.context(tags=["test"]): with Flow(name="test"): t1.bind(1, 2) t3 = t2(1, 2) assert t1.tags == {"math", "test"} assert t3.tags == {"math", "test"} def test_tags(): t1 = Task() assert t1.tags == set() with pytest.raises(TypeError): Task(tags="test") t3 = Task(tags=["test", "test2", "test"]) assert t3.tags == {"test", "test2"} with prefect.context(tags=["test"]): t4 = Task() assert t4.tags == {"test"} with prefect.context(tags=["test1", "test2"]): t5 = Task(tags=["test3"]) assert t5.tags == {"test1", "test2", "test3"} class TestInputsOutputs: class add(Task): def run(self, x, y: int = 1) -> int: return x + y @task def mult(x, y: int = 1) -> int: return x * y def test_inputs(self): assert self.add().inputs() == dict( x=dict(type=Any, required=True, default=None), y=dict(type=int, required=False, default=1), ) def test_inputs_task_decorator(self): with Flow("test"): assert self.mult(x=1).inputs() == dict( x=dict(type=Any, required=True, default=None), y=dict(type=int, required=False, default=1), ) def test_outputs(self): assert self.add().outputs() == int def test_outputs_task_decorator(self): with Flow("test"): assert self.mult(x=1).outputs() == int class TestTaskCopy: def test_copy_copies(self): class CopyTask(Task): class_attr = 42 def __init__(self, instance_val, **kwargs): self.instance_val = instance_val super().__init__(**kwargs) def run(self, run_val): return (run_val, self.class_attr, self.instance_val) ct = CopyTask("username") other = ct.copy() assert isinstance(other, CopyTask) assert ct is not other assert hash(ct) != hash(other) assert ct != other assert other.run("pass") == ("pass", 42, "username") def test_copy_warns_if_dependencies_in_active_flow(self): t1 = Task() t2 = Task() with Flow(name="test") as flow: t1.set_dependencies(downstream_tasks=[t2]) with pytest.warns(UserWarning, match="You are making a copy"): 
flow.add_task(t1.copy()) with Flow(name="test") as flow: with pytest.warns(None) as rec: flow.add_task(t1.copy()) # no dependencies in this flow assert len(rec) == 0 def test_copy_changes_slug(self): t1 = Task(slug="test") t2 = t1.copy() assert t1.slug == "test" assert t1.slug != t2.slug def test_copy_accepts_task_args(self): t = Task() t2 = t.copy(name="new-task") t3 = t.copy(**{"max_retries": 4200}) assert t2.name == "new-task" assert t3.max_retries == 4200 def test_copy_accepts_slug_as_task_args(self): t = Task(slug="test") t2 = t.copy(slug="test-2") assert t.slug == "test" assert t2.slug == "test-2" def test_copy_appropriately_sets_result_target_if_target_provided(self): # https://github.com/PrefectHQ/prefect/issues/2588 @task(target="target", result=LocalResult(dir=".")) def X(): pass @task def Y(): pass with Flow("test"): x = X() y = Y(task_args=dict(target="target", result=LocalResult(dir="."))) assert x.result.location == "target" assert y.result.location == "target" class TestDependencies: def test_set_downstream(self): f = Flow(name="test") t1 = Task() t2 = Task() t1.set_downstream(t2, flow=f) assert Edge(t1, t2) in f.edges def test_set_downstream_context(self): with Flow(name="test") as f: t1 = Task() t2 = Task() t1.set_downstream(t2) assert Edge(t1, t2) in f.edges def test_set_downstream_no_flow(self): t1 = Task() t2 = Task() with pytest.raises(ValueError, match="No Flow was passed"): t1.set_downstream(t2) @pytest.mark.parametrize( "props", [{"mapped": True}, {"key": "x"}, {"key": "x", "mapped": True}] ) def test_set_downstream_with_properties(self, props): with Flow(name="test") as f: t1 = Task() t2 = Task() t1.set_downstream(t2, **props) assert Edge(t1, t2, **props) in f.edges def test_set_upstream(self): f = Flow(name="test") t1 = Task() t2 = Task() t2.set_upstream(t1, flow=f) assert Edge(t1, t2) in f.edges def test_set_upstream_context(self): with Flow(name="test") as f: t1 = Task() t2 = Task() t2.set_upstream(t1) assert Edge(t1, t2) in f.edges def test_set_upstream_no_flow(self): t1 = Task() t2 = Task() with pytest.raises(ValueError, match="No Flow was passed"): t2.set_upstream(t1) @pytest.mark.parametrize( "props", [{"mapped": True}, {"key": "x"}, {"key": "x", "mapped": True}] ) def test_set_upstream_with_properties(self, props): with Flow(name="test") as f: t1 = Task() t2 = Task() t2.set_upstream(t1, **props) assert Edge(t1, t2, **props) in f.edges def test_set_dependencies_stream_allows_chaining(self): t1 = Task() t2 = Task() t3 = Task() with Flow(name="test") as f: t1_result = t1() t2_result = t2() t3_result = t3() assert t1_result.set_downstream(t2_result) is t1_result assert t3_result.set_upstream(t2_result) is t3_result assert ( t3_result.set_dependencies(f, upstream_tasks=[t1_result]) is t3_result ) class TestSerialization: def test_serialization(self): t = Task(name="test") s = t.serialize() assert isinstance(s, dict) assert s["slug"] == t.slug assert s["type"] == "prefect.core.task.Task" assert s["name"] == t.name def test_subclass_serialization(self): class NewTask(Task): pass s = NewTask().serialize() assert isinstance(s, dict) assert s["type"].endswith(".NewTask") def test_deserialization(self): t = Task(name="test") s = t.serialize() t2 = prefect.serialization.task.TaskSchema().load(s) assert isinstance(t2, Task) assert t2.name == t.name def test_subclass_deserialization(self): class NewTask(Task): pass t = NewTask(name="test") s = t.serialize() t2 = prefect.serialization.task.TaskSchema().load(s) assert type(t2) is Task assert not isinstance(t2, NewTask) 
assert t2.name == t.name def test_parameter_serialization(self): p = Parameter(name="p") serialized = p.serialize() assert serialized["name"] == "p" assert serialized["default"] is None assert serialized["required"] is True def test_parameter_deserialization(self): p = Parameter(name="p") serialized = p.serialize() p2 = prefect.serialization.task.ParameterSchema().load(serialized) assert isinstance(p2, Parameter) assert p2.name == p.name assert p2.required == p.required assert p2.default == p.default class TestTaskArgs: def test_task_args_raises_for_non_attrs(self): t = Task() with Flow(name="test"): with pytest.raises(AttributeError, match="foo"): t(task_args={"foo": "bar"}) @pytest.mark.parametrize( "attr,val", [ ("name", "foo-bar"), ("slug", "foo-bar"), ("max_retries", 4200), ("retry_delay", timedelta(seconds=1)), ("timeout", 12), ("skip_on_upstream_skip", False), ("cache_for", timedelta(seconds=1)), ], ) def test_task_args_sets_new_attrs(self, attr, val): t = Task() with Flow(name="test") as f: t(task_args={attr: val}) assert getattr(f.tasks.pop(), attr) == val @pytest.mark.parametrize( "attr,val", [ ("name", "foo-bar"), ("slug", "foo-bar"), ("max_retries", 4200), ("retry_delay", timedelta(seconds=1)), ("timeout", 12), ("skip_on_upstream_skip", False), ("cache_for", timedelta(seconds=1)), ], ) def test_task_args_sets_new_attrs_on_mapped_tasks(self, attr, val): t = Task() with Flow(name="test") as f: t.map(upstream_tasks=[1, 2, 3, 4], task_args={attr: val}) tasks = f.get_tasks(name="Task") assert all(getattr(tt, attr) == val for tt in tasks) def test_tags_are_appended_to_when_updating_with_task_args(self): t = AddTask(tags=["math"]) with prefect.context(tags=["test"]): with Flow(name="test"): t2 = t(1, 2, task_args={"name": "test-tags", "tags": ["new-tag"]}) assert t2.tags == {"math", "test", "new-tag"} def test_task_check_mapped_args_are_subscriptable_in_advance(self): t = Task() with pytest.raises(TypeError): with Flow(name="test"): t.map({1, 2, 3, 4}) class TestTaskNout: def test_nout_defaults_to_none(self): @task def test(self): pass assert test.nout is None def test_nout_provided_explicitly(self): @task(nout=2) def test(self): pass assert test.nout == 2 @pytest.mark.parametrize( "ret_type, nout", [ (int, None), (Tuple, None), (Tuple[()], 0), (Tuple[int, ...], None), (Tuple[int, int], 2), (Tuple[int, float, str], 3), ], ) def test_nout_inferred_from_signature(self, ret_type, nout): @task def test(a) -> ret_type: pass assert test.nout == nout def test_nout_none_not_iterable(self): @task def test(a): return a + 1, a - 1 with Flow("test"): with pytest.raises(TypeError, match="Task is not iterable"): a, b = test(1) def test_nout_provided_is_iterable(self): @task(nout=2) def test(a): return a + 1, a - 1 with Flow("test") as flow: a, b = test(1) res = flow.run() assert res.result[a].result == 2 assert res.result[b].result == 0 def test_nout_not_set_on_mapped_tasks(self): @task(nout=2) def test(a): return a + 1, a - 1 with Flow("test"): with pytest.raises(TypeError, match="Task is not iterable"): a, b = test.map(range(10)) @pytest.mark.skip("Result handlers not yet deprecated") def test_cache_options_show_deprecation(): with pytest.warns( UserWarning, match=r"all cache_\* options on a Task will be deprecated*" ): Task(cache_for=object()) with pytest.warns( UserWarning, match=r"all cache_\* options on a Task will be deprecated*" ): Task(cache_validator=object()) with pytest.warns( UserWarning, match=r"all cache_\* options on a Task will be deprecated*" ): Task(cache_key=object()) def 
test_passing_task_to_task_constructor_raises_helpful_warning(): class MyTask(Task): def __init__(self, a, b, **kwargs): self.a = a self.b = b super().__init__(**kwargs) with Flow("test"): a = Task()() with pytest.warns( UserWarning, match="A Task was passed as an argument to MyTask" ): t = MyTask(1, a)() # Warning doesn't stop normal operation assert t.a == 1 assert t.b == a def test_task_init_uses_reserved_attribute_raises_helpful_warning(): class MyTask(Task): def __init__(self, **kwargs): self.a = 1 self.target = "oh no!" super().__init__(**kwargs) with Flow("test"): with pytest.warns(UserWarning, match="`MyTask` sets a `target` attribute"): MyTask() @pytest.mark.parametrize("use_function_task", [True, False]) def test_task_called_outside_flow_context_raises_helpful_error(use_function_task): if use_function_task: @prefect.task def fn(x): return x else: class Fn(Task): def run(self, x): return x fn = Fn() with pytest.raises( ValueError, match=f"Could not infer an active Flow context while creating edge to {fn}", ) as exc_info: fn(1) run_call = "`fn.run(...)`" if use_function_task else "`Fn(...).run(...)`" assert ( "If you're trying to run this task outside of a Flow context, " f"you need to call {run_call}" in str(exc_info) ) def test_task_call_with_self_succeeds(): import dataclasses @dataclasses.dataclass class TestClass: count: int def increment(self): self.count = self.count + 1 seconds_task = task( TestClass.increment, target="{{task_slug}}_{{map_index}}", result=LocalResult() ) initial = TestClass(count=0) with Flow("test") as flow: seconds_task(initial) assert flow.run().is_successful()
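# The test module above exercises the Prefect 1.x Task constructor options
# (retries, caching, nout, tags). As a minimal, illustrative sketch of that
# same API (not part of the test suite; the task bodies and flow name are
# made up), a flow using a few of the asserted-on options might look like:
from datetime import timedelta

from prefect import Flow, task


@task(max_retries=3, retry_delay=timedelta(seconds=10), tags=["math"])
def add(x, y=1):
    return x + y


@task(nout=2)
def split(n):
    # Returning a tuple with nout=2 allows unpacking inside the flow context.
    return n + 1, n - 1


with Flow("example-flow") as flow:
    total = add(1, 2)
    hi, lo = split(total)

state = flow.run()
assert state.is_successful()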
''' ''' import re from billy.scrape.actions import Rule, BaseCategorizer committees = [ u"Veterans' Affairs", u'Agriculture and Agri-business Committee', u'Agriculture', u'Banking and Insurance', u'Banking', u'Children, Juveniles and Other Issues', u'Constitutional Revision', u'Council of Finance and Administration', u'Economic Development and Small Business', u'Economic Development', u'Education Accountability', u'Education', u'Employee Suggestion Award Board', u'Energy, Industry and Labor', u'Energy, Industry and Labor/Economic Development and Small Business', u'Enrolled Bills', u'Equal Pay Commission', u'Finance', u'Forest Management Review Commission', u'Government and Finance', u'Government Operations', u'Government Organization', u'Health and Human Resources Accountability', u'Health and Human Resources', u'Health', u'Homeland Security', u'House Rules', u'House Select Committee on Redistricting', u'Infrastructure', u'Insurance', u'Intern Committee', u'Interstate Cooperation', u'Judiciary', u'Law Institute', u'Minority Issues', u'Natural Resources', u'Outcomes-Based Funding Models in Higher Education', u'Parks, Recreation and Natural Resources', u'PEIA, Seniors and Long Term Care', u'Pensions and Retirement', u'Political Subdivisions', u'Post Audits', u'Regional Jail and Correctional Facility Authority', u'Roads and Transportation', u'Rule-Making Review Committee', u'Senior Citizen Issues', u'Special Investigations', u'Technology', u'Veterans Affairs', u'Veterans Affairs/ Homeland Security', u'Water Resources', u'Workforce Investment for Economic Development', ] committees_rgx = '(%s)' % '|'.join(sorted(committees, key=len, reverse=True)) rules = ( Rule(['Communicated to Senate', 'Senate received', 'Ordered to Senate'], actor='upper'), Rule(['Communicated to House', 'House received', 'Ordered to House'], actor='lower'), Rule('Read 1st time', 'bill:reading:1'), Rule('Read 2nd time', 'bill:reading:2'), Rule('Read 3rd time', 'bill:reading:3'), Rule('Filed for introduction', 'bill:filed'), Rule('^Introduced in', 'bill:introduced'), Rule(['Passed Senate', 'Passed House'], 'bill:passed'), Rule(['Reported do pass', 'With amendment, do pass'], 'committee:passed'), Rule([u', but first to .+?; then (?P<committees>[^;]+)', u'To (?P<committees>.+?) then']), Rule(u'(?i)voice vote', voice_vote=True), Rule([u'Amendment rejected'], [u'amendment:failed']), Rule([u'To Governor'], [u'governor:received']), Rule([u'Passed House'], [u'bill:passed']), Rule([u'Read 2nd time'], [u'bill:reading:2']), Rule([u', but first to (?P<committees>[^;]+)', u'Rejected'], []), Rule([u'Approved by Governor \d{1,2}/\d{1,2}/\d{1,2}$'], [u'governor:signed']), Rule([u'^Introduced'], [u'bill:introduced']), Rule([u'To .+? then (?P<committees>.+)'], []), Rule([u'^Filed for intro'], [u'bill:filed']), Rule([u'(?i)referred to (?P<committees>.+)'], [u'committee:referred']), Rule(u'Senator (?P<legislators>.+? )requests ' u'to be removed as sponsor of bill'), Rule([u'To House (?P<committees>[A-Z].+)'], [u'committee:referred']), Rule([u'Passed Senate'], [u'bill:passed']), Rule([u'(?i)committed to (?P<committees>.+?) on'], []), Rule([u'Vetoed by Governor'], [u'governor:vetoed']), Rule([u'(?i)House concurred in senate amendment'], []), Rule([u'Be rejected'], [u'bill:failed']), Rule([u'To .+? 
then (?P<committees>.+) then', u'reading to (?P<committees>.+)']), Rule([u'Adopted by'], [u'bill:passed']), Rule([u'House appointed conferees: (?P<legislators>.+)'], []), Rule([u'Read 3rd time'], [u'bill:reading:3']), Rule([u'Be adopted$'], [u'bill:passed']), Rule([u'(?i)originating in (House|Senate) (?P<committees>.+)', u'(?i)to house (?P<committees>.+)']), Rule([u'Read 1st time'], [u'bill:reading:1']), Rule([u'To .+? then .+? then (?P<committees>.+)']), Rule(r'To %s' % committees_rgx, 'committee:referred') ) class Categorizer(BaseCategorizer): rules = rules def categorize(self, text): '''Wrap categorize and add boilerplate committees. ''' attrs = BaseCategorizer.categorize(self, text) committees = attrs['committees'] for committee in re.findall(committees_rgx, text, re.I): if committee not in committees: committees.append(committee) return attrs def post_categorize(self, attrs): res = set() if 'legislators' in attrs: for text in attrs['legislators']: rgx = r'(,\s+(?![a-z]\.)|\s+and\s+)' legs = re.split(rgx, text) legs = filter(lambda x: x not in [', ', ' and '], legs) res |= set(legs) attrs['legislators'] = list(res) res = set() if 'committees' in attrs: for text in attrs['committees']: # Strip stuff like "Rules on 1st reading" for text in text.split('then'): text = re.sub(r' on .+', '', text) text = text.strip() res.add(text) attrs['committees'] = list(res) return attrs
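# Rough usage sketch (the action string below is invented): the committee
# regex built in the module above can be exercised on its own to see which
# committee names the categorizer would append as boilerplate.
import re

action = "To House Judiciary then Finance"
found = re.findall(committees_rgx, action, re.I)  # assumes committees_rgx from the module above is in scope
print(found)  # expected: ['Judiciary', 'Finance']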
from polls.models import Poll, Choice from django.contrib import admin class ChoiceInline(admin.TabularInline): model = Choice extra = 3 class PollAdmin(admin.ModelAdmin): fieldsets = [ (None, {'fields': ['question']}), ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}), ] inlines = [ChoiceInline] list_display = ('question', 'pub_date', 'was_published_recently') list_filter = ['pub_date'] search_fields = ['question'] date_hierarchy = 'pub_date' admin.site.register(Poll, PollAdmin)
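# The admin configuration above assumes Poll and Choice models along the
# lines of the Django tutorial. The fields below are inferred from the admin
# options (question, pub_date, was_published_recently); this is a sketch,
# not code taken from the polls app itself.
import datetime

from django.db import models
from django.utils import timezone


class Poll(models.Model):
    question = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def was_published_recently(self):
        # Referenced by list_display in the admin above.
        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)


class Choice(models.Model):
    poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)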
"""Fix incompatible renames Fixes: * sys.maxint -> sys.maxsize """ # Author: Christian Heimes # based on Collin Winter's fix_import # Local imports z .. zaimportuj fixer_base z ..fixer_util zaimportuj Name, attr_chain MAPPING = {"sys": {"maxint" : "maxsize"}, } LOOKUP = {} def alternates(members): zwróć "(" + "|".join(map(repr, members)) + ")" def build_pattern(): #bare = set() dla module, replace w list(MAPPING.items()): dla old_attr, new_attr w list(replace.items()): LOOKUP[(module, old_attr)] = new_attr #bare.add(module) #bare.add(old_attr) #uzyskaj """ # import_name< 'import' (module=%r # | dotted_as_names< any* module=%r any* >) > # """ % (module, module) uzyskaj """ import_from< 'from' module_name=%r 'import' ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > """ % (module, old_attr, old_attr) uzyskaj """ power< module_name=%r trailer< '.' attr_name=%r > any* > """ % (module, old_attr) #uzyskaj """bare_name=%s""" % alternates(bare) klasa FixRenames(fixer_base.BaseFix): BM_compatible = Prawda PATTERN = "|".join(build_pattern()) order = "pre" # Pre-order tree traversal # Don't match the node jeżeli it's within another match def match(self, node): match = super(FixRenames, self).match results = match(node) jeżeli results: jeżeli any(match(obj) dla obj w attr_chain(node, "parent")): zwróć Nieprawda zwróć results zwróć Nieprawda #def start_tree(self, tree, filename): # super(FixRenames, self).start_tree(tree, filename) # self.replace = {} def transform(self, node, results): mod_name = results.get("module_name") attr_name = results.get("attr_name") #bare_name = results.get("bare_name") #import_mod = results.get("module") jeżeli mod_name oraz attr_name: new_attr = LOOKUP[(mod_name.value, attr_name.value)] attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
import os import pytest THIS_DIR = os.path.dirname(os.path.abspath(__file__)) if __name__ == '__main__': # for profiling purposes... pytest.main(os.path.join(THIS_DIR, '../tests/test_parsyfiles_by_type.py'))
""" Launching point and supporting functions for database management tools. This module serves as the launching point for the database management tools. Backend-specific implementations are located within their specific modules and common functions and methods are included in this file. """ import numpy as np from typing import Tuple, Dict from phoebe_shelves_clt.csv_backend import manage_csv from phoebe_shelves_clt.sql_backend import manage_sql from phoebe_shelves_clt.utils import data_model from phoebe_shelves_clt.utils import sql_api def prompt_for_rating(prompt: str): """Prompt user for an integer rating (max 5). Args: prompt: Prompt that user sees on the command line Outputs: rating (int | float): Intger rating or np.nan if empty string is passed """ rating = input(prompt) while rating not in {"", "1", "2", "3", "4", "5"}: rating = input("Choose an integer between 1 and 5 or leave blank: ") # Format rating rating = int(rating) if rating != "" else np.nan return(rating) def prompt_for_title(backend: str, *args) -> Tuple[str, Dict[str, int]]: """ Prompt for a title from the books table and return the title and ID Prompts the user to provide a title and returns the title and ID of any books that match the title *exactly*. Args: backend: Backend to use Positional Args: (CSVDataModel): Current instance of the CSV backend database (psycopg2.connection): Connection to the PostgreSQL database Returns: A tuple containing the following: title: Title of the book provided by the user title_results: Dictionary mapping possible titles to their ID's """ title = input("Please enter the book title: ") if backend == "csv": title_results = args[0].get_books_dict(title) else: query = f"SELECT title, id FROM books WHERE title ILIKE '{title}'" title_results = dict(sql_api.execute_query(args[0], query, "to_list")) # type: ignore return(title, title_results) def prompt_for_author(backend: str, *args) -> Tuple[str, Dict]: """ Prompt for an author from the authors table and return the name and ID Prompts the user to provide an author's last name and returns the names and ID's of possible matches based on the last name. Args: backend: Backend to use Positional Args: (CSVDataModel): Current instance of the CSV backend database (psycopg2.connection): Connection to the PostgreSQL database Returns: A tuple containing the following: last_name: Last name provided by the user author_results: Dictionary mapping possible authors to their ID's """ last_name = input("Please enter the author's last name: ") if backend == "csv": author_results = args[0].get_authors_dict(last_name) else: author_query = (sql_api.read_query('author_filter').format(last_name)) author_results = dict(sql_api.execute_query(args[0], author_query, "to_list")) # type: ignore return(last_name, author_results) def prompt_for_genre(backend: str, *args) -> Tuple[str, Dict]: """ Prompt for an genre from the genres table and return the name and ID Prompts user to enter a genre name. It then retrieves the potential matching options for further processing. 
Args: backend: Backend to use Positional Args: (CSVDataModel): Current instance of the CSV backend database (psycopg2.connection): Connection to the PostgreSQL database Returns: A tuple containing the following: genre_name: Genre name provided by the user genreresults: Dictionary mapping possible genres to their ID's """ genre_name = input("Please enter the genre name: ") if backend == "csv": genre_results = args[0].get_genres_dict(genre_name) else: genre_query = f"SELECT name, id from genres where name ilike '{genre_name}'" genre_results = dict(sql_api.execute_query(args[0], genre_query, "to_list")) # type: ignore return(genre_name, genre_results) def manage_module(backend: str, db_select: str, mode: str, **kwargs): """ Launch management workflows for either backend Launch the mangement workflows for either the CSV or SQL backends Args: backend: Backend to use db_select: Database to manage mode: Management mode Keyword Args: data_directory (string): Path to CSV backend data directory sql_configs (Dict): SQL server configurations """ if backend == "csv": model = data_model.CSVDataModel(kwargs["data_directory"]) manage_csv.main(db_select, mode, model) else: manage_sql.main(db_select, mode, kwargs["sql_configs"])
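# A small, hedged usage sketch of the rating prompt above: patching input()
# simulates a user who first types an out-of-range value and then "4".
# It assumes prompt_for_rating from the module above is importable.
import builtins
from unittest import mock

with mock.patch.object(builtins, "input", side_effect=["7", "4"]):
    assert prompt_for_rating("Rating: ") == 4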
#!/usr/bin/env python3 import os import time import itertools import contextlib import fanout_utils def fanout_hls(context): context += { "starttime": int(time.time()), } cleanup(context) context += calculate_map_and_varmap(context) generate_master_playlists(context) fanout(context) print("Cleaning up") cleanup(context) def cleanup(c): with contextlib.suppress(FileExistsError): os.mkdir(os.path.join(c.hls_write_path, c.stream)) with contextlib.suppress(FileNotFoundError): fanout_utils.remove_glob(os.path.join( c.hls_write_path, c.stream, "*.ts")) fanout_utils.remove_glob(os.path.join( c.hls_write_path, "%s/*.m3u8" % c.stream)) fanout_utils.remove_glob(os.path.join( c.hls_write_path, "%s_*.m3u8" % c.stream)) def calculate_map_and_varmap(c): first_audio_stream_index = len(c.video_tracks) # HD+Native maps = ["-map 0:v:0 -map 0:a:0"] varmaps = ["v:0,a:0"] if 'SD' in c.video_tracks: # SD+Native maps += ["-map 0:v:1 -map 0:a:0"] varmaps += ["v:1,a:1"] if 'Slides' in c.video_tracks: # Slides+Native maps += ["-map 0:v:2 -map 0:a:0"] varmaps += ["v:2,a:2"] if 'Translated' in c.audio_tracks: # Translated maps += ["-map 0:a:1"] varmaps += ["a:%d" % (first_audio_stream_index+0)] if 'Translated-2' in c.audio_tracks: # Translated-2 maps += ["-map 0:a:2"] varmaps += ["a:%d" % (first_audio_stream_index+1)] return { "maps": maps, "varmaps": varmaps, "first_audio_stream_index": first_audio_stream_index, } def generate_master_playlists(c): for video_track, audio_track in itertools.product(c.video_tracks, c.audio_tracks): playlist_context = c + { "video_track": video_track, "audio_track": audio_track, } master_playlist = fanout_utils.format_and_strip(playlist_context, """ #EXTM3U #EXT-X-VERSION:3 #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="Untranslated",DEFAULT={{ 'YES' if audio_track == 'Native' else 'NO' }} {% if 'Translated' in audio_tracks %} #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="Translation 1",DEFAULT={{ 'YES' if audio_track == 'Translated' else 'NO' }},URI="{{ stream }}/chunks_{{ first_audio_stream_index+0 }}.m3u8" {% endif %} {% if 'Translated-2' in audio_tracks %} #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="Translation 2",DEFAULT={{ 'YES' if audio_track == 'Translated-2' else 'NO' }},URI="{{ stream }}/chunks_{{ first_audio_stream_index+1 }}.m3u8" {% endif %} {% if video_track in ['HD'] %} #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=5000000,RESOLUTION=1920x1080,CODECS="avc1.4d0028,mp4a.40.2",AUDIO="audio" {{ stream }}/chunks_0.m3u8 {% endif %} {% if 'SD' in video_tracks and video_track in ['HD', 'SD'] %} #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=800000,RESOLUTION=1024x576,CODECS="avc1.4d0028,mp4a.40.2",AUDIO="audio" {{ stream }}/chunks_1.m3u8 {% endif %} {% if 'Slides' in video_tracks and video_track in ['HD', 'SD', 'Slides'] %} #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=100000,RESOLUTION=1024x576,CODECS="avc1.4d0028,mp4a.40.2",AUDIO="audio" {{ stream }}/chunks_2.m3u8 {% endif %} """) master_playlist_file = os.path.join( c.hls_write_path, "%s_%s_%s.m3u8" % (c.stream, audio_track.lower(), video_track.lower()) ) print("Writing Master Playlist-File %s" % master_playlist_file) with open(master_playlist_file, "w") as f: f.write(master_playlist) def fanout(c): command = fanout_utils.format_and_strip(c, """ ffmpeg -v warning -nostats -nostdin -y -analyzeduration 3000000 -i {{ pull_url }} -c:v copy -c:a copy {{ maps | join("\n\t") }} -hls_time 6 -hls_list_size 200 -hls_segment_filename "{{ hls_write_path }}/{{ stream }}/{{ starttime }}-%d_%v.ts" -hls_flags 
+delete_segments+omit_endlist+independent_segments -var_stream_map '{{ varmaps | join(" ") }}' "{{ hls_write_path }}/{{ stream }}/chunks_%v.m3u8" """) fanout_utils.call(command) if __name__ == "__main__": parser = fanout_utils.setup_argparse(name="hls") parser.add_argument('--hls_write_path', metavar='PATH', type=str, help='Path to write the HLS-Pieces and Master-Playlists to') args = parser.parse_args() fanout_utils.mainloop(name="hls", transcoding_stream="h264", calback=fanout_hls, args=args)
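# Worked sketch of calculate_map_and_varmap above. The real script passes a
# richer context object that supports merging with "+"; SimpleNamespace is a
# stand-in carrying only the attributes the function reads.
from types import SimpleNamespace

ctx = SimpleNamespace(video_tracks=["HD", "SD"], audio_tracks=["Native", "Translated"])
result = calculate_map_and_varmap(ctx)
print(result["maps"])     # ['-map 0:v:0 -map 0:a:0', '-map 0:v:1 -map 0:a:0', '-map 0:a:1']
print(result["varmaps"])  # ['v:0,a:0', 'v:1,a:1', 'a:2']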
try: import unittest2 as unittest except ImportError: import unittest import sys sys.path.append('..') from pyrabbit import http class TestHTTPClient(unittest.TestCase): """ Except for the init test, these are largely functional tests that require a RabbitMQ management API to be available on localhost """ testhost = 'localhost:15672' testuser = 'guest' testpass = 'guest' def setUp(self): self.c = http.HTTPClient(self.testhost, self.testuser, self.testpass) def test_client_init(self): c = http.HTTPClient(self.testhost, self.testuser, self.testpass) self.assertIsInstance(c, http.HTTPClient) def test_client_init_sets_credentials(self): self.assertEqual(self.c.auth.username, self.testuser) self.assertEqual(self.c.auth.password, self.testpass) def test_client_init_sets_default_timeout(self): self.assertEqual(self.c.timeout, 5) def test_client_init_with_timeout(self): c = http.HTTPClient(self.testhost, self.testuser, self.testpass, 1) self.assertEqual(c.timeout, 1)
import datetime import json from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.db.models import Q from django.forms.formsets import formset_factory from django.shortcuts import get_object_or_404, redirect from django.utils.datastructures import MultiValueDictKeyError from django.utils.translation import ugettext as _, ungettext as ngettext from olympia import amo from olympia.constants import editors as rvw from olympia.access import acl from olympia.addons.models import Addon, Persona from olympia.amo.decorators import json_view, post_required from olympia.amo.urlresolvers import reverse from olympia.amo.utils import paginate, render from olympia.devhub.models import ActivityLog from olympia.editors import forms from olympia.editors.models import RereviewQueueTheme, ReviewerScore, ThemeLock from olympia.editors.views import base_context as context from olympia.search.views import name_only_query from olympia.zadmin.decorators import admin_required from .decorators import personas_reviewer_required QUEUE_PER_PAGE = 100 @personas_reviewer_required def home(request): data = context( reviews_total=ActivityLog.objects.total_reviews(theme=True)[:5], reviews_monthly=ActivityLog.objects.monthly_reviews(theme=True)[:5], queue_counts=queue_counts_themes(request) ) return render(request, 'editors/themes/home.html', data) def queue_counts_themes(request): counts = { 'themes': Persona.objects.no_cache() .filter(addon__status=amo.STATUS_PENDING) .count(), } if acl.action_allowed(request, 'SeniorPersonasTools', 'View'): counts.update({ 'flagged_themes': (Persona.objects.no_cache() .filter(addon__status=amo.STATUS_REVIEW_PENDING) .count()), 'rereview_themes': RereviewQueueTheme.objects.count() }) rv = {} if isinstance(type, basestring): return counts[type] for k, v in counts.items(): if not isinstance(type, list) or k in type: rv[k] = v return rv @personas_reviewer_required def themes_list(request, flagged=False, rereview=False): """Themes queue in list format.""" themes = [] if flagged: # TODO (ngoke): rename to STATUS_FLAGGED. 
themes = Addon.objects.filter(status=amo.STATUS_REVIEW_PENDING, type=amo.ADDON_PERSONA, persona__isnull=False) elif rereview: themes = [ rqt.theme.addon for rqt in RereviewQueueTheme.objects.select_related('theme__addon')] else: themes = Addon.objects.filter(status=amo.STATUS_PENDING, type=amo.ADDON_PERSONA, persona__isnull=False) search_form = forms.ThemeSearchForm(request.GET) per_page = request.GET.get('per_page', QUEUE_PER_PAGE) pager = paginate(request, themes, per_page) return render(request, 'editors/themes/queue_list.html', context( **{'addons': pager.object_list, 'flagged': flagged, 'pager': pager, 'rereview': rereview, 'theme_search_form': search_form, 'statuses': dict((k, unicode(v)) for k, v in amo.STATUS_CHOICES_API.items()), 'tab': ('rereview_themes' if rereview else 'flagged_themes' if flagged else 'pending_themes')})) def _themes_queue(request, flagged=False, rereview=False): """Themes queue in interactive format.""" themes = _get_themes(request, request.user, flagged=flagged, rereview=rereview) ThemeReviewFormset = formset_factory(forms.ThemeReviewForm) formset = ThemeReviewFormset( initial=[{'theme': _rereview_to_theme(rereview, theme).id} for theme in themes]) return render(request, 'editors/themes/queue.html', context( **{'actions': get_actions_json(), 'formset': formset, 'flagged': flagged, 'reject_reasons': rvw.THEME_REJECT_REASONS, 'rereview': rereview, 'reviewable': True, 'theme_formsets': zip(themes, formset), 'theme_count': len(themes), 'tab': ( 'flagged' if flagged else 'rereview' if rereview else 'pending')})) def _get_themes(request, reviewer, flagged=False, rereview=False): """Check out themes. :param flagged: Flagged themes (amo.STATUS_REVIEW_PENDING) :param rereview: Re-uploaded themes (RereviewQueueTheme) """ num = 0 themes = [] locks = [] status = (amo.STATUS_REVIEW_PENDING if flagged else amo.STATUS_PUBLIC if rereview else amo.STATUS_PENDING) if rereview: # Rereview themes. num, themes, locks = _get_rereview_themes(reviewer) else: # Pending and flagged themes. locks = ThemeLock.objects.no_cache().filter( reviewer=reviewer, theme__addon__status=status) num, themes = _calc_num_themes_checkout(locks) if themes: return themes themes = Persona.objects.no_cache().filter( addon__status=status, themelock=None) # Don't allow self-reviews. if (not settings.ALLOW_SELF_REVIEWS and not acl.action_allowed(request, 'Admin', '%')): if rereview: themes = themes.exclude(theme__addon__addonuser__user=reviewer) else: themes = themes.exclude(addon__addonuser__user=reviewer) # Check out themes by setting lock. themes = list(themes)[:num] expiry = get_updated_expiry() for theme in themes: ThemeLock.objects.create(theme=_rereview_to_theme(rereview, theme), reviewer=reviewer, expiry=expiry) # Empty pool? Go look for some expired locks. if not themes: expired_locks = ThemeLock.objects.filter( expiry__lte=datetime.datetime.now(), theme__addon__status=status)[:rvw.THEME_INITIAL_LOCKS] # Steal expired locks. for lock in expired_locks: lock.reviewer = reviewer lock.expiry = expiry lock.save() if expired_locks: locks = expired_locks if rereview: return (RereviewQueueTheme.objects.no_cache() .filter(theme__themelock__reviewer=reviewer) .exclude(theme__addon__status=amo.STATUS_REJECTED)) # New theme locks may have been created, grab all reviewer's themes again. 
return [lock.theme for lock in locks] @json_view @personas_reviewer_required def themes_search(request): search_form = forms.ThemeSearchForm(request.GET) if search_form.is_valid(): q = search_form.cleaned_data['q'] rereview = search_form.cleaned_data['queue_type'] == 'rereview' flagged = search_form.cleaned_data['queue_type'] == 'flagged' # ES query on name. themes = Addon.search().filter(type=amo.ADDON_PERSONA) if rereview: themes = themes.filter(has_theme_rereview=True) else: themes = themes.filter(status=(amo.STATUS_REVIEW_PENDING if flagged else amo.STATUS_PENDING), has_theme_rereview=False) themes = themes.query(or_=name_only_query(q))[:100] now = datetime.datetime.now() reviewers = [] for theme in themes: try: themelock = theme.persona.themelock if themelock.expiry > now: reviewers.append(themelock.reviewer.email) else: reviewers.append('') except ObjectDoesNotExist: reviewers.append('') themes = list(themes.values_dict('name', 'slug', 'status')) for theme, reviewer in zip(themes, reviewers): # Collapse single value fields from a list. theme['id'] = theme['id'][0] theme['slug'] = theme['slug'][0] theme['status'] = theme['status'][0] # Dehydrate. theme['reviewer'] = reviewer return {'objects': themes, 'meta': {'total_count': len(themes)}} @personas_reviewer_required def themes_queue(request): # By default, redirect back to the queue after a commit. request.session['theme_redirect_url'] = reverse( 'editors.themes.queue_themes') return _themes_queue(request) @admin_required(theme_reviewers=True) def themes_queue_flagged(request): # By default, redirect back to the queue after a commit. request.session['theme_redirect_url'] = reverse( 'editors.themes.queue_flagged') return _themes_queue(request, flagged=True) @admin_required(theme_reviewers=True) def themes_queue_rereview(request): # By default, redirect back to the queue after a commit. request.session['theme_redirect_url'] = reverse( 'editors.themes.queue_rereview') return _themes_queue(request, rereview=True) def _rereview_to_theme(rereview, theme): """ Follows foreign key of RereviewQueueTheme object to theme if in rereview queue. """ if rereview: return theme.theme return theme def _calc_num_themes_checkout(locks): """ Calculate number of themes to check out based on how many themes user currently has checked out. """ current_num = locks.count() if current_num < rvw.THEME_INITIAL_LOCKS: # Check out themes from the pool if none or not enough checked out. return rvw.THEME_INITIAL_LOCKS - current_num, [] else: # Update the expiry on currently checked-out themes. 
locks.update(expiry=get_updated_expiry()) return 0, [lock.theme for lock in locks] def _get_rereview_themes(reviewer): """Check out re-uploaded themes.""" locks = (ThemeLock.objects.select_related().no_cache() .filter(reviewer=reviewer, theme__rereviewqueuetheme__isnull=False) .exclude(theme__addon__status=amo.STATUS_REJECTED)) num, updated_locks = _calc_num_themes_checkout(locks) if updated_locks: locks = updated_locks themes = (RereviewQueueTheme.objects.no_cache() .filter(theme__addon__isnull=False, theme__themelock=None) .exclude(theme__addon__status=amo.STATUS_REJECTED)) return num, themes, locks @post_required @personas_reviewer_required def themes_commit(request): ThemeReviewFormset = formset_factory(forms.ThemeReviewForm) formset = ThemeReviewFormset(request.POST) scores = [] for form in formset: try: lock = ThemeLock.objects.filter( theme_id=form.data[form.prefix + '-theme'], reviewer=request.user) except MultiValueDictKeyError: # Address off-by-one error caused by management form. continue if lock and form.is_valid(): scores.append(form.save()) # Success message. points = sum(scores) success = ngettext( # L10n: {0} is the number of reviews. {1} is the points just earned. # L10n: {2} is the total number of points the reviewer has overall. '{0} theme review successfully processed (+{1} points, {2} total).', '{0} theme reviews successfully processed (+{1} points, {2} total).', len(scores)).format(len(scores), points, ReviewerScore.get_total(request.user)) amo.messages.success(request, success) if 'theme_redirect_url' in request.session: return redirect(request.session['theme_redirect_url']) else: return redirect(reverse('editors.themes.queue_themes')) @personas_reviewer_required def release_locks(request): ThemeLock.objects.filter(reviewer=request.user).delete() amo.messages.success( request, _('Your theme locks have successfully been released. ' 'Other reviewers may now review those released themes. ' 'You may have to refresh the page to see the changes reflected in ' 'the table below.')) return redirect(reverse('editors.themes.list')) @personas_reviewer_required def themes_single(request, slug): """ Like a detail page, manually review a single theme if it is pending and isn't locked. """ reviewer = request.user reviewable = True # Don't review an already reviewed theme. theme = get_object_or_404(Persona, addon__slug=slug) if (theme.addon.status != amo.STATUS_PENDING and not theme.rereviewqueuetheme_set.all()): reviewable = False if (not settings.ALLOW_SELF_REVIEWS and not acl.action_allowed(request, 'Admin', '%') and theme.addon.has_author(request.user)): reviewable = False else: # Don't review a locked theme (that's not locked to self). try: lock = theme.themelock if (lock.reviewer.id != reviewer.id and lock.expiry > datetime.datetime.now()): reviewable = False elif (lock.reviewer.id != reviewer.id and lock.expiry < datetime.datetime.now()): # Steal expired lock. lock.reviewer = reviewer lock.expiry = get_updated_expiry() lock.save() else: # Update expiry. lock.expiry = get_updated_expiry() lock.save() except ThemeLock.DoesNotExist: # Create lock if not created. ThemeLock.objects.create(theme=theme, reviewer=reviewer, expiry=get_updated_expiry()) ThemeReviewFormset = formset_factory(forms.ThemeReviewForm) formset = ThemeReviewFormset(initial=[{'theme': theme.id}]) # Since we started the review on the single page, we want to return to the # single page rather than get shot back to the queue. 
request.session['theme_redirect_url'] = reverse('editors.themes.single', args=[theme.addon.slug]) rereview = (theme.rereviewqueuetheme_set.all()[0] if theme.rereviewqueuetheme_set.exists() else None) return render(request, 'editors/themes/single.html', context( **{'formset': formset, 'theme': rereview if rereview else theme, 'theme_formsets': zip([rereview if rereview else theme], formset), 'theme_reviews': paginate(request, ActivityLog.objects.filter( action=amo.LOG.THEME_REVIEW.id, _arguments__contains=theme.addon.id)), 'actions': get_actions_json(), 'theme_count': 1, 'rereview': rereview, 'reviewable': reviewable, 'reject_reasons': rvw.THEME_REJECT_REASONS, 'action_dict': rvw.REVIEW_ACTIONS, 'tab': ('flagged' if theme.addon.status == amo.STATUS_REVIEW_PENDING else 'rereview' if rereview else 'pending')})) @personas_reviewer_required def themes_logs(request): data = request.GET.copy() if not data.get('start') and not data.get('end'): today = datetime.date.today() data['start'] = datetime.date(today.year, today.month, 1) form = forms.ReviewThemeLogForm(data) theme_logs = ActivityLog.objects.filter(action=amo.LOG.THEME_REVIEW.id) if form.is_valid(): data = form.cleaned_data if data.get('start'): theme_logs = theme_logs.filter(created__gte=data['start']) if data.get('end'): theme_logs = theme_logs.filter(created__lte=data['end']) if data.get('search'): term = data['search'] theme_logs = theme_logs.filter( Q(_details__icontains=term) | Q(user__display_name__icontains=term) | Q(user__username__icontains=term)).distinct() pager = paginate(request, theme_logs, 30) data = context(form=form, pager=pager, ACTION_DICT=rvw.REVIEW_ACTIONS, REJECT_REASONS=rvw.THEME_REJECT_REASONS, tab='themes') return render(request, 'editors/themes/logs.html', data) @admin_required(theme_reviewers=True) def deleted_themes(request): data = request.GET.copy() deleted = Addon.unfiltered.filter(type=amo.ADDON_PERSONA, status=amo.STATUS_DELETED) if not data.get('start') and not data.get('end'): today = datetime.date.today() data['start'] = datetime.date(today.year, today.month, 1) form = forms.DeletedThemeLogForm(data) if form.is_valid(): data = form.cleaned_data if data.get('start'): deleted = deleted.filter(modified__gte=data['start']) if data.get('end'): deleted = deleted.filter(modified__lte=data['end']) if data.get('search'): term = data['search'] deleted = deleted.filter( Q(name__localized_string__icontains=term)) return render(request, 'editors/themes/deleted.html', { 'form': form, 'pager': paginate(request, deleted.order_by('-modified'), 30), 'tab': 'deleted' }) @personas_reviewer_required def themes_history(request, username): if not username: username = request.user.username return render(request, 'editors/themes/history.html', context( **{'theme_reviews': paginate(request, ActivityLog.objects.filter( action=amo.LOG.THEME_REVIEW.id, user__username=username), 20), 'user_history': True, 'username': username, 'reject_reasons': rvw.THEME_REJECT_REASONS, 'action_dict': rvw.REVIEW_ACTIONS})) def get_actions_json(): return json.dumps({ 'moreinfo': rvw.ACTION_MOREINFO, 'flag': rvw.ACTION_FLAG, 'duplicate': rvw.ACTION_DUPLICATE, 'reject': rvw.ACTION_REJECT, 'approve': rvw.ACTION_APPROVE, }) def get_updated_expiry(): return (datetime.datetime.now() + datetime.timedelta(minutes=rvw.THEME_LOCK_EXPIRY))
from django import test
from django.conf import settings

from django_2gis_maps.widgets import DoubleGisMapsAddressWidget


class WidgetTests(test.TestCase):

    def test_render_returns_input_and_map_canvas(self):
        widget = DoubleGisMapsAddressWidget()
        results = widget.render('name', 'value', attrs={'a1': 1, 'a2': 2})
        expected = '<input a1="1" a2="2" name="name" type="text" value="value" />'
        expected += '<div class="map_canvas_wrapper">'
        expected += '<div id="map_canvas"></div></div>'
        self.assertHTMLEqual(expected, results)

    def test_render_returns_blank_for_value_when_none(self):
        widget = DoubleGisMapsAddressWidget()
        results = widget.render('name', None, attrs={'a1': 1, 'a2': 2})
        expected = '<input a1="1" a2="2" name="name" type="text" />'
        expected += '<div class="map_canvas_wrapper">'
        expected += '<div id="map_canvas"></div></div>'
        self.assertHTMLEqual(expected, results)

    def test_maps_js_uses_api_key(self):
        widget = DoubleGisMapsAddressWidget()
        django_2gis_maps_js = "https://maps.api.2gis.ru/2.0/loader.js?pkg=full&skin=dark"
        self.assertEqual(django_2gis_maps_js, widget.Media().js[1])
from cs50 import get_int x = get_int("x: ") y = get_int("y: ") print(f"x + y = {x + y}") print(f"x - y = {x - y}") print(f"x * y = {x * y}") print(f"x / y = {x / y}") print(f"x mod y = {x % y}")
""" Modified state columns in executions table Revision ID: a472b5ad50b7 Revises: e1a50dae1ac9 Create Date: 2021-01-21 13:25:45.815775 """ import sqlalchemy as sa from alembic import op # TODO: import DEFAULT EXECUTION CODE HERE # revision identifiers, used by Alembic. revision = "a472b5ad50b7" down_revision = "e1a50dae1ac9" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column( "executions", sa.Column( "state", sa.SmallInteger(), nullable=False, server_default=sa.text(str(0)), ), ) op.add_column("executions", sa.Column("state_message", sa.TEXT(), nullable=True)) # workaround to make migration work in sqlite: with op.batch_alter_table("executions") as batch_op: batch_op.drop_column("finished") # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column("executions", "state_message") op.drop_column("executions", "state") op.add_column( "executions", sa.Column( "finished", sa.BOOLEAN(), server_default=sa.text("false"), autoincrement=False, nullable=False, ), ) # ### end Alembic commands ###
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import json import warnings import pulumi import pulumi.runtime from .. import utilities, tables class Route(pulumi.CustomResource): description: pulumi.Output[str] dest_range: pulumi.Output[str] name: pulumi.Output[str] network: pulumi.Output[str] next_hop_gateway: pulumi.Output[str] next_hop_instance: pulumi.Output[str] next_hop_instance_zone: pulumi.Output[str] """ (Optional when `next_hop_instance` is specified) The zone of the instance specified in `next_hop_instance`. Omit if `next_hop_instance` is specified as a URL. """ next_hop_ip: pulumi.Output[str] next_hop_network: pulumi.Output[str] next_hop_vpn_tunnel: pulumi.Output[str] priority: pulumi.Output[float] project: pulumi.Output[str] """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ self_link: pulumi.Output[str] """ The URI of the created resource. """ tags: pulumi.Output[list] def __init__(__self__, resource_name, opts=None, description=None, dest_range=None, name=None, network=None, next_hop_gateway=None, next_hop_instance=None, next_hop_instance_zone=None, next_hop_ip=None, next_hop_vpn_tunnel=None, priority=None, project=None, tags=None, __name__=None, __opts__=None): """ Represents a Route resource. A route is a rule that specifies how certain packets should be handled by the virtual network. Routes are associated with virtual machines by tag, and the set of routes for a particular virtual machine is called its routing table. For each packet leaving a virtual machine, the system searches that virtual machine's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the next_hop field of the winning route -- either to another virtual machine destination, a virtual machine gateway or a Compute Engine-operated gateway. Packets that do not match any route in the sending virtual machine's routing table will be dropped. A Route resource must have exactly one specification of either nextHopGateway, nextHopInstance, nextHopIp, or nextHopVpnTunnel. To get more information about Route, see: * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/routes) * How-to Guides * [Using Routes](https://cloud.google.com/vpc/docs/using-routes) <div class = "oics-button" style="float: right; margin: 0 0 -15px"> <a href="https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fdocs-examples.git&cloudshell_working_dir=route_basic&cloudshell_image=gcr.io%2Fgraphite-cloud-shell-images%2Fterraform%3Alatest&open_in_editor=main.tf&cloudshell_print=.%2Fmotd&cloudshell_tutorial=.%2Ftutorial.md" target="_blank"> <img alt="Open in Cloud Shell" src="//gstatic.com/cloudssh/images/open-btn.svg" style="max-height: 44px; margin: 32px auto; max-width: 100%;"> </a> </div> :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. 
:param pulumi.Input[str] next_hop_instance_zone: (Optional when `next_hop_instance` is specified) The zone of the instance specified in `next_hop_instance`. Omit if `next_hop_instance` is specified as a URL. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if not resource_name: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(resource_name, str): raise TypeError('Expected resource name to be a string') if opts and not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() __props__['description'] = description if dest_range is None: raise TypeError('Missing required property dest_range') __props__['dest_range'] = dest_range __props__['name'] = name if network is None: raise TypeError('Missing required property network') __props__['network'] = network __props__['next_hop_gateway'] = next_hop_gateway __props__['next_hop_instance'] = next_hop_instance __props__['next_hop_instance_zone'] = next_hop_instance_zone __props__['next_hop_ip'] = next_hop_ip __props__['next_hop_vpn_tunnel'] = next_hop_vpn_tunnel __props__['priority'] = priority __props__['project'] = project __props__['tags'] = tags __props__['next_hop_network'] = None __props__['self_link'] = None super(Route, __self__).__init__( 'gcp:compute/route:Route', resource_name, __props__, opts) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
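# Illustrative sketch of constructing the Route resource defined above with
# made-up values; the network name and route settings are assumptions, not
# values from any real project.
import pulumi
import pulumi_gcp as gcp

default_route = gcp.compute.Route(
    "example-route",
    dest_range="0.0.0.0/0",
    network="default",
    next_hop_gateway="default-internet-gateway",
    priority=1000,
    tags=["egress"],
)

pulumi.export("route_self_link", default_route.self_link)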
from __future__ import print_function import os import utils import argparse import point import one_step_approximator from IPython.core.debugger import Pdb MAX = float('inf') def print_output(output,output_file): if output_file == '': fh = None else: fh = open(output_file,'w') # print(len(output),file=fh) for mp in output: print(mp[0],mp[1] ,file=fh) # if fh: fh.close() def main(input_file,output_file): #Pdb().set_trace() k,error_type,points = utils.read_input(input_file) error_fn = utils.get_error_fn(error_type) ssa = one_step_approximator.get_one_step_approximator(error_type, points) n = len(points) if k >= n: output = [(p.x,p.y) for p in points] print_output(output,output_file) return #base case - #size of error - table k x n error_table = [] back_pointers = [] last_error_row = [0]*n this_back_pointers = [-1]*n for j in range(k-1,n): last_error_row[j],this_back_pointers[j] = ssa.get_approximation(j,n-1) # #Pdb().set_trace() back_pointers.append(this_back_pointers) for i in range(k-1): step_no = i+2 this_error_row = [0]*n this_back_pointers = [-1]*n #at step i for j in range(k-step_no,n): #num_points_on_right = n-j if (n-j) == step_no: this_error_row[j] = 0 this_back_pointers[j] = (points[j].y,j+1) break # current_min = MAX current_min_index = -1 current_ssay = -1 for l in range(j+1,n-i): this_ssa_e,this_ssa_y = ssa.get_approximation(j,l-1) this_score = ssa.combine(last_error_row[l], this_ssa_e) if this_score < current_min: current_min = this_score current_min_index = l current_ssay = this_ssa_y # # this_error_row[j] = current_min this_back_pointers[j] = (current_ssay, current_min_index) if step_no == k: break # last_error_row = this_error_row back_pointers.append(this_back_pointers) output = [] current_x_ind = 0 current_back_pointer = back_pointers[-1][current_x_ind] for i in range(k-2,-1,-1): output.append((points[current_x_ind].x, current_back_pointer[0])) current_x_ind = current_back_pointer[1] current_back_pointer = back_pointers[i][current_x_ind] # output.append((points[current_x_ind].x, current_back_pointer)) print_output(output,output_file) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file',help='input_file_name',type=str,default='input.txt') parser.add_argument('--output_file',help='output written in output file',default='') args = parser.parse_args() main(args.input_file, args.output_file)
""" This code has been a variation of geomet: https://github.com/geomet/geomet It has been modified under the Apache 2.0 license to fit the needs of the Esri JSON specificaction as defined here: https://developers.arcgis.com/documentation/common-data-types/geometry-objects.htm """ import binascii import struct from ._utils import block_splitter from ._utils import take from ._utils import as_bin_str from ._utils import flatten_multi_dim from itertools import chain #: '\x00': The first byte of any WKB string. Indicates big endian byte #: ordering for the data. BIG_ENDIAN = b'\x00' #: '\x01': The first byte of any WKB string. Indicates little endian byte #: ordering for the data. LITTLE_ENDIAN = b'\x01' #: High byte in a 4-byte geometry type field to indicate that a 4-byte SRID #: field follows. SRID_FLAG = b'\x20' #: Mapping of GeoJSON geometry types to the "2D" 4-byte binary string #: representation for WKB. "2D" indicates that the geometry is 2-dimensional, #: X and Y components. #: NOTE: Byte ordering is big endian. WKB_2D = { 'Point': b'\x00\x00\x00\x01', 'LineString': b'\x00\x00\x00\x02', 'Polygon': b'\x00\x00\x00\x03', 'MultiPoint': b'\x00\x00\x00\x04', 'MultiLineString': b'\x00\x00\x00\x05', 'MultiPolygon': b'\x00\x00\x00\x06', 'GeometryCollection': b'\x00\x00\x00\x07', } #: Mapping of GeoJSON geometry types to the "Z" 4-byte binary string #: representation for WKB. "Z" indicates that the geometry is 3-dimensional, #: with X, Y, and Z components. #: NOTE: Byte ordering is big endian. WKB_Z = { 'Point': b'\x00\x00\x03\xe9', 'LineString': b'\x00\x00\x03\xea', 'Polygon': b'\x00\x00\x03\xeb', 'MultiPoint': b'\x00\x00\x03\xec', 'MultiLineString': b'\x00\x00\x03\xed', 'MultiPolygon': b'\x00\x00\x03\xee', 'GeometryCollection': b'\x00\x00\x03\xef', } #: Mapping of GeoJSON geometry types to the "M" 4-byte binary string #: representation for WKB. "M" indicates that the geometry is 2-dimensional, #: with X, Y, and M ("Measure") components. #: NOTE: Byte ordering is big endian. WKB_M = { 'Point': b'\x00\x00\x07\xd1', 'LineString': b'\x00\x00\x07\xd2', 'Polygon': b'\x00\x00\x07\xd3', 'MultiPoint': b'\x00\x00\x07\xd4', 'MultiLineString': b'\x00\x00\x07\xd5', 'MultiPolygon': b'\x00\x00\x07\xd6', 'GeometryCollection': b'\x00\x00\x07\xd7', } #: Mapping of GeoJSON geometry types to the "ZM" 4-byte binary string #: representation for WKB. "ZM" indicates that the geometry is 4-dimensional, #: with X, Y, Z, and M ("Measure") components. #: NOTE: Byte ordering is big endian. WKB_ZM = { 'Point': b'\x00\x00\x0b\xb9', 'LineString': b'\x00\x00\x0b\xba', 'Polygon': b'\x00\x00\x0b\xbb', 'MultiPoint': b'\x00\x00\x0b\xbc', 'MultiLineString': b'\x00\x00\x0b\xbd', 'MultiPolygon': b'\x00\x00\x0b\xbe', 'GeometryCollection': b'\x00\x00\x0b\xbf', } #: Mapping of dimension types to maps of GeoJSON geometry type -> 4-byte binary #: string representation for WKB. _WKB = { '2D': WKB_2D, 'Z': WKB_Z, 'M': WKB_M, 'ZM': WKB_ZM, } #: Mapping from binary geometry type (as a 4-byte binary string) to GeoJSON #: geometry type. #: NOTE: Byte ordering is big endian. _BINARY_TO_GEOM_TYPE = dict( chain(*((reversed(x) for x in wkb_map.items()) for wkb_map in _WKB.values())) ) _INT_TO_DIM_LABEL = {2: '2D', 3: 'Z', 4: 'ZM'} def _get_geom_type(type_bytes): """Get the GeoJSON geometry type label from a WKB type byte string. :param type_bytes: 4 byte string in big endian byte order containing a WKB type number. It may also contain a "has SRID" flag in the high byte (the first type, since this is big endian byte order), indicated as 0x20. 
If the SRID flag is not set, the high byte will always be null (0x00). :returns: 3-tuple ofGeoJSON geometry type label, the bytes resprenting the geometry type, and a separate "has SRID" flag. If the input `type_bytes` contains an SRID flag, it will be removed. >>> # Z Point, with SRID flag >>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == ( ... 'Point', b'\\x00\\x00\\x03\\xe9', True) True >>> # 2D MultiLineString, without SRID flag >>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == ( ... 'MultiLineString', b'\\x00\\x00\\x00\\x05', False) True """ # slice off the high byte, which may contain the SRID flag high_byte = type_bytes[0] high_byte = bytes([high_byte]) has_srid = high_byte == b'\x20' if has_srid: # replace the high byte with a null byte type_bytes = as_bin_str(b'\x00' + type_bytes[1:]) else: type_bytes = as_bin_str(type_bytes) # look up the geometry type geom_type = _BINARY_TO_GEOM_TYPE.get(type_bytes) return geom_type, type_bytes, has_srid def dump(obj, dest_file): """ Dump GeoJSON-like `dict` to WKB and write it to the `dest_file`. :param dict obj: A GeoJSON-like dictionary. It must at least the keys 'type' and 'coordinates'. :param dest_file: Open and writable file-like object. """ dest_file.write(dumps(obj)) def load(source_file, wkid=4326): """ Load a EsriJSON `dict` object from a ``source_file`` containing WKB (as a byte string). :param source_file: Open and readable file-like object. :returns: A GeoJSON `dict` representing the geometry read from the file. """ return loads(source_file.read(), wkid=wkid) def dumps(obj, big_endian=False): """ Dump a EsriJSON-like `dict` to a WKB string. :param dict obj: GeoJson-like `dict` object. :param bool big_endian: Defaults to `False`. If `True`, data values in the generated WKB will be represented using big endian byte order. Else, little endian. :returns: A WKB binary string representing of the ``obj``. """ def lu_geom(ks): if 'point' in ks: return "Point" elif 'paths' in ks: return "MultiLineString" elif 'x' in ks: return "Point" elif 'rings' in ks: return "MultiPolygon" elif 'points' in ks: return "MultiPoint" geom_type = lu_geom(obj.keys()) meta = obj.get('meta', {}) exporter = _dumps_registry.get(geom_type) if exporter is None: _unsupported_geom_type(geom_type) return exporter(obj, big_endian, meta) def loads(string, wkid=4326): """ Construct a EsriJSON `dict` from WKB (`string`). :param str string: WKB string. :param int wkid: The srid of the coordinate system. The default is 4326. """ string = iter(string) endianness = as_bin_str(take(1, string)) if endianness == BIG_ENDIAN: big_endian = True elif endianness == LITTLE_ENDIAN: big_endian = False else: raise ValueError("Invalid endian byte: '0x%s'. 
Expected 0x00 or 0x01" % binascii.hexlify(endianness.encode()).decode()) endian_token = '>' if big_endian else '<' # type_bytes = string[1:5] type_bytes = as_bin_str(take(4, string)) if not big_endian: # To identify the type, order the type bytes in big endian: type_bytes = type_bytes[::-1] geom_type, type_bytes, has_srid = _get_geom_type(type_bytes) srid = None if has_srid: srid_field = as_bin_str(take(4, string)) [srid] = struct.unpack('%si' % endian_token, srid_field) # data_bytes = string[5:] # FIXME: This won't work for GeometryCollections data_bytes = string importer = _loads_registry_esri.get(geom_type) if importer is None: _unsupported_geom_type(geom_type) data_bytes = iter(data_bytes) result = importer(big_endian, type_bytes, data_bytes, wkid) if has_srid: # As mentioned in the docstring above, includeEsriJSONpproaches to # indicating the SRID. result['meta'] = {'srid': int(srid)} result['crs'] = { 'type': 'name', 'properties': {'name': 'EPSG%s' % srid}, } return result def _unsupported_geom_type(geom_type): raise ValueError("Unsupported geometry type '%s'" % geom_type) # TODO: dont default meta to none def _header_bytefmt_byteorder(geom_type, num_dims, big_endian, meta=None): """ Utility function to get the WKB header (endian byte + type header), byte format string, and byte order string. """ dim = _INT_TO_DIM_LABEL.get(num_dims) if dim is None: pass # TODO: raise type_byte_str = _WKB[dim][geom_type] srid = meta.get('srid') if srid is not None: # Add the srid flag type_byte_str = SRID_FLAG + type_byte_str[1:] if big_endian: header = BIG_ENDIAN byte_fmt = b'>' byte_order = '>' else: header = LITTLE_ENDIAN byte_fmt = b'<' byte_order = '<' # reverse the byte ordering for little endian type_byte_str = type_byte_str[::-1] header += type_byte_str if srid is not None: srid = int(srid) if big_endian: srid_header = struct.pack('>i', srid) else: srid_header = struct.pack('<i', srid) header += srid_header byte_fmt += b'd' * num_dims return header, byte_fmt, byte_order def _dump_point(obj, big_endian, meta): """ Dump a EsriJSON-like `dict` to a point WKB string. :param dict obj: EsriJSON-like `dict` object. :param bool big_endian: If `True`, data values in the generated WKB will be represented using big endian byte order. Else, little endian. :param dict meta: Metadata associated with the GeoJSON object. Currently supported metadata: - srid: Used to support EWKT/EWKB. For example, ``meta`` equal to ``{'srid': '4326'}`` indicates that the geometry is defined using Extended WKT/WKB and that it bears a Spatial Reference System Identifier of 4326. This ID will be encoded into the resulting binary. Any other meta data objects will simply be ignored by this function. :returns: A WKB binary string representing of the Point ``obj``. """ coords = [obj['x'], obj['y']] num_dims = len(coords) wkb_string, byte_fmt, _ = _header_bytefmt_byteorder( 'Point', num_dims, big_endian, meta ) wkb_string += struct.pack(byte_fmt, *coords) return wkb_string def _dump_linestring(obj, big_endian, meta): """ Dump a GeoJSON-like `dict` to a linestring WKB string. Input parameters and output are similar to :func:`_dump_point`. 
""" coords = obj['coordinates'] vertex = coords[0] # Infer the number of dimensions from the first vertex num_dims = len(vertex) wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder( 'LineString', num_dims, big_endian, meta ) # append number of vertices in linestring wkb_string += struct.pack('%sl' % byte_order, len(coords)) for vertex in coords: wkb_string += struct.pack(byte_fmt, *vertex) return wkb_string def _dump_polygon(obj, big_endian, meta): """ Dump a GeoJSON-like `dict` to a polygon WKB string. Input parameters and output are similar to :funct:`_dump_point`. """ coords = obj['coordinates'] vertex = coords[0][0] # Infer the number of dimensions from the first vertex num_dims = len(vertex) wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder( 'Polygon', num_dims, big_endian, meta ) # number of rings: wkb_string += struct.pack('%sl' % byte_order, len(coords)) for ring in coords: # number of verts in this ring: wkb_string += struct.pack('%sl' % byte_order, len(ring)) for vertex in ring: wkb_string += struct.pack(byte_fmt, *vertex) return wkb_string def _dump_multipoint(obj, big_endian, meta): """ Dump a GeoJSON-like `dict` to a multipoint WKB string. Input parameters and output are similar to :funct:`_dump_point`. """ coords = obj['points'] vertex = coords[0] num_dims = len(vertex) wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder( 'MultiPoint', num_dims, big_endian, meta ) point_type = _WKB[_INT_TO_DIM_LABEL.get(num_dims)]['Point'] if big_endian: point_type = BIG_ENDIAN + point_type else: point_type = LITTLE_ENDIAN + point_type[::-1] wkb_string += struct.pack('%sl' % byte_order, len(coords)) for vertex in coords: # POINT type strings wkb_string += point_type wkb_string += struct.pack(byte_fmt, *vertex) return wkb_string def _dump_multilinestring(obj, big_endian, meta): """ Dump a GeoJSON-like `dict` to a multilinestring WKB string. Input parameters and output are similar to :funct:`_dump_point`. """ coords = obj['paths'] vertex = coords[0][0] num_dims = len(vertex) wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder( 'MultiLineString', num_dims, big_endian, meta ) ls_type = _WKB[_INT_TO_DIM_LABEL.get(num_dims)]['LineString'] if big_endian: ls_type = BIG_ENDIAN + ls_type else: ls_type = LITTLE_ENDIAN + ls_type[::-1] # append the number of linestrings wkb_string += struct.pack('%sl' % byte_order, len(coords)) for linestring in coords: wkb_string += ls_type # append the number of vertices in each linestring wkb_string += struct.pack('%sl' % byte_order, len(linestring)) for vertex in linestring: wkb_string += struct.pack(byte_fmt, *vertex) return wkb_string def _dump_multipolygon(obj, big_endian, meta): """ Dump a GeoJSON-like `dict` to a multipolygon WKB string. Input parameters and output are similar to :funct:`_dump_point`. 
""" coords = [obj['rings']] vertex = coords[0][0][0] num_dims = len(vertex) wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder( 'MultiPolygon', num_dims, big_endian, meta ) poly_type = _WKB[_INT_TO_DIM_LABEL.get(num_dims)]['Polygon'] if big_endian: poly_type = BIG_ENDIAN + poly_type else: poly_type = LITTLE_ENDIAN + poly_type[::-1] # apped the number of polygons wkb_string += struct.pack('%sl' % byte_order, len(coords)) for polygon in coords: # append polygon header wkb_string += poly_type # append the number of rings in this polygon wkb_string += struct.pack('%sl' % byte_order, len(polygon)) for ring in polygon: # append the number of vertices in this ring wkb_string += struct.pack('%sl' % byte_order, len(ring)) for vertex in ring: wkb_string += struct.pack(byte_fmt, *vertex) return wkb_string def _dump_geometrycollection(obj, big_endian, meta): # TODO: handle empty collections geoms = obj['geometries'] # determine the dimensionality (2d, 3d, 4d) of the collection # by sampling the first geometry first_geom = geoms[0] rest = geoms[1:] first_wkb = dumps(first_geom, big_endian=big_endian) first_type = first_wkb[1:5] if not big_endian: first_type = first_type[::-1] if first_type in WKB_2D.values(): num_dims = 2 elif first_type in WKB_Z.values(): num_dims = 3 elif first_type in WKB_ZM.values(): num_dims = 4 wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder( 'GeometryCollection', num_dims, big_endian, meta ) # append the number of geometries wkb_string += struct.pack('%sl' % byte_order, len(geoms)) wkb_string += first_wkb for geom in rest: wkb_string += dumps(geom, big_endian=big_endian) return wkb_string def _load_point_esri(big_endian, type_bytes, data_bytes, wkid): """ Convert byte data for a Point to a EsriJSON `dict`. :param bool big_endian: If `True`, interpret the ``data_bytes`` in big endian order, else little endian. :param str type_bytes: 4-byte integer (as a binary string) indicating the geometry type (Point) and the dimensions (2D, Z, M or ZM). For consistency, these bytes are expected to always be in big endian order, regardless of the value of ``big_endian``. :param str data_bytes: Coordinate data in a binary string. :returns: EsriJSON `dict` representing the Point geometry. """ endian_token = '>' if big_endian else '<' if type_bytes == WKB_2D['Point']: coords = struct.unpack('%sdd' % endian_token, as_bin_str(take(16, data_bytes))) elif type_bytes == WKB_Z['Point']: coords = struct.unpack('%sddd' % endian_token, as_bin_str(take(24, data_bytes))) elif type_bytes == WKB_M['Point']: # NOTE: The use of XYM types geometries is quite rare. In the interest # of removing ambiguity, we will treat all XYM geometries as XYZM when # generate the GeoJSON. A default Z value of `0.0` will be given in # this case. 
coords = list(struct.unpack('%sddd' % endian_token, as_bin_str(take(24, data_bytes)))) coords.insert(2, 0.0) elif type_bytes == WKB_ZM['Point']: coords = struct.unpack('%sdddd' % endian_token, as_bin_str(take(32, data_bytes))) return { 'x': coords[0], 'y': coords[1], "spatialReference" : {'wkid' : wkid}} def _load_linestring_esri(big_endian, type_bytes, data_bytes, wkid): """converts wkb to esri json""" endian_token = '>' if big_endian else '<' is_m = False if type_bytes in WKB_2D.values(): num_dims = 2 elif type_bytes in WKB_Z.values(): num_dims = 3 elif type_bytes in WKB_M.values(): num_dims = 3 is_m = True elif type_bytes in WKB_ZM.values(): num_dims = 4 coords = [] [num_verts] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) while True: vert_wkb = as_bin_str(take(8 * num_dims, data_bytes)) fmt = '%s' + 'd' * num_dims vert = list(struct.unpack(fmt % endian_token, vert_wkb)) if is_m: vert.insert(2, 0.0) coords.append(vert) if len(coords) == num_verts: break return dict(paths=[list(coords)], spatialReference={'wkid' : wkid}) def _load_polygon_esri(big_endian, type_bytes, data_bytes, wkid): """converts wkb to esri json""" endian_token = '>' if big_endian else '<' data_bytes = iter(data_bytes) is_m = False if type_bytes in WKB_2D.values(): num_dims = 2 elif type_bytes in WKB_Z.values(): num_dims = 3 elif type_bytes in WKB_M.values(): num_dims = 3 is_m = True elif type_bytes in WKB_ZM.values(): num_dims = 4 coords = [] [num_rings] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) while True: ring = [] [num_verts] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) verts_wkb = as_bin_str(take(8 * num_verts * num_dims, data_bytes)) verts = block_splitter(verts_wkb, 8) verts = (b''.join(bytes([y]) for y in x) for x in verts) for vert_wkb in block_splitter(verts, num_dims): values = [struct.unpack('%sd' % endian_token, x)[0] for x in vert_wkb] if is_m: values.insert(2, 0.0) ring.append(values) coords.append(ring) if len(coords) == num_rings: break return dict(rings=coords, spatialReference={'wkid' : wkid}) def _load_multipoint_esri(big_endian, type_bytes, data_bytes, wkid): """converts wkb to esri json""" endian_token = '>' if big_endian else '<' data_bytes = iter(data_bytes) is_m = False if type_bytes in WKB_2D.values(): num_dims = 2 elif type_bytes in WKB_Z.values(): num_dims = 3 elif type_bytes in WKB_M.values(): num_dims = 3 is_m = True elif type_bytes in WKB_ZM.values(): num_dims = 4 if is_m: dim = 'M' else: dim = _INT_TO_DIM_LABEL[num_dims] coords = [] [num_points] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) while True: point_endian = as_bin_str(take(1, data_bytes)) point_type = as_bin_str(take(4, data_bytes)) values = struct.unpack('%s%s' % (endian_token, 'd' * num_dims), as_bin_str(take(8 * num_dims, data_bytes))) values = list(values) if is_m: values.insert(2, 0.0) if big_endian: assert point_endian == BIG_ENDIAN assert point_type == _WKB[dim]['Point'] else: assert point_endian == LITTLE_ENDIAN assert point_type[::-1] == _WKB[dim]['Point'] coords.append(list(values)) if len(coords) == num_points: break return dict(points=coords, spatialReference={'wkid' : wkid}) def _load_multilinestring_esri(big_endian, type_bytes, data_bytes, wkid): """converts wkb to esri json""" endian_token = '>' if big_endian else '<' data_bytes = iter(data_bytes) is_m = False if type_bytes in WKB_2D.values(): num_dims = 2 elif type_bytes in WKB_Z.values(): num_dims = 3 elif type_bytes in WKB_M.values(): num_dims = 3 is_m = 
True elif type_bytes in WKB_ZM.values(): num_dims = 4 if is_m: dim = 'M' else: dim = _INT_TO_DIM_LABEL[num_dims] [num_ls] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) coords = [] while True: ls_endian = as_bin_str(take(1, data_bytes)) ls_type = as_bin_str(take(4, data_bytes)) if big_endian: assert ls_endian == BIG_ENDIAN assert ls_type == _WKB[dim]['LineString'] else: assert ls_endian == LITTLE_ENDIAN assert ls_type[::-1] == _WKB[dim]['LineString'] [num_verts] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) num_values = num_dims * num_verts values = struct.unpack(endian_token + 'd' * num_values, as_bin_str(take(8 * num_values, data_bytes))) values = list(block_splitter(values, num_dims)) if is_m: for v in values: v.insert(2, 0.0) coords.append(values) if len(coords) == num_ls: break return dict(paths=coords, spatialReference={'wkid' : wkid}) def _load_multipolygon_esri(big_endian, type_bytes, data_bytes, wkid): """converts wkb to esri json""" endian_token = '>' if big_endian else '<' is_m = False if type_bytes in WKB_2D.values(): num_dims = 2 elif type_bytes in WKB_Z.values(): num_dims = 3 elif type_bytes in WKB_M.values(): num_dims = 3 is_m = True elif type_bytes in WKB_ZM.values(): num_dims = 4 if is_m: dim = 'M' else: dim = _INT_TO_DIM_LABEL[num_dims] [num_polys] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) coords = [] while True: polygon = [] poly_endian = as_bin_str(take(1, data_bytes)) poly_type = as_bin_str(take(4, data_bytes)) if big_endian: assert poly_endian == BIG_ENDIAN assert poly_type == _WKB[dim]['Polygon'] else: assert poly_endian == LITTLE_ENDIAN assert poly_type[::-1] == _WKB[dim]['Polygon'] [num_rings] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) for _ in range(num_rings): ring = [] [num_verts] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) for _ in range(num_verts): vert_wkb = as_bin_str(take(8 * num_dims, data_bytes)) fmt = '%s' + 'd' * num_dims vert = list(struct.unpack(fmt % endian_token, vert_wkb)) if is_m: vert.insert(2, 0.0) ring.append(vert) polygon.append(ring) coords.append(polygon) if len(coords) == num_polys: break return dict(rings=[coord[0] for coord in coords], spatialReference={'wkid' : wkid}) def _check_dimensionality(geom, num_dims): def first_geom(gc): for g in gc['geometries']: if not g['type'] == 'GeometryCollection': return g first_vert = { 'Point': lambda x: x['coordinates'], 'LineString': lambda x: x['coordinates'][0], 'Polygon': lambda x: x['coordinates'][0][0], 'MultiLineString': lambda x: x['coordinates'][0][0], 'MultiPolygon': lambda x: x['coordinates'][0][0][0], 'GeometryCollection': first_geom, } if not len(first_vert[geom['type']](geom)) == num_dims: error = 'Cannot mix dimensionality in a geometry' raise Exception(error) def _load_geometrycollection(big_endian, type_bytes, data_bytes): endian_token = '>' if big_endian else '<' is_m = False if type_bytes in WKB_2D.values(): num_dims = 2 elif type_bytes in WKB_Z.values(): num_dims = 3 elif type_bytes in WKB_M.values(): num_dims = 3 is_m = True elif type_bytes in WKB_ZM.values(): num_dims = 4 geometries = [] [num_geoms] = struct.unpack('%sl' % endian_token, as_bin_str(take(4, data_bytes))) while True: geometry = loads(data_bytes) if is_m: _check_dimensionality(geometry, 4) else: _check_dimensionality(geometry, num_dims) # TODO(LB): Add type assertions for the geometry; collections should # not mix 2d, 3d, 4d, etc. 
        geometries.append(geometry)
        if len(geometries) == num_geoms:
            break

    return dict(type='GeometryCollection', geometries=geometries)


_dumps_registry = {
    'Point': _dump_point,
    'LineString': _dump_linestring,
    'Polygon': _dump_polygon,
    'MultiPoint': _dump_multipoint,
    'MultiLineString': _dump_multilinestring,
    'MultiPolygon': _dump_multipolygon,
    'GeometryCollection': _dump_geometrycollection,
}

_loads_registry_esri = {
    'Point': _load_point_esri,
    'LineString': _load_linestring_esri,
    'Polygon': _load_polygon_esri,
    'MultiPoint': _load_multipoint_esri,
    'MultiLineString': _load_multilinestring_esri,
    'MultiPolygon': _load_multipolygon_esri
}
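# A small round-trip sketch for the two public entry points above, assuming
# the dumps() and loads() defined in this module (and its ._utils helpers) are
# in scope. An Esri JSON point is serialised to WKB and read back; the
# coordinates are made-up values.
pt = {"x": -122.45, "y": 37.75, "spatialReference": {"wkid": 4326}}

wkb = dumps(pt)                   # little endian by default
print(wkb[:1] == LITTLE_ENDIAN)   # True
print(loads(wkb, wkid=4326))      # {'x': -122.45, 'y': 37.75, 'spatialReference': {'wkid': 4326}}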
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *


class ContentPrizeInfoModel(object):

    def __init__(self):
        self._prize_id = None
        self._prize_logo = None
        self._prize_name = None

    @property
    def prize_id(self):
        return self._prize_id

    @prize_id.setter
    def prize_id(self, value):
        self._prize_id = value

    @property
    def prize_logo(self):
        return self._prize_logo

    @prize_logo.setter
    def prize_logo(self, value):
        self._prize_logo = value

    @property
    def prize_name(self):
        return self._prize_name

    @prize_name.setter
    def prize_name(self, value):
        self._prize_name = value

    def to_alipay_dict(self):
        params = dict()
        if self.prize_id:
            if hasattr(self.prize_id, 'to_alipay_dict'):
                params['prize_id'] = self.prize_id.to_alipay_dict()
            else:
                params['prize_id'] = self.prize_id
        if self.prize_logo:
            if hasattr(self.prize_logo, 'to_alipay_dict'):
                params['prize_logo'] = self.prize_logo.to_alipay_dict()
            else:
                params['prize_logo'] = self.prize_logo
        if self.prize_name:
            if hasattr(self.prize_name, 'to_alipay_dict'):
                params['prize_name'] = self.prize_name.to_alipay_dict()
            else:
                params['prize_name'] = self.prize_name
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = ContentPrizeInfoModel()
        if 'prize_id' in d:
            o.prize_id = d['prize_id']
        if 'prize_logo' in d:
            o.prize_logo = d['prize_logo']
        if 'prize_name' in d:
            o.prize_name = d['prize_name']
        return o
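# A brief round-trip sketch for ContentPrizeInfoModel defined above: populate
# an instance, serialise it with to_alipay_dict(), and rebuild it with
# from_alipay_dict(). The prize values are made-up examples.
model = ContentPrizeInfoModel()
model.prize_id = "PRIZE_001"
model.prize_logo = "https://example.com/logo.png"
model.prize_name = "Coupon"

payload = model.to_alipay_dict()
# {'prize_id': 'PRIZE_001', 'prize_logo': 'https://example.com/logo.png', 'prize_name': 'Coupon'}

restored = ContentPrizeInfoModel.from_alipay_dict(payload)
assert restored.prize_name == "Coupon"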
"""Common IO api utilities""" from __future__ import annotations import bz2 import codecs from collections import abc import dataclasses import gzip from io import BufferedIOBase, BytesIO, RawIOBase, StringIO, TextIOWrapper import mmap import os from typing import IO, Any, AnyStr, Dict, List, Mapping, Optional, Tuple, Union, cast from urllib.parse import ( urljoin, urlparse as parse_url, uses_netloc, uses_params, uses_relative, ) import warnings import zipfile from pandas._typing import ( Buffer, CompressionDict, CompressionOptions, FileOrBuffer, FilePathOrBuffer, StorageOptions, ) from pandas.compat import get_lzma_file, import_lzma from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import is_file_like lzma = import_lzma() _VALID_URLS = set(uses_relative + uses_netloc + uses_params) _VALID_URLS.discard("") @dataclasses.dataclass class IOArgs: """ Return value of io/common.py:_get_filepath_or_buffer. Note (copy&past from io/parsers): filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile] though mypy handling of conditional imports is difficult. See https://github.com/python/mypy/issues/1297 """ filepath_or_buffer: FileOrBuffer encoding: str mode: str compression: CompressionDict should_close: bool = False @dataclasses.dataclass class IOHandles: """ Return value of io/common.py:get_handle Can be used as a context manager. This is used to easily close created buffers and to handle corner cases when TextIOWrapper is inserted. handle: The file handle to be used. created_handles: All file handles that are created by get_handle is_wrapped: Whether a TextIOWrapper needs to be detached. """ handle: Buffer compression: CompressionDict created_handles: List[Buffer] = dataclasses.field(default_factory=list) is_wrapped: bool = False is_mmap: bool = False def close(self) -> None: """ Close all created buffers. Note: If a TextIOWrapper was inserted, it is flushed and detached to avoid closing the potentially user-created buffer. """ if self.is_wrapped: assert isinstance(self.handle, TextIOWrapper) self.handle.flush() self.handle.detach() self.created_handles.remove(self.handle) try: for handle in self.created_handles: handle.close() except (OSError, ValueError): pass self.created_handles = [] self.is_wrapped = False def __enter__(self) -> IOHandles: return self def __exit__(self, *args: Any) -> None: self.close() def is_url(url) -> bool: """ Check to see if a URL has a valid protocol. Parameters ---------- url : str or unicode Returns ------- isurl : bool If `url` has a valid protocol return True otherwise False. """ if not isinstance(url, str): return False return parse_url(url).scheme in _VALID_URLS def _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]: """ Return the argument with an initial component of ~ or ~user replaced by that user's home directory. Parameters ---------- filepath_or_buffer : object to be converted if possible Returns ------- expanded_filepath_or_buffer : an expanded filepath or the input if not expandable """ if isinstance(filepath_or_buffer, str): return os.path.expanduser(filepath_or_buffer) return filepath_or_buffer def validate_header_arg(header) -> None: if isinstance(header, bool): raise TypeError( "Passing a bool to header is invalid. 
Use header=None for no header or " "header=int or list-like of ints to specify " "the row(s) making up the column names" ) def stringify_path( filepath_or_buffer: FilePathOrBuffer[AnyStr], convert_file_like: bool = False, ) -> FileOrBuffer[AnyStr]: """ Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol (python 3.6+) are coerced according to its __fspath__ method. Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like. """ if not convert_file_like and is_file_like(filepath_or_buffer): # GH 38125: some fsspec objects implement os.PathLike but have already opened a # file. This prevents opening the file a second time. infer_compression calls # this function with convert_file_like=True to infer the compression. return cast(FileOrBuffer[AnyStr], filepath_or_buffer) if isinstance(filepath_or_buffer, os.PathLike): filepath_or_buffer = filepath_or_buffer.__fspath__() return _expand_user(filepath_or_buffer) def urlopen(*args, **kwargs): """ Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of the stdlib. """ import urllib.request return urllib.request.urlopen(*args, **kwargs) def is_fsspec_url(url: FilePathOrBuffer) -> bool: """ Returns true if the given URL looks like something fsspec can handle """ return ( isinstance(url, str) and "://" in url and not url.startswith(("http://", "https://")) ) def _get_filepath_or_buffer( filepath_or_buffer: FilePathOrBuffer, encoding: str = "utf-8", compression: CompressionOptions = None, mode: str = "r", storage_options: StorageOptions = None, ) -> IOArgs: """ If the filepath_or_buffer is a url, translate and return the buffer. Otherwise passthrough. Parameters ---------- filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path), or buffer compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optional encoding : the encoding to use to decode bytes, default is 'utf-8' mode : str, optional storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc., if using a URL that will be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error will be raised if providing this argument with a local path or a file-like buffer. See the fsspec and backend storage implementation docs for the set of allowed keys and values .. versionadded:: 1.2.0 ..versionchange:: 1.2.0 Returns the dataclass IOArgs. """ filepath_or_buffer = stringify_path(filepath_or_buffer) # handle compression dict compression_method, compression = get_compression_method(compression) compression_method = infer_compression(filepath_or_buffer, compression_method) # GH21227 internal compression is not used for non-binary handles. 
if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode: warnings.warn( "compression has no effect when passing a non-binary object as input.", RuntimeWarning, stacklevel=2, ) compression_method = None compression = dict(compression, method=compression_method) # uniform encoding names if encoding is not None: encoding = encoding.replace("_", "-").lower() # bz2 and xz do not write the byte order mark for utf-16 and utf-32 # print a warning when writing such files if ( "w" in mode and compression_method in ["bz2", "xz"] and encoding in ["utf-16", "utf-32"] ): warnings.warn( f"{compression} will not write the byte order mark for {encoding}", UnicodeWarning, ) # Use binary mode when converting path-like objects to file-like objects (fsspec) # except when text mode is explicitly requested. The original mode is returned if # fsspec is not used. fsspec_mode = mode if "t" not in fsspec_mode and "b" not in fsspec_mode: fsspec_mode += "b" if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer): # TODO: fsspec can also handle HTTP via requests, but leaving this # unchanged. using fsspec appears to break the ability to infer if the # server responded with gzipped data storage_options = storage_options or {} # waiting until now for importing to match intended lazy logic of # urlopen function defined elsewhere in this module import urllib.request # assuming storage_options is to be interpreted as headers req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options) with urlopen(req_info) as req: content_encoding = req.headers.get("Content-Encoding", None) if content_encoding == "gzip": # Override compression based on Content-Encoding header compression = {"method": "gzip"} reader = BytesIO(req.read()) return IOArgs( filepath_or_buffer=reader, encoding=encoding, compression=compression, should_close=True, mode=fsspec_mode, ) if is_fsspec_url(filepath_or_buffer): assert isinstance( filepath_or_buffer, str ) # just to appease mypy for this branch # two special-case s3-like protocols; these have special meaning in Hadoop, # but are equivalent to just "s3" from fsspec's point of view # cc #11071 if filepath_or_buffer.startswith("s3a://"): filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://") if filepath_or_buffer.startswith("s3n://"): filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://") fsspec = import_optional_dependency("fsspec") # If botocore is installed we fallback to reading with anon=True # to allow reads from public buckets err_types_to_retry_with_anon: List[Any] = [] try: import_optional_dependency("botocore") from botocore.exceptions import ClientError, NoCredentialsError err_types_to_retry_with_anon = [ ClientError, NoCredentialsError, PermissionError, ] except ImportError: pass try: file_obj = fsspec.open( filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) ).open() # GH 34626 Reads from Public Buckets without Credentials needs anon=True except tuple(err_types_to_retry_with_anon): if storage_options is None: storage_options = {"anon": True} else: # don't mutate user input. 
storage_options = dict(storage_options) storage_options["anon"] = True file_obj = fsspec.open( filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) ).open() return IOArgs( filepath_or_buffer=file_obj, encoding=encoding, compression=compression, should_close=True, mode=fsspec_mode, ) elif storage_options: raise ValueError( "storage_options passed with file object or non-fsspec file path" ) if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): return IOArgs( filepath_or_buffer=_expand_user(filepath_or_buffer), encoding=encoding, compression=compression, should_close=False, mode=mode, ) if not is_file_like(filepath_or_buffer): msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}" raise ValueError(msg) return IOArgs( filepath_or_buffer=filepath_or_buffer, encoding=encoding, compression=compression, should_close=False, mode=mode, ) def file_path_to_url(path: str) -> str: """ converts an absolute native path to a FILE URL. Parameters ---------- path : a path in native format Returns ------- a valid FILE URL """ # lazify expensive import (~30ms) from urllib.request import pathname2url return urljoin("file:", pathname2url(path)) _compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"} def get_compression_method( compression: CompressionOptions, ) -> Tuple[Optional[str], CompressionDict]: """ Simplifies a compression argument to a compression method string and a mapping containing additional arguments. Parameters ---------- compression : str or mapping If string, specifies the compression method. If mapping, value at key 'method' specifies compression method. Returns ------- tuple of ({compression method}, Optional[str] {compression arguments}, Dict[str, Any]) Raises ------ ValueError on mapping missing 'method' key """ compression_method: Optional[str] if isinstance(compression, Mapping): compression_args = dict(compression) try: compression_method = compression_args.pop("method") except KeyError as err: raise ValueError("If mapping, compression must have key 'method'") from err else: compression_args = {} compression_method = compression return compression_method, compression_args def infer_compression( filepath_or_buffer: FilePathOrBuffer, compression: Optional[str] ) -> Optional[str]: """ Get the compression method for filepath_or_buffer. If compression='infer', the inferred compression method is returned. Otherwise, the input compression method is returned unchanged, unless it's invalid, in which case an error is raised. Parameters ---------- filepath_or_buffer : str or file handle File path or object. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None} If 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). Returns ------- string or None Raises ------ ValueError on invalid compression specified. """ if compression is None: return None # Infer compression if compression == "infer": # Convert all path types (e.g. pathlib.Path) to strings filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True) if not isinstance(filepath_or_buffer, str): # Cannot infer compression of a buffer, assume no compression return None # Infer compression from the filename/URL extension for compression, extension in _compression_to_extension.items(): if filepath_or_buffer.lower().endswith(extension): return compression return None # Compression has been specified. 
Check that it's valid if compression in _compression_to_extension: return compression # https://github.com/python/mypy/issues/5492 # Unsupported operand types for + ("List[Optional[str]]" and "List[str]") valid = ["infer", None] + sorted( _compression_to_extension ) # type: ignore[operator] msg = ( f"Unrecognized compression type: {compression}\n" f"Valid compression types are {valid}" ) raise ValueError(msg) def get_handle( path_or_buf: FilePathOrBuffer, mode: str, encoding: Optional[str] = None, compression: CompressionOptions = None, memory_map: bool = False, is_text: bool = True, errors: Optional[str] = None, storage_options: StorageOptions = None, ) -> IOHandles: """ Get file handle for given path/buffer and mode. Parameters ---------- path_or_buf : str or file handle File path or object. mode : str Mode to open path_or_buf with. encoding : str or None Encoding to use. compression : str or dict, default None If string, specifies compression mode. If dict, value at key 'method' specifies compression mode. Compression mode must be one of {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). If dict and compression mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above, other entries passed as additional compression options. .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other keys as compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is now supported for compression modes 'gzip' and 'bz2' as well as 'zip'. memory_map : boolean, default False See parsers._parser_params for more information. is_text : boolean, default True Whether the type of the content passed to the file/buffer is string or bytes. This is not the same as `"b" not in mode`. If a string content is passed to a binary file/buffer, a wrapper is inserted. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. storage_options: StorageOptions = None Passed to _get_filepath_or_buffer .. versionchanged:: 1.2.0 Returns the dataclass IOHandles """ # Windows does not default to utf-8. 
Set to utf-8 for a consistent behavior encoding_passed, encoding = encoding, encoding or "utf-8" # read_csv does not know whether the buffer is opened in binary/text mode if _is_binary_mode(path_or_buf, mode) and "b" not in mode: mode += "b" # open URLs ioargs = _get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode, storage_options=storage_options, ) handle = ioargs.filepath_or_buffer handles: List[Buffer] # memory mapping needs to be the first step handle, memory_map, handles = _maybe_memory_map( handle, memory_map, ioargs.encoding, ioargs.mode, errors ) is_path = isinstance(handle, str) compression_args = dict(ioargs.compression) compression = compression_args.pop("method") if compression: # compression libraries do not like an explicit text-mode ioargs.mode = ioargs.mode.replace("t", "") # GZ Compression if compression == "gzip": if is_path: assert isinstance(handle, str) handle = gzip.GzipFile( filename=handle, mode=ioargs.mode, **compression_args, ) else: handle = gzip.GzipFile( fileobj=handle, # type: ignore[arg-type] mode=ioargs.mode, **compression_args, ) # BZ Compression elif compression == "bz2": handle = bz2.BZ2File( handle, # type: ignore[arg-type] mode=ioargs.mode, **compression_args, ) # ZIP Compression elif compression == "zip": handle = _BytesZipFile(handle, ioargs.mode, **compression_args) if handle.mode == "r": handles.append(handle) zip_names = handle.namelist() if len(zip_names) == 1: handle = handle.open(zip_names.pop()) elif len(zip_names) == 0: raise ValueError(f"Zero files found in ZIP file {path_or_buf}") else: raise ValueError( "Multiple files found in ZIP file. " f"Only one file per ZIP: {zip_names}" ) # XZ Compression elif compression == "xz": handle = get_lzma_file(lzma)(handle, ioargs.mode) # Unrecognized Compression else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) assert not isinstance(handle, str) handles.append(handle) elif isinstance(handle, str): # Check whether the filename is to be opened in binary mode. # Binary mode does not support 'encoding' and 'newline'. 
if ioargs.encoding and "b" not in ioargs.mode: if errors is None and encoding_passed is None: # ignore errors when no encoding is specified errors = "replace" # Encoding handle = open( handle, ioargs.mode, encoding=ioargs.encoding, errors=errors, newline="", ) else: # Binary mode handle = open(handle, ioargs.mode) handles.append(handle) # Convert BytesIO or file objects passed with an encoding is_wrapped = False if is_text and (compression or _is_binary_mode(handle, ioargs.mode)): handle = TextIOWrapper( handle, # type: ignore[arg-type] encoding=ioargs.encoding, errors=errors, newline="", ) handles.append(handle) # only marked as wrapped when the caller provided a handle is_wrapped = not ( isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close ) handles.reverse() # close the most recently added buffer first if ioargs.should_close: assert not isinstance(ioargs.filepath_or_buffer, str) handles.append(ioargs.filepath_or_buffer) assert not isinstance(handle, str) return IOHandles( handle=handle, created_handles=handles, is_wrapped=is_wrapped, is_mmap=memory_map, compression=ioargs.compression, ) # error: Definition of "__exit__" in base class "ZipFile" is incompatible with # definition in base class "BytesIO" [misc] # error: Definition of "__enter__" in base class "ZipFile" is incompatible with # definition in base class "BytesIO" [misc] # error: Definition of "__enter__" in base class "ZipFile" is incompatible with # definition in base class "BinaryIO" [misc] # error: Definition of "__enter__" in base class "ZipFile" is incompatible with # definition in base class "IO" [misc] # error: Definition of "read" in base class "ZipFile" is incompatible with # definition in base class "BytesIO" [misc] # error: Definition of "read" in base class "ZipFile" is incompatible with # definition in base class "IO" [misc] class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc] """ Wrapper for standard library class ZipFile and allow the returned file-like handle to accept byte strings via `write` method. BytesIO provides attributes of file-like object and ZipFile.writestr writes bytes strings into a member of the archive. """ # GH 17778 def __init__( self, file: FilePathOrBuffer, mode: str, archive_name: Optional[str] = None, **kwargs, ): mode = mode.replace("b", "") self.archive_name = archive_name self.multiple_write_buffer: Optional[Union[StringIO, BytesIO]] = None kwargs_zip: Dict[str, Any] = {"compression": zipfile.ZIP_DEFLATED} kwargs_zip.update(kwargs) super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type] def write(self, data): # buffer multiple write calls, write on flush if self.multiple_write_buffer is None: self.multiple_write_buffer = ( BytesIO() if isinstance(data, bytes) else StringIO() ) self.multiple_write_buffer.write(data) def flush(self) -> None: # write to actual handle and close write buffer if self.multiple_write_buffer is None or self.multiple_write_buffer.closed: return # ZipFile needs a non-empty string archive_name = self.archive_name or self.filename or "zip" with self.multiple_write_buffer: super().writestr(archive_name, self.multiple_write_buffer.getvalue()) def close(self): self.flush() super().close() @property def closed(self): return self.fp is None class _MMapWrapper(abc.Iterator): """ Wrapper for the Python's mmap class so that it can be properly read in by Python's csv.reader class. Parameters ---------- f : file object File object to be mapped onto memory. 
Must support the 'fileno' method or have an equivalent attribute """ def __init__(self, f: IO): self.attributes = {} for attribute in ("seekable", "readable", "writeable"): if not hasattr(f, attribute): continue self.attributes[attribute] = getattr(f, attribute)() self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) def __getattr__(self, name: str): if name in self.attributes: return lambda: self.attributes[name] return getattr(self.mmap, name) def __iter__(self) -> _MMapWrapper: return self def __next__(self) -> str: newbytes = self.mmap.readline() # readline returns bytes, not str, but Python's CSV reader # expects str, so convert the output to str before continuing newline = newbytes.decode("utf-8") # mmap doesn't raise if reading past the allocated # data but instead returns an empty string, so raise # if that is returned if newline == "": raise StopIteration return newline def _maybe_memory_map( handle: FileOrBuffer, memory_map: bool, encoding: str, mode: str, errors: Optional[str], ) -> Tuple[FileOrBuffer, bool, List[Buffer]]: """Try to memory map file/buffer.""" handles: List[Buffer] = [] memory_map &= hasattr(handle, "fileno") or isinstance(handle, str) if not memory_map: return handle, memory_map, handles # need to open the file first if isinstance(handle, str): if encoding and "b" not in mode: # Encoding handle = open(handle, mode, encoding=encoding, errors=errors, newline="") else: # Binary mode handle = open(handle, mode) handles.append(handle) try: wrapped = cast(mmap.mmap, _MMapWrapper(handle)) # type: ignore[arg-type] handle.close() handles.remove(handle) handles.append(wrapped) handle = wrapped except Exception: # we catch any errors that may have occurred # because that is consistent with the lower-level # functionality of the C engine (pd.read_csv), so # leave the file handler as is then memory_map = False return handle, memory_map, handles def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool: """Test whether file exists.""" exists = False filepath_or_buffer = stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, str): return exists try: exists = os.path.exists(filepath_or_buffer) # gh-5874: if the filepath is too long will raise here except (TypeError, ValueError): pass return exists def _is_binary_mode(handle: FilePathOrBuffer, mode: str) -> bool: """Whether the handle is opened in binary mode""" # classes that expect string but have 'b' in mode text_classes = (codecs.StreamReaderWriter,) if isinstance(handle, text_classes): return False # classes that expect bytes binary_classes = (BufferedIOBase, RawIOBase) return isinstance(handle, binary_classes) or "b" in getattr(handle, "mode", mode)
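# A short sketch of how the helpers above typically fit together: get_handle()
# infers the compression from the extension, opens (and, for text, wraps) the
# file, and returns an IOHandles object usable as a context manager. This
# relies on the definitions above (pandas' internal io/common module); the
# file name is a made-up example.
with get_handle("example.csv.gz", "w", compression="infer") as handles:
    handles.handle.write("a,b\n1,2\n")   # text handle wrapping a GzipFile

with get_handle("example.csv.gz", "r", compression="infer") as handles:
    print(handles.handle.read())         # a,b / 1,2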
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import os import random import numpy as np import torch from torch import nn from typing import Dict def to_device(data, device): if isinstance(data, torch.Tensor): return data.to(device) elif isinstance(data, dict): return {k: to_device(v, device) for k, v in data.items()} elif isinstance(data, list): return [to_device(v, device) for v in data] def get_all_files(root, file_extension, contain=None): files = [] for folder, _, fs in os.walk(root): for f in fs: if file_extension is not None: if f.endswith(file_extension): if contain is None or contain in os.path.join(folder, f): files.append(os.path.join(folder, f)) else: if contain in f: files.append(os.path.join(folder, f)) return files def flatten(s): if s == []: return s if isinstance(s[0], list): return flatten(s[0]) + flatten(s[1:]) return s[:1] + flatten(s[1:]) def moving_average(data, period): # padding left_pad = [data[0] for _ in range(period // 2)] right_pad = data[-period // 2 + 1 :] data = left_pad + data + right_pad weights = np.ones(period) / period return np.convolve(data, weights, mode="valid") def mem2str(num_bytes): assert num_bytes >= 0 if num_bytes >= 2 ** 30: # GB val = float(num_bytes) / (2 ** 30) result = "%.3f GB" % val elif num_bytes >= 2 ** 20: # MB val = float(num_bytes) / (2 ** 20) result = "%.3f MB" % val elif num_bytes >= 2 ** 10: # KB val = float(num_bytes) / (2 ** 10) result = "%.3f KB" % val else: result = "%d bytes" % num_bytes return result def sec2str(seconds): seconds = int(seconds) hour = seconds // 3600 seconds = seconds % (24 * 3600) seconds %= 3600 minutes = seconds // 60 seconds %= 60 return "%dH %02dM %02dS" % (hour, minutes, seconds) def num2str(n): if n < 1e3: s = str(n) unit = "" elif n < 1e6: n /= 1e3 s = "%.3f" % n unit = "K" else: n /= 1e6 s = "%.3f" % n unit = "M" s = s.rstrip("0").rstrip(".") return s + unit def get_mem_usage(): import psutil mem = psutil.virtual_memory() result = "" result += "available: %s, " % (mem2str(mem.available)) result += "used: %s, " % (mem2str(mem.used)) result += "free: %s" % (mem2str(mem.free)) return result def flatten_first2dim(batch): if isinstance(batch, torch.Tensor): size = batch.size()[2:] batch = batch.view(-1, *size) return batch elif isinstance(batch, dict): return {key: flatten_first2dim(batch[key]) for key in batch} else: assert False, "unsupported type: %s" % type(batch) def _tensor_slice(t, dim, b, e): if dim == 0: return t[b:e] elif dim == 1: return t[:, b:e] elif dim == 2: return t[:, :, b:e] else: raise ValueError("unsupported %d in tensor_slice" % dim) def tensor_slice(t, dim, b, e): if isinstance(t, dict): return {key: tensor_slice(t[key], dim, b, e) for key in t} elif isinstance(t, torch.Tensor): return _tensor_slice(t, dim, b, e).contiguous() else: assert False, "Error: unsupported type: %s" % (type(t)) def tensor_index(t, dim, i): if isinstance(t, dict): return {key: tensor_index(t[key], dim, i) for key in t} elif isinstance(t, torch.Tensor): return _tensor_slice(t, dim, i, i + 1).squeeze(dim).contiguous() else: assert False, "Error: unsupported type: %s" % (type(t)) def one_hot(x, n): assert x.dim() == 2 and x.size(1) == 1 one_hot_x = torch.zeros(x.size(0), n, device=x.device) one_hot_x.scatter_(1, x, 1) return one_hot_x def set_all_seeds(rand_seed): random.seed(rand_seed) np.random.seed(rand_seed + 1) torch.manual_seed(rand_seed + 2) 
    torch.cuda.manual_seed(rand_seed + 3)


def weights_init(m):
    """custom weights initialization"""
    if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
        # nn.init.kaiming_normal(m.weight.data)
        nn.init.orthogonal_(m.weight.data)
    else:
        print("%s is not custom-initialized." % m.__class__)


def init_net(net, net_file):
    if net_file:
        net.load_state_dict(torch.load(net_file))
    else:
        net.apply(weights_init)


def count_output_size(input_shape, model):
    fake_input = torch.FloatTensor(*input_shape)
    output_size = model.forward(fake_input).view(-1).size()[0]
    return output_size
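# A small usage sketch for the helpers above, assuming the definitions in this
# file are in scope. The model architecture and seed are made-up values.
import torch
from torch import nn

set_all_seeds(42)                      # seeds python, numpy and torch RNGs

net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
init_net(net, net_file="")             # empty path -> orthogonal init via weights_init

print(count_output_size((1, 8), net))  # 4: flattened output size for a (1, 8) input
print(num2str(1234567))                # '1.235M'
print(sec2str(3725))                   # '1H 02M 05S'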
import numpy as np from .support import pdfMetalog, quantileMetalog def pdf_quantile_builder(temp, y, term_limit, bounds, boundedness): """Builds the metalog pdf and quantile arrays based on the a coefficients found by fitting metalog distribution. Args: temp (:obj: `numpy.ndarray` of type float): Array of a coefficients found by fitting metalog distribution. - Fit method is specified by metalog.fit_method attribute y (:obj: `numpy.ndarray` of type float): Array of bin widths specified for `a` parameter term_limit (:obj: `int`): The upper limit of the range of metalog terms to use to fit the data. - metalog.term_limit attribute - in range [3,30] bounds (:obj:`list`): Upper and lower limits to filter the data with before calculating metalog quantiles/pdfs. - metalog.bounds attribute - Default: [0,1] boundedness (:obj: `str`): String that is used to specify the type of metalog to fit. - metalog.boundedness attribute Returns: q_dict (:obj:`dict` with keys ['m', 'M', 'y', 'valid']): Initialized output_dict variable from metalog class. - q_dict['m']: (:obj:`numpy.ndarray` of type float): Array of metalog pdf values. * Returned by `pdfMetalog` method * Influenced by `boundedness` parameter * A valid metalog fit will return an array having all elements strictly > 0 - q_dict['M']: (:obj:`numpy.ndarray` of type float): Array of metalog quantile values. * Returned by `quantileMetalog` method * Influenced by `boundedness` parameter - `boundedness` = 'sl': Inserts `bounds`[0] to the front of the quantile array - `boundedness` = 'su': Appends `bounds`[1] to the end of the quantile array - `boundedness` = 'b': Inserts `bounds`[0] to the front of the quantile array and appends `bounds`[1] to the end of the quantile array - q_dict['y']: (:obj:`numpy.ndarray` of type float): Array of bin widths specified for the pdfs/quantiles. * Influenced by `boundedness` parameter - `boundedness` = 'sl': Inserts `bounds`[0] at the front of the quantile array - `boundedness` = 'su': Appends `bounds`[1] to the end of the quantile array - `boundedness` = 'b': Inserts `bounds`[0] at the front of the quantile array and appends `bounds`[1] to the end of the quantile array - q_dict['valid']: (:obj:`str`): A string indicating if the metalog pdf generated by `pdfMetalog` method is valid or not. 
* If all values in the metalog pdf are >= 0, q_dict['valid'] = 'yes' * If any values in the metalog pdf are < 0, q_dict['valid'] = 'no' """ q_dict = {} # build pdf m = pdfMetalog(temp, y[0], term_limit, bounds=bounds, boundedness=boundedness) for j in range(2, len(y) + 1): tempPDF = pdfMetalog( temp, y[j - 1], term_limit, bounds=bounds, boundedness=boundedness ) m = np.append(m, tempPDF) # Build quantile values M = quantileMetalog(temp, y[1], term_limit, bounds=bounds, boundedness=boundedness) for j in range(2, len(y) + 1): tempQant = quantileMetalog( temp, y[j - 1], term_limit, bounds=bounds, boundedness=boundedness ) M = np.append(M, tempQant) # Add trailing and leading zero's for pdf bounds if boundedness == "sl": m = np.append(0, m) M = np.append(bounds[0], M) if boundedness == "su": m = np.append(m, 0) M = np.append(M, bounds[1]) if boundedness == "b": m = np.append(0, m) m = np.append(m, 0) M = np.append(bounds[0], M) M = np.append(M, bounds[1]) # Add y values for bounded models if boundedness == "sl": y = np.append(0, y) if boundedness == "su": y = np.append(y, 1) if boundedness == "b": y = np.append(0, y) y = np.append(y, 1) q_dict["m"] = m q_dict["M"] = M q_dict["y"] = y # PDF validation q_dict["valid"] = pdfMetalogValidation(q_dict["m"]) return q_dict def pdfMetalogValidation(x): """Validation that all calculated metalog pdf values are greater than or equal to 0. Args: x (:obj: `numpy.ndarray` of type float): Array of metalog pdf values. - Returned by `pdfMetalog` method - Influenced by `boundedness` parameter Returns: 'yes' | 'no' (:obj:`str`): 'yes' if all elements strictly >= 0, else 'no'. """ y = np.min(x) if y >= 0: return "yes" else: return "no"
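# pdf_quantile_builder() needs a fitted coefficient array plus the support
# functions from `.support`, so a full example is out of scope here. The
# validation helper, however, only inspects the sign of the density values,
# and the bound handling is plain array padding, so both can be shown
# directly. The arrays below are made-up values, not real metalog fits.
import numpy as np

print(pdfMetalogValidation(np.array([0.2, 0.5, 0.3])))    # 'yes' -> feasible fit
print(pdfMetalogValidation(np.array([0.2, -0.1, 0.3])))   # 'no'  -> infeasible fit

# How the 'b' (bounded on both sides) case pads the quantile array:
M = np.array([1.2, 3.4, 5.6])
bounds = [0.0, 10.0]
print(np.append(np.append(bounds[0], M), bounds[1]))      # [ 0.   1.2  3.4  5.6 10. ]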
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # import concurrent import json from abc import ABC, abstractmethod from copy import deepcopy from datetime import datetime from functools import lru_cache from operator import itemgetter from traceback import format_exc from typing import Any, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Union from airbyte_cdk.logger import AirbyteLogger from airbyte_cdk.models.airbyte_protocol import SyncMode from airbyte_cdk.sources.streams import Stream from wcmatch.glob import GLOBSTAR, SPLIT, globmatch from .formats.csv_parser import CsvParser from .formats.parquet_parser import ParquetParser JSON_TYPES = ["string", "number", "integer", "object", "array", "boolean", "null"] LOGGER = AirbyteLogger() class ConfigurationError(Exception): """Client mis-configured""" class FileStream(Stream, ABC): @property def fileformatparser_map(self): """Mapping where every key is equal 'filetype' and values are corresponding parser classes.""" return { "csv": CsvParser, "parquet": ParquetParser, } # TODO: make these user configurable in spec.json ab_additional_col = "_ab_additional_properties" ab_last_mod_col = "_ab_source_file_last_modified" ab_file_name_col = "_ab_source_file_url" airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col] datetime_format_string = "%Y-%m-%dT%H:%M:%S%z" def __init__(self, dataset: str, provider: dict, format: dict, path_pattern: str, schema: str = None): """ :param dataset: table name for this stream :param provider: provider specific mapping as described in spec.json :param format: file format specific mapping as described in spec.json :param path_pattern: glob-style pattern for file-matching (https://facelessuser.github.io/wcmatch/glob/) :param schema: JSON-syntax user provided schema, defaults to None """ self.dataset = dataset self._path_pattern = path_pattern self._provider = provider self._format = format self._schema = {} if schema: self._schema = self._parse_user_input_schema(schema) self.master_schema = None LOGGER.info(f"initialised stream with format: {format}") @staticmethod def _parse_user_input_schema(schema: str) -> Mapping[str, str]: """ If the user provided a schema, we run this method to convert to a python dict and verify it This verifies: - that the provided string is valid JSON - that it is a key:value map with no nested values (objects or arrays) - that all values in the map correspond to a JsonSchema datatype If this passes, we are confident that the user-provided schema is valid and will work as expected with the rest of the code :param schema: JSON-syntax user provided schema :raises ConfigurationError: if any of the verification steps above fail :return: the input schema (json string) as a python dict """ try: py_schema = json.loads(schema) except json.decoder.JSONDecodeError as err: error_msg = f"Failed to parse schema {repr(err)}\n{schema}\n{format_exc()}" raise ConfigurationError(error_msg) from err # enforce all keys and values are of type string as required (i.e. 
no nesting)
        if not all([isinstance(k, str) and isinstance(v, str) for k, v in py_schema.items()]):
            raise ConfigurationError("Invalid schema provided, all column names and datatypes must be in string format")
        # enforce all values (datatypes) are valid JsonSchema datatypes
        if not all([datatype in JSON_TYPES for datatype in py_schema.values()]):
            raise ConfigurationError(f"Invalid schema provided, datatypes must each be one of {JSON_TYPES}")

        return py_schema

    @property
    def name(self) -> str:
        return self.dataset

    @property
    def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
        return None

    @property
    def fileformatparser_class(self) -> type:
        """
        :return: reference to the relevant fileformatparser class e.g. CsvParser
        """
        filetype = self._format.get("filetype")
        file_reader = self.fileformatparser_map.get(filetype)
        if not file_reader:
            raise RuntimeError(
                f"Detected mismatched file format '{filetype}'. Available values: {list(self.fileformatparser_map.keys())}."
            )
        return file_reader

    @property
    @abstractmethod
    def storagefile_class(self) -> type:
        """
        Override this to point to the relevant provider-specific StorageFile class e.g. S3File

        :return: reference to relevant class
        """

    @abstractmethod
    def filepath_iterator(self) -> Iterator[str]:
        """
        Provider-specific method to iterate through bucket/container/etc. and yield each full filepath.
        This should supply the 'url' to use in StorageFile(). This is possibly better described as a blob or file path.
            e.g. for AWS: f"s3://{aws_access_key_id}:{aws_secret_access_key}@{self.url}" <- self.url is what we want to yield here

        :yield: url filepath to use in StorageFile()
        """

    def pattern_matched_filepath_iterator(self, filepaths: Iterable[str]) -> Iterator[str]:
        """
        Iterates through the given filepaths and yields only those that match the user-provided path pattern

        :param filepaths: filepath_iterator(), this is a param rather than method reference in order to unit test this
        :yield: url filepath to use in StorageFile(), if matching on user-provided path patterns
        """
        for filepath in filepaths:
            if globmatch(filepath, self._path_pattern, flags=GLOBSTAR | SPLIT):
                yield filepath

    @lru_cache(maxsize=None)
    def get_time_ordered_filepaths(self) -> Iterable[Tuple[datetime, str]]:
        """
        Iterates through pattern_matched_filepath_iterator(), acquiring the last_modified property of each file, to return in time-ascending order.
Uses concurrent.futures to thread this asynchronously in order to improve performance when there are many files (network I/O) Caches results after first run of method to avoid repeating network calls as this is used more than once :return: list in time-ascending order """ def get_storagefile_with_lastmod(filepath: str) -> Tuple[datetime, str]: fc = self.storagefile_class(filepath, self._provider) return (fc.last_modified, filepath) storagefiles = [] # use concurrent future threads to parallelise grabbing last_modified from all the files # TODO: don't hardcode max_workers like this with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor: filepath_gen = self.pattern_matched_filepath_iterator(self.filepath_iterator()) futures = [executor.submit(get_storagefile_with_lastmod, fp) for fp in filepath_gen] for future in concurrent.futures.as_completed(futures): # this will failfast on any errors storagefiles.append(future.result()) # The array storagefiles contain tuples of (last_modified, filepath), so sort by last_modified return sorted(storagefiles, key=itemgetter(0)) def _get_schema_map(self) -> Mapping[str, Any]: if self._schema != {}: return_schema = deepcopy(self._schema) else: # we have no provided schema or schema state from a previous incremental run return_schema = self._get_master_schema() return_schema[self.ab_additional_col] = "object" return_schema[self.ab_last_mod_col] = "string" return_schema[self.ab_file_name_col] = "string" return return_schema def get_json_schema(self) -> Mapping[str, Any]: """ :return: the JSON schema representing this stream. """ # note: making every non-airbyte column nullable for compatibility # TODO: ensure this behaviour still makes sense as we add new file formats properties = {} for column, typ in self._get_schema_map().items(): properties[column] = {"type": ["null", typ]} if column not in self.airbyte_columns else {"type": typ} properties[self.ab_last_mod_col]["format"] = "date-time" return {"type": "object", "properties": properties} def _get_master_schema(self, min_datetime: datetime = None) -> Mapping[str, Any]: """ In order to auto-infer a schema across many files and/or allow for additional properties (columns), we need to determine the superset of schemas across all relevant files. This method iterates through get_time_ordered_filepaths() obtaining the inferred schema (process implemented per file format), to build up this superset schema (master_schema). This runs datatype checks to Warn or Error if we find incompatible schemas (e.g. same column is 'date' in one file but 'float' in another). This caches the master_schema after first run in order to avoid repeated compute and network calls to infer schema on all files. :param min_datetime: if passed, will only use files with last_modified >= this to determine master schema :raises RuntimeError: if we find datatype mismatches between files or between a file and schema state (provided or from previous inc. batch) :return: A dict of the JSON schema representing this stream. 
""" # TODO: could implement a (user-beware) 'lazy' mode that skips schema checking to improve performance # TODO: could utilise min_datetime to add a start_date parameter in spec for user if self.master_schema is None: master_schema = deepcopy(self._schema) file_reader = self.fileformatparser_class(self._format) for last_mod, filepath in self.get_time_ordered_filepaths(): # skip this file if it's earlier than min_datetime if (min_datetime is not None) and (last_mod < min_datetime): continue storagefile = self.storagefile_class(filepath, self._provider) with storagefile.open(file_reader.is_binary) as f: this_schema = file_reader.get_inferred_schema(f) if this_schema == master_schema: continue # exact schema match so go to next file # creates a superset of columns retaining order of master_schema with any additional columns added to end column_superset = list(master_schema.keys()) + [c for c in this_schema.keys() if c not in master_schema.keys()] # this compares datatype of every column that the two schemas have in common for col in column_superset: if (col in master_schema.keys()) and (col in this_schema.keys()) and (master_schema[col] != this_schema[col]): # if this column exists in a provided schema or schema state, we'll WARN here rather than throw an error # this is to allow more leniency as we may be able to coerce this datatype mismatch on read according to provided schema state # if not, then the read will error anyway if col in self._schema.keys(): LOGGER.warn( f"Detected mismatched datatype on column '{col}', in file '{storagefile.url}'. " + f"Should be '{master_schema[col]}', but found '{this_schema[col]}'. " + f"Airbyte will attempt to coerce this to {master_schema[col]} on read." ) # else we're inferring the schema (or at least this column) from scratch and therefore throw an error on mismatching datatypes else: raise RuntimeError( f"Detected mismatched datatype on column '{col}', in file '{storagefile.url}'. " + f"Should be '{master_schema[col]}', but found '{this_schema[col]}'." ) # missing columns in this_schema doesn't affect our master_schema so we don't check for it here # add to master_schema any columns from this_schema that aren't already present for col, datatype in this_schema.items(): if col not in master_schema.keys(): master_schema[col] = datatype LOGGER.info(f"determined master schema: {master_schema}") self.master_schema = master_schema return self.master_schema def stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: """ This builds full-refresh stream_slices regardless of sync_mode param. For full refresh, 1 file == 1 stream_slice. The structure of a stream slice is [ {file}, ... ]. In incremental mode, a stream slice may have more than one file so we mirror that format here. Incremental stream_slices are implemented in the IncrementalFileStream child class. 
""" # TODO: this could be optimised via concurrent reads, however we'd lose chronology and need to deal with knock-ons of that # we could do this concurrently both full and incremental by running batches in parallel # and then incrementing the cursor per each complete batch for last_mod, filepath in self.get_time_ordered_filepaths(): storagefile = self.storagefile_class(filepath, self._provider) yield [{"unique_url": storagefile.url, "last_modified": last_mod, "storagefile": storagefile}] def _match_target_schema(self, record: Mapping[str, Any], target_columns: List) -> Mapping[str, Any]: """ This method handles missing or additional fields in each record, according to the provided target_columns. All missing fields are added, with a value of None (null) All additional fields are packed into the _ab_additional_properties object column We start off with a check to see if we're already lined up to target in order to avoid unnecessary iterations (useful if many columns) :param record: json-like representation of a data row {column:value} :param target_columns: list of column names to mutate this record into (obtained via self._get_schema_map().keys() as of now) :return: mutated record with columns lining up to target_columns """ compare_columns = [c for c in target_columns if c not in [self.ab_last_mod_col, self.ab_file_name_col]] # check if we're already matching to avoid unnecessary iteration if set(list(record.keys()) + [self.ab_additional_col]) == set(compare_columns): record[self.ab_additional_col] = {} return record # missing columns for c in [col for col in compare_columns if col != self.ab_additional_col]: if c not in record.keys(): record[c] = None # additional columns record[self.ab_additional_col] = {c: deepcopy(record[c]) for c in record.keys() if c not in compare_columns} for c in record[self.ab_additional_col].keys(): del record[c] return record def _add_extra_fields_from_map(self, record: Mapping[str, Any], extra_map: Mapping[str, Any]) -> Mapping[str, Any]: """ Simple method to take a mapping of columns:values and add them to the provided record :param record: json-like representation of a data row {column:value} :param extra_map: map of additional columns and values to add :return: mutated record with additional fields """ for key, value in extra_map.items(): record[key] = value return record def _read_from_slice( self, file_reader, stream_slice: Mapping[str, Any], stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: """ Uses provider-relevant StorageFile to open file and then iterates through stream_records() using format-relevant AbstractFileParser. Records are mutated on the fly using _match_target_schema() and _add_extra_fields_from_map() to achieve desired final schema. Since this is called per stream_slice, this method works for both full_refresh and incremental. 
""" # TODO: read all files in a stream_slice concurrently for file_info in stream_slice: with file_info["storagefile"].open(file_reader.is_binary) as f: # TODO: make this more efficient than mutating every record one-by-one as they stream for record in file_reader.stream_records(f): schema_matched_record = self._match_target_schema(record, list(self._get_schema_map().keys())) complete_record = self._add_extra_fields_from_map( schema_matched_record, { self.ab_last_mod_col: datetime.strftime(file_info["last_modified"], self.datetime_format_string), self.ab_file_name_col: file_info["unique_url"], }, ) yield complete_record LOGGER.info("finished reading a stream slice") # Always return an empty generator just in case no records were ever yielded yield from [] def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: """ The heavy lifting sits in _read_from_slice() which is full refresh / incremental agnostic """ stream_slice = stream_slice if stream_slice is not None else [] file_reader = self.fileformatparser_class(self._format, self._get_master_schema()) yield from self._read_from_slice(file_reader, stream_slice) class IncrementalFileStream(FileStream, ABC): # TODO: ideally want to checkpoint after every file or stream slice rather than N records state_checkpoint_interval = None @property def cursor_field(self) -> str: """ :return: The name of the cursor field. """ return self.ab_last_mod_col def _get_datetime_from_stream_state(self, stream_state: Mapping[str, Any] = None) -> datetime: """if no state, we default to 1970-01-01 in order to pick up all files present.""" if stream_state is not None and self.cursor_field in stream_state.keys(): return datetime.strptime(stream_state[self.cursor_field], self.datetime_format_string) else: return datetime.strptime("1970-01-01T00:00:00+0000", self.datetime_format_string) def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]: """ Inspects the latest record extracted from the data source and the current state object and return an updated state object. In the case where current_stream_state is null, we default to 1970-01-01 in order to pick up all files present. We also save the schema into the state here so that we can use it on future incremental batches, allowing for additional/missing columns. :param current_stream_state: The stream's current state object :param latest_record: The latest record extracted from the stream :return: An updated state object """ state_dict = {} current_parsed_datetime = self._get_datetime_from_stream_state(current_stream_state) latest_record_datetime = datetime.strptime( latest_record.get(self.cursor_field, "1970-01-01T00:00:00+0000"), self.datetime_format_string ) state_dict[self.cursor_field] = datetime.strftime(max(current_parsed_datetime, latest_record_datetime), self.datetime_format_string) state_dict["schema"] = self._get_schema_map() return state_dict def stream_slices( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None ) -> Iterable[Optional[Mapping[str, Any]]]: """ Builds either full_refresh or incremental stream_slices based on sync_mode. An incremental stream_slice is a group of all files with the exact same last_modified timestamp. 
This ensures we only update the cursor state to a given timestamp after ALL files with that timestamp have been successfully read. Slight nuance: as we iterate through get_time_ordered_filepaths(), we yield the stream_slice containing file(s) up to and EXcluding the file on the current iteration. The stream_slice is then cleared (if we yielded it) and this iteration's file appended to the (next) stream_slice """ if sync_mode == SyncMode.full_refresh: yield from super().stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=stream_state) else: # if necessary and present, let's update this object's schema attribute to the schema stored in state # TODO: ideally we could do this on __init__ but I'm not sure that's possible without breaking from cdk style implementation if self._schema == {} and stream_state is not None and "schema" in stream_state.keys(): self._schema = stream_state["schema"] # logic here is to bundle all files with exact same last modified timestamp together in each slice prev_file_last_mod = None # init variable to hold previous iterations last modified stream_slice = [] for last_mod, filepath in self.get_time_ordered_filepaths(): # skip this file if last_mod is earlier than our cursor value from state if ( stream_state is not None and self.cursor_field in stream_state.keys() and last_mod <= self._get_datetime_from_stream_state(stream_state) ): continue storagefile = self.storagefile_class(filepath, self._provider) # check if this storagefile belongs in the next slice, if so yield the current slice before this file if (prev_file_last_mod is not None) and (last_mod != prev_file_last_mod): yield stream_slice stream_slice.clear() # now we either have an empty stream_slice or a stream_slice that this file shares a last modified with, so append it stream_slice.append({"unique_url": storagefile.url, "last_modified": last_mod, "storagefile": storagefile}) # update our prev_file_last_mod to the current one for next iteration prev_file_last_mod = last_mod # now yield the final stream_slice. This is required because our loop only yields the slice previous to its current iteration. if len(stream_slice) > 0: yield stream_slice # in case we have no files yield from [None] def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: """ The heavy lifting sits in _read_from_slice() which is full refresh / incremental agnostic. We override this for incremental so we can pass our minimum datetime from state into _get_master_schema(). This means we only parse the schema of new files on incremental runs rather than all files in the bucket. """ if sync_mode == SyncMode.full_refresh: yield from super().read_records(sync_mode, cursor_field, stream_slice, stream_state) else: stream_slice = stream_slice if stream_slice is not None else [] file_reader = self.fileformatparser_class( self._format, self._get_master_schema(self._get_datetime_from_stream_state(stream_state)) ) yield from self._read_from_slice(file_reader, stream_slice)
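

# --- Illustrative provider sketch (appended; not part of the original connector) ---
# A minimal, hedged example of how the abstract hooks on FileStream above are
# meant to be filled in by a provider. ``LocalFile`` and ``LocalFileStream`` are
# hypothetical names invented for this sketch (the real connector ships
# provider-specific classes such as an S3 file wrapper); the point is only to
# show the contract: ``storagefile_class`` supplies an object exposing ``url``,
# ``last_modified`` and ``open()``, while ``filepath_iterator()`` yields
# candidate paths that pattern_matched_filepath_iterator() then filters.
import os
from datetime import timezone


class LocalFile:
    """Hypothetical StorageFile-like wrapper around a path on local disk."""

    def __init__(self, url: str, provider: dict):
        self.url = url
        self._provider = provider

    @property
    def last_modified(self) -> datetime:
        # Timezone-aware so it formats cleanly with datetime_format_string
        return datetime.fromtimestamp(os.path.getmtime(self.url), tz=timezone.utc)

    def open(self, binary: bool):
        return open(self.url, "rb" if binary else "r")


class LocalFileStream(FileStream):
    """Hypothetical FileStream that walks a directory taken from the provider config."""

    @property
    def storagefile_class(self) -> type:
        return LocalFile

    def filepath_iterator(self) -> Iterator[str]:
        root = self._provider.get("root_path", ".")
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                yield os.path.join(dirpath, name)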
# -*- coding: utf-8 -*- """Python's built-in :mod:`functools` module builds several useful utilities on top of Python's first-class function support. ``funcutils`` generally stays in the same vein, adding to and correcting Python's standard metaprogramming facilities. """ from __future__ import print_function import sys import re import inspect import functools import itertools from types import MethodType, FunctionType try: xrange make_method = MethodType except NameError: # Python 3 make_method = lambda desc, obj, obj_type: MethodType(desc, obj) basestring = (str, bytes) # Python 3 compat _IS_PY2 = False else: _IS_PY2 = True try: _inspect_iscoroutinefunction = inspect.iscoroutinefunction except AttributeError: # Python 3.4 _inspect_iscoroutinefunction = lambda func: False try: from boltons.typeutils import make_sentinel NO_DEFAULT = make_sentinel(var_name='NO_DEFAULT') except ImportError: NO_DEFAULT = object() _IS_PY35 = sys.version_info >= (3, 5) if not _IS_PY35: # py35+ wants you to use signature instead, but # inspect_formatargspec is way simpler for what it is. Copied the # vendoring approach from alembic: # https://github.com/sqlalchemy/alembic/blob/4cdad6aec32b4b5573a2009cc356cb4b144bd359/alembic/util/compat.py#L92 from inspect import formatargspec as inspect_formatargspec else: from inspect import formatannotation def inspect_formatargspec( args, varargs=None, varkw=None, defaults=None, kwonlyargs=(), kwonlydefaults={}, annotations={}, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), formatreturns=lambda text: ' -> ' + text, formatannotation=formatannotation): """Copy formatargspec from python 3.7 standard library. Python 3 has deprecated formatargspec and requested that Signature be used instead, however this requires a full reimplementation of formatargspec() in terms of creating Parameter objects and such. Instead of introducing all the object-creation overhead and having to reinvent from scratch, just copy their compatibility routine. """ def formatargandannotation(arg): result = formatarg(arg) if arg in annotations: result += ': ' + formatannotation(annotations[arg]) return result specs = [] if defaults: firstdefault = len(args) - len(defaults) for i, arg in enumerate(args): spec = formatargandannotation(arg) if defaults and i >= firstdefault: spec = spec + formatvalue(defaults[i - firstdefault]) specs.append(spec) if varargs is not None: specs.append(formatvarargs(formatargandannotation(varargs))) else: if kwonlyargs: specs.append('*') if kwonlyargs: for kwonlyarg in kwonlyargs: spec = formatargandannotation(kwonlyarg) if kwonlydefaults and kwonlyarg in kwonlydefaults: spec += formatvalue(kwonlydefaults[kwonlyarg]) specs.append(spec) if varkw is not None: specs.append(formatvarkw(formatargandannotation(varkw))) result = '(' + ', '.join(specs) + ')' if 'return' in annotations: result += formatreturns(formatannotation(annotations['return'])) return result def get_module_callables(mod, ignore=None): """Returns two maps of (*types*, *funcs*) from *mod*, optionally ignoring based on the :class:`bool` return value of the *ignore* callable. *mod* can be a string name of a module in :data:`sys.modules` or the module instance itself. 
""" if isinstance(mod, basestring): mod = sys.modules[mod] types, funcs = {}, {} for attr_name in dir(mod): if ignore and ignore(attr_name): continue try: attr = getattr(mod, attr_name) except Exception: continue try: attr_mod_name = attr.__module__ except AttributeError: continue if attr_mod_name != mod.__name__: continue if isinstance(attr, type): types[attr_name] = attr elif callable(attr): funcs[attr_name] = attr return types, funcs def mro_items(type_obj): """Takes a type and returns an iterator over all class variables throughout the type hierarchy (respecting the MRO). >>> sorted(set([k for k, v in mro_items(int) if not k.startswith('__') and 'bytes' not in k and not callable(v)])) ['denominator', 'imag', 'numerator', 'real'] """ # TODO: handle slots? return itertools.chain.from_iterable(ct.__dict__.items() for ct in type_obj.__mro__) def dir_dict(obj, raise_exc=False): """Return a dictionary of attribute names to values for a given object. Unlike ``obj.__dict__``, this function returns all attributes on the object, including ones on parent classes. """ # TODO: separate function for handling descriptors on types? ret = {} for k in dir(obj): try: ret[k] = getattr(obj, k) except Exception: if raise_exc: raise return ret def copy_function(orig, copy_dict=True): """Returns a shallow copy of the function, including code object, globals, closure, etc. >>> func = lambda: func >>> func() is func True >>> func_copy = copy_function(func) >>> func_copy() is func True >>> func_copy is not func True Args: orig (function): The function to be copied. Must be a function, not just any method or callable. copy_dict (bool): Also copy any attributes set on the function instance. Defaults to ``True``. """ ret = FunctionType(orig.__code__, orig.__globals__, name=orig.__name__, argdefs=getattr(orig, "__defaults__", None), closure=getattr(orig, "__closure__", None)) if copy_dict: ret.__dict__.update(orig.__dict__) return ret def partial_ordering(cls): """Class decorator, similar to :func:`functools.total_ordering`, except it is used to define `partial orderings`_ (i.e., it is possible that *x* is neither greater than, equal to, or less than *y*). It assumes the presence of the ``__le__()`` and ``__ge__()`` method, but nothing else. It will not override any existing additional comparison methods. .. _partial orderings: https://en.wikipedia.org/wiki/Partially_ordered_set >>> @partial_ordering ... class MySet(set): ... def __le__(self, other): ... return self.issubset(other) ... def __ge__(self, other): ... return self.issuperset(other) ... >>> a = MySet([1,2,3]) >>> b = MySet([1,2]) >>> c = MySet([1,2,4]) >>> b < a True >>> b > a False >>> b < c True >>> a < c False >>> c > a False """ def __lt__(self, other): return self <= other and not self >= other def __gt__(self, other): return self >= other and not self <= other def __eq__(self, other): return self >= other and self <= other if not hasattr(cls, '__lt__'): cls.__lt__ = __lt__ if not hasattr(cls, '__gt__'): cls.__gt__ = __gt__ if not hasattr(cls, '__eq__'): cls.__eq__ = __eq__ return cls class InstancePartial(functools.partial): """:class:`functools.partial` is a huge convenience for anyone working with Python's great first-class functions. It allows developers to curry arguments and incrementally create simpler callables for a variety of use cases. Unfortunately there's one big gap in its usefulness: methods. Partials just don't get bound as methods and automatically handed a reference to ``self``. 
The ``InstancePartial`` type remedies this by inheriting from :class:`functools.partial` and implementing the necessary descriptor protocol. There are no other differences in implementation or usage. :class:`CachedInstancePartial`, below, has the same ability, but is slightly more efficient. """ def __get__(self, obj, obj_type): return make_method(self, obj, obj_type) class CachedInstancePartial(functools.partial): """The ``CachedInstancePartial`` is virtually the same as :class:`InstancePartial`, adding support for method-usage to :class:`functools.partial`, except that upon first access, it caches the bound method on the associated object, speeding it up for future accesses, and bringing the method call overhead to about the same as non-``partial`` methods. See the :class:`InstancePartial` docstring for more details. """ def __get__(self, obj, obj_type): # These assignments could've been in __init__, but there was # no simple way to do it without breaking one of PyPy or Py3. self.__name__ = None self.__doc__ = self.func.__doc__ self.__module__ = self.func.__module__ name = self.__name__ if name is None: for k, v in mro_items(obj_type): if v is self: self.__name__ = name = k if obj is None: return make_method(self, obj, obj_type) try: # since this is a data descriptor, this block # is probably only hit once (per object) return obj.__dict__[name] except KeyError: obj.__dict__[name] = ret = make_method(self, obj, obj_type) return ret partial = CachedInstancePartial def format_invocation(name='', args=(), kwargs=None): """Given a name, positional arguments, and keyword arguments, format a basic Python-style function call. >>> print(format_invocation('func', args=(1, 2), kwargs={'c': 3})) func(1, 2, c=3) >>> print(format_invocation('a_func', args=(1,))) a_func(1) >>> print(format_invocation('kw_func', kwargs=[('a', 1), ('b', 2)])) kw_func(a=1, b=2) """ kwargs = kwargs or {} a_text = ', '.join([repr(a) for a in args]) if isinstance(kwargs, dict): kwarg_items = kwargs.items() else: kwarg_items = kwargs kw_text = ', '.join(['%s=%r' % (k, v) for k, v in kwarg_items]) all_args_text = a_text if all_args_text and kw_text: all_args_text += ', ' all_args_text += kw_text return '%s(%s)' % (name, all_args_text) def format_exp_repr(obj, pos_names, req_names=None, opt_names=None, opt_key=None): """Render an expression-style repr of an object, based on attribute names, which are assumed to line up with arguments to an initializer. >>> class Flag(object): ... def __init__(self, length, width, depth=None): ... self.length = length ... self.width = width ... self.depth = depth ... That's our Flag object, here are some example reprs for it: >>> flag = Flag(5, 10) >>> print(format_exp_repr(flag, ['length', 'width'], [], ['depth'])) Flag(5, 10) >>> flag2 = Flag(5, 15, 2) >>> print(format_exp_repr(flag2, ['length'], ['width', 'depth'])) Flag(5, width=15, depth=2) By picking the pos_names, req_names, opt_names, and opt_key, you can fine-tune how you want the repr to look. Args: obj (object): The object whose type name will be used and attributes will be checked pos_names (list): Required list of attribute names which will be rendered as positional arguments in the output repr. req_names (list): List of attribute names which will always appear in the keyword arguments in the output repr. Defaults to None. opt_names (list): List of attribute names which may appear in the keyword arguments in the output repr, provided they pass the *opt_key* check. Defaults to None. 
opt_key (callable): A function or callable which checks whether an opt_name should be in the repr. Defaults to a ``None``-check. """ cn = obj.__class__.__name__ req_names = req_names or [] opt_names = opt_names or [] uniq_names, all_names = set(), [] for name in req_names + opt_names: if name in uniq_names: continue uniq_names.add(name) all_names.append(name) if opt_key is None: opt_key = lambda v: v is None assert callable(opt_key) args = [getattr(obj, name, None) for name in pos_names] kw_items = [(name, getattr(obj, name, None)) for name in all_names] kw_items = [(name, val) for name, val in kw_items if not (name in opt_names and opt_key(val))] return format_invocation(cn, args, kw_items) def format_nonexp_repr(obj, req_names=None, opt_names=None, opt_key=None): """Format a non-expression-style repr Some object reprs look like object instantiation, e.g., App(r=[], mw=[]). This makes sense for smaller, lower-level objects whose state roundtrips. But a lot of objects contain values that don't roundtrip, like types and functions. For those objects, there is the non-expression style repr, which mimic's Python's default style to make a repr like so: >>> class Flag(object): ... def __init__(self, length, width, depth=None): ... self.length = length ... self.width = width ... self.depth = depth ... >>> flag = Flag(5, 10) >>> print(format_nonexp_repr(flag, ['length', 'width'], ['depth'])) <Flag length=5 width=10> If no attributes are specified or set, utilizes the id, not unlike Python's built-in behavior. >>> print(format_nonexp_repr(flag)) <Flag id=...> """ cn = obj.__class__.__name__ req_names = req_names or [] opt_names = opt_names or [] uniq_names, all_names = set(), [] for name in req_names + opt_names: if name in uniq_names: continue uniq_names.add(name) all_names.append(name) if opt_key is None: opt_key = lambda v: v is None assert callable(opt_key) items = [(name, getattr(obj, name, None)) for name in all_names] labels = ['%s=%r' % (name, val) for name, val in items if not (name in opt_names and opt_key(val))] if not labels: labels = ['id=%s' % id(obj)] ret = '<%s %s>' % (cn, ' '.join(labels)) return ret # # # # # # Function builder # # # def wraps(func, injected=None, expected=None, **kw): """Modeled after the built-in :func:`functools.wraps`, this function is used to make your decorator's wrapper functions reflect the wrapped function's: * Name * Documentation * Module * Signature The built-in :func:`functools.wraps` copies the first three, but does not copy the signature. This version of ``wraps`` can copy the inner function's signature exactly, allowing seamless usage and :mod:`introspection <inspect>`. Usage is identical to the built-in version:: >>> from boltons.funcutils import wraps >>> >>> def print_return(func): ... @wraps(func) ... def wrapper(*args, **kwargs): ... ret = func(*args, **kwargs) ... print(ret) ... return ret ... return wrapper ... >>> @print_return ... def example(): ... '''docstring''' ... return 'example return value' >>> >>> val = example() example return value >>> example.__name__ 'example' >>> example.__doc__ 'docstring' In addition, the boltons version of wraps supports modifying the outer signature based on the inner signature. By passing a list of *injected* argument names, those arguments will be removed from the outer wrapper's signature, allowing your decorator to provide arguments that aren't passed in. Args: func (function): The callable whose attributes are to be copied. 
injected (list): An optional list of argument names which should not appear in the new wrapper's signature. expected (list): An optional list of argument names (or (name, default) pairs) representing new arguments introduced by the wrapper (the opposite of *injected*). See :meth:`FunctionBuilder.add_arg()` for more details. update_dict (bool): Whether to copy other, non-standard attributes of *func* over to the wrapper. Defaults to True. inject_to_varkw (bool): Ignore missing arguments when a ``**kwargs``-type catch-all is present. Defaults to True. For more in-depth wrapping of functions, see the :class:`FunctionBuilder` type, on which wraps was built. """ if injected is None: injected = [] elif isinstance(injected, basestring): injected = [injected] else: injected = list(injected) expected_items = _parse_wraps_expected(expected) if isinstance(func, (classmethod, staticmethod)): raise TypeError('wraps does not support wrapping classmethods and' ' staticmethods, change the order of wrapping to' ' wrap the underlying function: %r' % (getattr(func, '__func__', None),)) update_dict = kw.pop('update_dict', True) inject_to_varkw = kw.pop('inject_to_varkw', True) if kw: raise TypeError('unexpected kwargs: %r' % kw.keys()) fb = FunctionBuilder.from_func(func) for arg in injected: try: fb.remove_arg(arg) except MissingArgument: if inject_to_varkw and fb.varkw is not None: continue # keyword arg will be caught by the varkw raise for arg, default in expected_items: fb.add_arg(arg, default) # may raise ExistingArgument if fb.is_async: fb.body = 'return await _call(%s)' % fb.get_invocation_str() else: fb.body = 'return _call(%s)' % fb.get_invocation_str() def wrapper_wrapper(wrapper_func): execdict = dict(_call=wrapper_func, _func=func) fully_wrapped = fb.get_func(execdict, with_dict=update_dict) fully_wrapped.__wrapped__ = func # ref to the original function (#115) return fully_wrapped return wrapper_wrapper def _parse_wraps_expected(expected): # expected takes a pretty powerful argument, it's processed # here. admittedly this would be less trouble if I relied on # OrderedDict (there's an impl of that in the commit history if # you look if expected is None: expected = [] elif isinstance(expected, basestring): expected = [(expected, NO_DEFAULT)] expected_items = [] try: expected_iter = iter(expected) except TypeError as e: raise ValueError('"expected" takes string name, sequence of string names,' ' iterable of (name, default) pairs, or a mapping of ' ' {name: default}, not %r (got: %r)' % (expected, e)) for argname in expected_iter: if isinstance(argname, basestring): # dict keys and bare strings try: default = expected[argname] except TypeError: default = NO_DEFAULT else: # pairs try: argname, default = argname except (TypeError, ValueError): raise ValueError('"expected" takes string name, sequence of string names,' ' iterable of (name, default) pairs, or a mapping of ' ' {name: default}, not %r') if not isinstance(argname, basestring): raise ValueError('all "expected" argnames must be strings, not %r' % (argname,)) expected_items.append((argname, default)) return expected_items class FunctionBuilder(object): """The FunctionBuilder type provides an interface for programmatically creating new functions, either based on existing functions or from scratch. Values are passed in at construction or set as attributes on the instance. For creating a new function based of an existing one, see the :meth:`~FunctionBuilder.from_func` classmethod. 
At any point, :meth:`~FunctionBuilder.get_func` can be called to get a newly compiled function, based on the values configured. >>> fb = FunctionBuilder('return_five', doc='returns the integer 5', ... body='return 5') >>> f = fb.get_func() >>> f() 5 >>> fb.varkw = 'kw' >>> f_kw = fb.get_func() >>> f_kw(ignored_arg='ignored_val') 5 Note that function signatures themselves changed quite a bit in Python 3, so several arguments are only applicable to FunctionBuilder in Python 3. Except for *name*, all arguments to the constructor are keyword arguments. Args: name (str): Name of the function. doc (str): `Docstring`_ for the function, defaults to empty. module (str): Name of the module from which this function was imported. Defaults to None. body (str): String version of the code representing the body of the function. Defaults to ``'pass'``, which will result in a function which does nothing and returns ``None``. args (list): List of argument names, defaults to empty list, denoting no arguments. varargs (str): Name of the catch-all variable for positional arguments. E.g., "args" if the resultant function is to have ``*args`` in the signature. Defaults to None. varkw (str): Name of the catch-all variable for keyword arguments. E.g., "kwargs" if the resultant function is to have ``**kwargs`` in the signature. Defaults to None. defaults (tuple): A tuple containing default argument values for those arguments that have defaults. kwonlyargs (list): Argument names which are only valid as keyword arguments. **Python 3 only.** kwonlydefaults (dict): A mapping, same as normal *defaults*, but only for the *kwonlyargs*. **Python 3 only.** annotations (dict): Mapping of type hints and so forth. **Python 3 only.** filename (str): The filename that will appear in tracebacks. Defaults to "boltons.funcutils.FunctionBuilder". indent (int): Number of spaces with which to indent the function *body*. Values less than 1 will result in an error. dict (dict): Any other attributes which should be added to the functions compiled with this FunctionBuilder. All of these arguments are also made available as attributes which can be mutated as necessary. .. _Docstring: https://en.wikipedia.org/wiki/Docstring#Python """ if _IS_PY2: _argspec_defaults = {'args': list, 'varargs': lambda: None, 'varkw': lambda: None, 'defaults': lambda: None} @classmethod def _argspec_to_dict(cls, f): args, varargs, varkw, defaults = inspect.getargspec(f) return {'args': args, 'varargs': varargs, 'varkw': varkw, 'defaults': defaults} else: _argspec_defaults = {'args': list, 'varargs': lambda: None, 'varkw': lambda: None, 'defaults': lambda: None, 'kwonlyargs': list, 'kwonlydefaults': dict, 'annotations': dict} @classmethod def _argspec_to_dict(cls, f): argspec = inspect.getfullargspec(f) return dict((attr, getattr(argspec, attr)) for attr in cls._argspec_defaults) _defaults = {'doc': str, 'dict': dict, 'is_async': lambda: False, 'module': lambda: None, 'body': lambda: 'pass', 'indent': lambda: 4, "annotations": dict, 'filename': lambda: 'boltons.funcutils.FunctionBuilder'} _defaults.update(_argspec_defaults) _compile_count = itertools.count() def __init__(self, name, **kw): self.name = name for a, default_factory in self._defaults.items(): val = kw.pop(a, None) if val is None: val = default_factory() setattr(self, a, val) if kw: raise TypeError('unexpected kwargs: %r' % kw.keys()) return # def get_argspec(self): # TODO if _IS_PY2: def get_sig_str(self, with_annotations=True): """Return function signature as a string. 
with_annotations is ignored on Python 2. On Python 3 signature will omit annotations if it is set to False. """ return inspect_formatargspec(self.args, self.varargs, self.varkw, []) def get_invocation_str(self): return inspect_formatargspec(self.args, self.varargs, self.varkw, [])[1:-1] else: def get_sig_str(self, with_annotations=True): """Return function signature as a string. with_annotations is ignored on Python 2. On Python 3 signature will omit annotations if it is set to False. """ if with_annotations: annotations = self.annotations else: annotations = {} return inspect_formatargspec(self.args, self.varargs, self.varkw, [], self.kwonlyargs, {}, annotations) _KWONLY_MARKER = re.compile(r""" \* # a star \s* # followed by any amount of whitespace , # followed by a comma \s* # followed by any amount of whitespace """, re.VERBOSE) def get_invocation_str(self): kwonly_pairs = None formatters = {} if self.kwonlyargs: kwonly_pairs = dict((arg, arg) for arg in self.kwonlyargs) formatters['formatvalue'] = lambda value: '=' + value sig = inspect_formatargspec(self.args, self.varargs, self.varkw, [], kwonly_pairs, kwonly_pairs, {}, **formatters) sig = self._KWONLY_MARKER.sub('', sig) return sig[1:-1] @classmethod def from_func(cls, func): """Create a new FunctionBuilder instance based on an existing function. The original function will not be stored or modified. """ # TODO: copy_body? gonna need a good signature regex. # TODO: might worry about __closure__? if not callable(func): raise TypeError('expected callable object, not %r' % (func,)) kwargs = {'name': func.__name__, 'doc': func.__doc__, 'module': func.__module__, 'annotations': getattr(func, "__annotations__", {}), 'dict': getattr(func, '__dict__', {})} kwargs.update(cls._argspec_to_dict(func)) if _inspect_iscoroutinefunction(func): kwargs['is_async'] = True return cls(**kwargs) def get_func(self, execdict=None, add_source=True, with_dict=True): """Compile and return a new function based on the current values of the FunctionBuilder. Args: execdict (dict): The dictionary representing the scope in which the compilation should take place. Defaults to an empty dict. add_source (bool): Whether to add the source used to a special ``__source__`` attribute on the resulting function. Defaults to True. with_dict (bool): Add any custom attributes, if applicable. Defaults to True. To see an example of usage, see the implementation of :func:`~boltons.funcutils.wraps`. """ execdict = execdict or {} body = self.body or self._default_body tmpl = 'def {name}{sig_str}:' tmpl += '\n{body}' if self.is_async: tmpl = 'async ' + tmpl body = _indent(self.body, ' ' * self.indent) name = self.name.replace('<', '_').replace('>', '_') # lambdas src = tmpl.format(name=name, sig_str=self.get_sig_str(with_annotations=False), doc=self.doc, body=body) self._compile(src, execdict) func = execdict[name] func.__name__ = self.name func.__doc__ = self.doc func.__defaults__ = self.defaults if not _IS_PY2: func.__kwdefaults__ = self.kwonlydefaults func.__annotations__ = self.annotations if with_dict: func.__dict__.update(self.dict) func.__module__ = self.module # TODO: caller module fallback? if add_source: func.__source__ = src return func def get_defaults_dict(self): """Get a dictionary of function arguments with defaults and the respective values. 
""" ret = dict(reversed(list(zip(reversed(self.args), reversed(self.defaults or []))))) kwonlydefaults = getattr(self, 'kwonlydefaults', None) if kwonlydefaults: ret.update(kwonlydefaults) return ret def get_arg_names(self, only_required=False): arg_names = tuple(self.args) + tuple(getattr(self, 'kwonlyargs', ())) if only_required: defaults_dict = self.get_defaults_dict() arg_names = tuple([an for an in arg_names if an not in defaults_dict]) return arg_names if _IS_PY2: def add_arg(self, arg_name, default=NO_DEFAULT): "Add an argument with optional *default* (defaults to ``funcutils.NO_DEFAULT``)." if arg_name in self.args: raise ExistingArgument('arg %r already in func %s arg list' % (arg_name, self.name)) self.args.append(arg_name) if default is not NO_DEFAULT: self.defaults = (self.defaults or ()) + (default,) return else: def add_arg(self, arg_name, default=NO_DEFAULT, kwonly=False): """Add an argument with optional *default* (defaults to ``funcutils.NO_DEFAULT``). Pass *kwonly=True* to add a keyword-only argument """ if arg_name in self.args: raise ExistingArgument('arg %r already in func %s arg list' % (arg_name, self.name)) if arg_name in self.kwonlyargs: raise ExistingArgument('arg %r already in func %s kwonly arg list' % (arg_name, self.name)) if not kwonly: self.args.append(arg_name) if default is not NO_DEFAULT: self.defaults = (self.defaults or ()) + (default,) else: self.kwonlyargs.append(arg_name) if default is not NO_DEFAULT: self.kwonlydefaults[arg_name] = default return def remove_arg(self, arg_name): """Remove an argument from this FunctionBuilder's argument list. The resulting function will have one less argument per call to this function. Args: arg_name (str): The name of the argument to remove. Raises a :exc:`ValueError` if the argument is not present. """ args = self.args d_dict = self.get_defaults_dict() try: args.remove(arg_name) except ValueError: try: self.kwonlyargs.remove(arg_name) except (AttributeError, ValueError): # py2, or py3 and missing from both exc = MissingArgument('arg %r not found in %s argument list:' ' %r' % (arg_name, self.name, args)) exc.arg_name = arg_name raise exc else: self.kwonlydefaults.pop(arg_name, None) else: d_dict.pop(arg_name, None) self.defaults = tuple([d_dict[a] for a in args if a in d_dict]) return def _compile(self, src, execdict): filename = ('<%s-%d>' % (self.filename, next(self._compile_count),)) try: code = compile(src, filename, 'single') exec(code, execdict) except Exception: raise return execdict class MissingArgument(ValueError): pass class ExistingArgument(ValueError): pass def _indent(text, margin, newline='\n', key=bool): "based on boltons.strutils.indent" indented_lines = [(margin + line if key(line) else line) for line in text.splitlines()] return newline.join(indented_lines) try: from functools import total_ordering # 2.7+ except ImportError: # python 2.6 def total_ordering(cls): """Class decorator that fills in missing comparators/ordering methods. Backport of :func:`functools.total_ordering` to work with Python 2.6. 
Code from http://code.activestate.com/recipes/576685/ """ convert = { '__lt__': [ ('__gt__', lambda self, other: not (self < other or self == other)), ('__le__', lambda self, other: self < other or self == other), ('__ge__', lambda self, other: not self < other)], '__le__': [ ('__ge__', lambda self, other: not self <= other or self == other), ('__lt__', lambda self, other: self <= other and not self == other), ('__gt__', lambda self, other: not self <= other)], '__gt__': [ ('__lt__', lambda self, other: not (self > other or self == other)), ('__ge__', lambda self, other: self > other or self == other), ('__le__', lambda self, other: not self > other)], '__ge__': [ ('__le__', lambda self, other: (not self >= other) or self == other), ('__gt__', lambda self, other: self >= other and not self == other), ('__lt__', lambda self, other: not self >= other)] } roots = set(dir(cls)) & set(convert) if not roots: raise ValueError('must define at least one ordering operation:' ' < > <= >=') root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ for opname, opfunc in convert[root]: if opname not in roots: opfunc.__name__ = opname opfunc.__doc__ = getattr(int, opname).__doc__ setattr(cls, opname, opfunc) return cls # end funcutils.py
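

# --- Illustrative usage sketch (appended; not part of the original module) ---
# A small, hedged example of the ``wraps(injected=...)`` / ``FunctionBuilder``
# machinery defined above. The names ``with_session``, ``fetch`` and the
# 'fake-session' value are invented purely for illustration.
def _funcutils_usage_example():
    def with_session(func):
        @wraps(func, injected=['session'])
        def wrapper(*args, **kwargs):
            kwargs['session'] = 'fake-session'  # supplied by the decorator
            return func(*args, **kwargs)
        return wrapper

    @with_session
    def fetch(user_id, session):
        return (user_id, session)

    # The decorated function no longer exposes ``session`` in its signature,
    # but the underlying function still receives it.
    assert fetch(42) == (42, 'fake-session')
    assert 'session' not in FunctionBuilder.from_func(fetch).args
    return fetch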
# coding=utf-8 # Author: Rafael Menelau Oliveira e Cruz <rafaelmenelau@gmail.com> # # License: BSD 3 clause import numpy as np from deslib.dcs.base import BaseDCS class APosteriori(BaseDCS): """A Posteriori Dynamic classifier selection. The A Posteriori method uses the probability of correct classification of a given base classifier :math:`c_{i}` for each neighbor :math:`x_{k}` with respect to a single class. Consider a classifier :math:`c_{i}` that assigns a test sample to class :math:`w_{l}`. Then, only the samples belonging to class :math:`w_{l}` are taken into account during the competence level estimates. Base classifiers with a higher probability of correct classification have a higher competence level. Moreover, the method also weights the influence of each neighbor :math:`x_{k}` according to its Euclidean distance to the query sample. The closest neighbors have a higher influence on the competence level estimate. In cases where no sample in the region of competence belongs to the predicted class, :math:`w_{l}`, the competence level estimate of the base classifier is equal to zero. A single classifier is selected only if its competence level is significantly higher than that of the other base classifiers in the pool (higher than a pre-defined threshold). Otherwise, all classifiers in the pool are combined using the majority voting rule. The selection methodology can be modified by modifying the hyper-parameter selection_method. Parameters ---------- pool_classifiers : list of classifiers (Default = None) The generated_pool of classifiers trained for the corresponding classification problem. Each base classifiers should support the method "predict" and "predict_proba". If None, then the pool of classifiers is a bagging classifier. k : int (Default = 7) Number of neighbors used to estimate the competence of the base classifiers. DFP : Boolean (Default = False) Determines if the dynamic frienemy pruning is applied. with_IH : Boolean (Default = False) Whether the hardness level of the region of competence is used to decide between using the DS algorithm or the KNN for classification of a given query sample. safe_k : int (default = None) The size of the indecision region. IH_rate : float (default = 0.3) Hardness threshold. If the hardness level of the competence region is lower than the IH_rate the KNN classifier is used. Otherwise, the DS algorithm is used for classification. selection_method : String (Default = "best") Determines which method is used to select the base classifier after the competences are estimated. diff_thresh : float (Default = 0.1) Threshold to measure the difference between the competence level of the base classifiers for the random and diff selection schemes. If the difference is lower than the threshold, their performance are considered equivalent. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. knn_classifier : {'knn', 'faiss', None} (Default = 'knn') The algorithm used to estimate the region of competence: - 'knn' will use :class:`KNeighborsClassifier` from sklearn :class:`KNNE` available on `deslib.utils.knne` - 'faiss' will use Facebook's Faiss similarity search through the class :class:`FaissKNNClassifier` - None, will use sklearn :class:`KNeighborsClassifier`. 
knne : bool (Default=False) Whether to use K-Nearest Neighbor Equality (KNNE) for the region of competence estimation. DSEL_perc : float (Default = 0.5) Percentage of the input data used to fit DSEL. Note: This parameter is only used if the pool of classifier is None or unfitted. n_jobs : int, default=-1 The number of parallel jobs to run. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors. Doesn’t affect fit method. References ---------- G. Giacinto and F. Roli, Methods for Dynamic Classifier Selection 10th Int. Conf. on Image Anal. and Proc., Venice, Italy (1999), 659-664. Ko, Albert HR, Robert Sabourin, and Alceu Souza Britto Jr. "From dynamic classifier selection to dynamic ensemble selection." Pattern Recognition 41.5 (2008): 1718-1731. Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira. "Dynamic selection of classifiers—a comprehensive review." Pattern Recognition 47.11 (2014): 3665-3680. R. M. O. Cruz, R. Sabourin, and G. D. Cavalcanti, “Dynamic classifier selection: Recent advances and perspectives,” Information Fusion, vol. 41, pp. 195 – 216, 2018. """ def __init__(self, pool_classifiers=None, k=7, DFP=False, with_IH=False, safe_k=None, IH_rate=0.30, selection_method='diff', diff_thresh=0.1, random_state=None, knn_classifier='knn', knne=False, DSEL_perc=0.5, n_jobs=-1): super(APosteriori, self).__init__(pool_classifiers=pool_classifiers, k=k, DFP=DFP, with_IH=with_IH, safe_k=safe_k, IH_rate=IH_rate, selection_method=selection_method, diff_thresh=diff_thresh, knn_classifier=knn_classifier, random_state=random_state, knne=knne, DSEL_perc=DSEL_perc, n_jobs=n_jobs) def fit(self, X, y): """Prepare the DS model by setting the KNN algorithm and pre-processing the information required to apply the DS method. Parameters ---------- X : array of shape (n_samples, n_features) Data used to fit the model. y : array of shape (n_samples) class labels of each example in X. Returns ------- self """ super(APosteriori, self).fit(X, y) self._check_predict_proba() self.dsel_scores_ = self._predict_proba_base(self.DSEL_data_) return self def estimate_competence(self, competence_region, distances, predictions=None): """Estimate the competence of each base classifier :math:`c_{i}` for the classification of the query sample using the A Posteriori method. The competence level is estimated based on the probability of correct classification of the base classifier :math:`c_{i}`, for each neighbor :math:`x_{k}` belonging to a specific class :math:`w_{l}`. In this case, :math:`w_{l}` is the class predicted by the base classifier :math:`c_{i}`, for the query sample. This method also weights the influence of each training sample according to its Euclidean distance to the query instance. The closest samples have a higher influence in the computation of the competence level. The competence level estimate is represented by the following equation: .. math:: \\delta_{i,j} = \\frac{\\sum_{\\mathbf{x}_{k} \\in \\omega_{l}}P(\\omega_{l} \\mid \\mathbf{x}_{k}, c_{i} )W_{k}} {\\sum_{k = 1}^{K}P(\\omega_{l} \\mid \\mathbf{x}_{k}, c_{i} )W_{k}} where :math:`\\delta_{i,j}` represents the competence level of :math:`c_{i}` for the classification of query. Parameters ---------- competence_region : array of shape (n_samples, n_neighbors) Indices of the k nearest neighbors. distances : array of shape (n_samples, n_neighbors) Distances from the k nearest neighbors to the query. predictions : array of shape (n_samples, n_classifiers) Predictions of the base classifiers for the test examples. 
        Returns
        -------
        competences : array of shape (n_samples, n_classifiers)
            Competence level estimated for each base classifier and test
            example.
        """
        # Guarantee that these arrays are viewed as 2D arrays for the case
        # where a single test sample is passed down.
        predictions = np.atleast_2d(predictions)
        distances[distances == 0] = 1e-10

        # Normalize the distances
        dists_normalized = 1.0 / distances

        # Expand the dimensions of the predictions and target arrays in
        # order to compare both.
        predictions_3d = np.expand_dims(predictions, axis=1)
        target_3d = self.DSEL_target_[competence_region, np.newaxis]

        # Create a mask to remove the neighbors belonging to a different class
        # than the one predicted by the base classifier
        mask = (predictions_3d != target_3d)

        # Broadcast the distance array to the same shape as the pre-processed
        # information for future calculations
        dists_normalized = np.repeat(np.expand_dims(dists_normalized, axis=2),
                                     self.n_classifiers_, axis=2)

        # Weight the support given by each base classifier to the neighbors'
        # true classes by the normalized (inverse) distances
        scores_target = self.dsel_scores_[competence_region, :,
                                          self.DSEL_target_[competence_region]]
        scores_target_norm = scores_target * dists_normalized

        # Create masked arrays to remove samples with a different label from
        # the calculations
        masked_preprocessed = np.ma.MaskedArray(scores_target_norm, mask=mask)
        masked_dist = np.ma.MaskedArray(dists_normalized, mask=mask)

        # Consider only the neighbor samples where the predicted label is
        # equal to the neighbor label
        competences_masked = np.ma.sum(masked_preprocessed, axis=1) / np.ma.sum(
            masked_dist, axis=1)

        # Fill the masked values in the resulting array with 0 (when no
        # neighbors belong to the class predicted by the corresponding base
        # classifier)
        competences = np.ma.filled(competences_masked, 0)

        return competences
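

# --- Illustrative usage sketch (appended; not part of the library source) ---
# A small, hedged end-to-end example of the class above, assuming the usual
# scikit-learn style workflow implied by the fit() docstring (fit on the DSEL
# data, then predict). Dataset, pool and split choices below are arbitrary.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.ensemble import BaggingClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=1000, random_state=0)
    X_train, X_rest, y_train, y_rest = train_test_split(X, y, test_size=0.5,
                                                        random_state=0)
    X_dsel, X_test, y_dsel, y_test = train_test_split(X_rest, y_rest,
                                                      test_size=0.5,
                                                      random_state=0)

    # Pool of base classifiers; APosteriori requires predict_proba support
    # (see _check_predict_proba in fit above).
    pool = BaggingClassifier(LogisticRegression(max_iter=1000),
                             n_estimators=10, random_state=0)
    pool.fit(X_train, y_train)

    aposteriori = APosteriori(pool_classifiers=pool, k=7, random_state=0)
    aposteriori.fit(X_dsel, y_dsel)  # builds the region-of-competence model on DSEL
    predictions = aposteriori.predict(X_test)
    print("test accuracy:", (predictions == y_test).mean())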