"""ContainerWidget class.
Represents a container that can be used to group other widgets.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .widget import DOMWidget
from IPython.utils.traitlets import Unicode, Tuple, TraitError
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class ContainerWidget(DOMWidget):
_view_name = Unicode('ContainerView', sync=True)
# Child widgets in the container.
# Using a tuple here to force reassignment to update the list.
# When a proper notifying-list trait exists, that is what should be used here.
children = Tuple()
_children = Tuple(sync=True)
def __init__(self, **kwargs):
super(ContainerWidget, self).__init__(**kwargs)
self.on_displayed(ContainerWidget._fire_children_displayed)
def _fire_children_displayed(self):
for child in self._children:
child._handle_displayed()
def _children_changed(self, name, old, new):
"""Validate children list.
Makes sure only one instance of any given model can exist in the
children list.
An excellent post on uniqifiers is available at
http://www.peterbe.com/plog/uniqifiers-benchmark
which provides the inspiration for using this implementation. Below
I've implemented the `f5` algorithm using Python comprehensions."""
if new is not None:
seen = {}
def add_item(i):
seen[i.model_id] = True
return i
            self._children = [add_item(i) for i in new if i.model_id not in seen]
class PopupWidget(ContainerWidget):
_view_name = Unicode('PopupView', sync=True)
description = Unicode(sync=True)
button_text = Unicode(sync=True)
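# Minimal usage sketch (assumptions: a notebook front-end of the same IPython era,
# with sibling DOMWidget subclasses such as ButtonWidget available):
#   container = ContainerWidget()
#   container.children = [ButtonWidget(description='a'), ButtonWidget(description='b')]
#   # reassigning `children` triggers _children_changed, which de-duplicates by model_id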
|
import argparse
from AnnotatorCore import *
import sys
import csv
import requests
import os.path
import logging
import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import date
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('CnaAnnotator')
def main(argv):
if argv.help:
log.info('\n'
'CnaAnnotator.py -i <input CNA file> -o <output CNA file> [-p previous results] [-c <input clinical file>] [-s sample list filter] [-t <default tumor type>] [-u oncokb-base-url] [-b oncokb_api_bear_token] [-z annotate_gain_loss]\n'
' Input CNA file should follow the GISTIC output (https://docs.cbioportal.org/5.1-data-loading/data-loading/file-formats#data-file-1)\n'
' Essential clinical columns:\n'
' SAMPLE_ID: sample ID\n'
' Cancer type will be assigned based on the following priority:\n'
' 1) ONCOTREE_CODE in clinical data file\n'
' 2) ONCOTREE_CODE exist in MAF\n'
' 3) default tumor type (-t)\n'
' We do not annotate Gain and Loss by default, add -z to include the analysis. See https://github.com/oncokb/oncokb-annotator/issues/51 for more information.\n'
' Default OncoKB base url is https://www.oncokb.org')
sys.exit()
if argv.input_file == '' or argv.output_file == '' or argv.oncokb_api_bearer_token == '':
log.info('for help: python CnaAnnotator.py -h')
sys.exit(2)
if argv.sample_ids_filter:
setsampleidsfileterfile(argv.sample_ids_filter)
if argv.oncokb_api_url:
setoncokbbaseurl(argv.oncokb_api_url)
setoncokbapitoken(argv.oncokb_api_bearer_token)
cancertypemap = {}
if argv.input_clinical_file:
readCancerTypes(argv.input_clinical_file, cancertypemap)
log.info('annotating %s ...' % argv.input_file)
processcnagisticdata(argv.input_file, argv.output_file, argv.previous_result_file, argv.default_cancer_type,
cancertypemap, argv.annotate_gain_loss)
log.info('done!')
if __name__ == "__main__":
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', dest='help', action="store_true", default=False)
parser.add_argument('-i', dest='input_file', default='', type=str)
parser.add_argument('-o', dest='output_file', default='', type=str)
parser.add_argument('-p', dest='previous_result_file', default='', type=str)
parser.add_argument('-c', dest='input_clinical_file', default='', type=str)
parser.add_argument('-s', dest='sample_ids_filter', default='', type=str)
parser.add_argument('-t', dest='default_cancer_type', default='', type=str)
parser.add_argument('-u', dest='oncokb_api_url', default='', type=str)
parser.add_argument('-b', dest='oncokb_api_bearer_token', default='', type=str)
parser.add_argument('-z', dest='annotate_gain_loss', action="store_true", default=False)
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
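# Example invocation (hypothetical file names; the flags are the ones defined above):
#   python CnaAnnotator.py -i data_CNA.txt -o data_CNA.oncokb.txt \
#       -c data_clinical_sample.txt -b <oncokb_api_token>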
|
import numpy as np
import scipy
from scipy.spatial.distance import cdist
import lap # 0.4.0
from cython_bbox import bbox_overlaps as bbox_ious
from . import kalman_filter
def merge_matches(m1, m2, shape):
O,P,Q = shape
m1 = np.asarray(m1)
m2 = np.asarray(m2)
M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))
mask = M1*M2
match = mask.nonzero()
match = list(zip(match[0], match[1]))
unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))
return match, unmatched_O, unmatched_Q
def linear_assignment(cost_matrix, thresh):
if cost_matrix.size == 0:
return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
matches, unmatched_a, unmatched_b = [], [], []
cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
for ix, mx in enumerate(x):
if mx >= 0:
matches.append([ix, mx])
unmatched_a = np.where(x < 0)[0]
unmatched_b = np.where(y < 0)[0]
matches = np.asarray(matches)
return matches, unmatched_a, unmatched_b
def ious(atlbrs, btlbrs):
"""
Compute cost based on IoU
:type atlbrs: list[tlbr] | np.ndarray
    :type btlbrs: list[tlbr] | np.ndarray
:rtype ious np.ndarray
"""
    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float64)
if ious.size == 0:
return ious
ious = bbox_ious(
        np.ascontiguousarray(atlbrs, dtype=np.float64),
        np.ascontiguousarray(btlbrs, dtype=np.float64)
)
return ious
def iou_distance(atracks, btracks):
"""
Compute cost based on IoU
:type atracks: list[STrack]
:type btracks: list[STrack]
:rtype cost_matrix np.ndarray
"""
if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
atlbrs = atracks
btlbrs = btracks
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
_ious = ious(atlbrs, btlbrs)
cost_matrix = 1 - _ious
return cost_matrix
def embedding_distance(tracks, detections, metric='cosine'):
"""
:param tracks: list[STrack]
:param detections: list[BaseTrack]
:param metric:
:return: cost_matrix np.ndarray
"""
    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float64)
if cost_matrix.size == 0:
return cost_matrix
    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float64)
    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float64)
    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))  # normalized features
return cost_matrix
def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98):
if cost_matrix.size == 0:
return cost_matrix
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray([det.to_xyah() for det in detections])
for row, track in enumerate(tracks):
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position, metric='maha')
cost_matrix[row, gating_distance > gating_threshold] = np.inf
cost_matrix[row] = lambda_ * cost_matrix[row] + (1-lambda_)* gating_distance
return cost_matrix
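# Minimal usage sketch (assumptions: this module is importable as part of its
# package, e.g. `from tracker import matching`, and `lap`/`cython_bbox` are installed):
#
#   import numpy as np
#   from tracker import matching  # hypothetical package path
#
#   a_boxes = np.array([[0, 0, 10, 10], [20, 20, 30, 30]], dtype=np.float64)  # tlbr boxes
#   b_boxes = np.array([[1, 1, 11, 11], [100, 100, 110, 110]], dtype=np.float64)
#   cost = matching.iou_distance(a_boxes, b_boxes)               # 1 - IoU, shape (2, 2)
#   matches, un_a, un_b = matching.linear_assignment(cost, thresh=0.8)
#   # -> box 0 of each side is matched; index 1 on each side stays unmatched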
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
return {'tardis.io.tests':['data/*.dat', 'data/*.yml']}
|
import pytest
import itertools
import pandas as pd
import numpy as np
from scTenifoldXct.core import null_test
def generate_fake_df_nn(n_ligand=3000, n_receptors=3000, n_cands=200):
gene_names = [f"GENE{i}" for i in range(max(n_ligand, n_receptors))]
iteration = itertools.product(gene_names, gene_names)
inds, ligands, receptors = [], [], []
for i, j in iteration:
inds.append(f"{i}_{j}")
ligands.append(i)
receptors.append(j)
df = pd.DataFrame({"ligand": ligands,
"receptor": receptors,
"dist": np.random.chisquare(1, (n_ligand * n_receptors,)),
"correspondence": np.random.lognormal(0, 4, size=(n_ligand * n_receptors,))},
index=inds)
return df, np.random.choice(df.index, size=(n_cands,), replace=False)
@pytest.mark.parametrize("df_nn,candidates", [
generate_fake_df_nn(3000, 3000, 200),
generate_fake_df_nn(1000, 1000, 200),
])
@pytest.mark.parametrize("filter_zeros", [True])
def test_null_test(df_nn, candidates, filter_zeros):
null_test(df_nn=df_nn, candidates=candidates, filter_zeros=filter_zeros)
def test_chi2_test(xct_skin):
    xct_skin.train_nn(n_steps=1000, lr=0.001)
xct_skin.chi2_test(dof=3, pval=0.05, cal_FDR=True, plot_result=True)
|
# Review separator
def reviewToList(strDataLocation):
    with open(strDataLocation) as file:
        listFile = file.readlines()
firstReviewItem=0
lastReviewItem=0
listReviews = []
reviewText =""
for item in range(len(listFile)):
if('<review_text>\n'==listFile[item]):
firstReviewItem = item+1
if('</review_text>\n'==listFile[item]):
ReviewItemRange = item - firstReviewItem
for i in range(ReviewItemRange):
reviewText = reviewText + (listFile[firstReviewItem])
firstReviewItem = firstReviewItem + 1
reviewText = reviewText.rstrip('\n')
listReviews.append(reviewText)
reviewText =""
return listReviews
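# Example (hypothetical path): each block between <review_text> and </review_text>
# lines becomes one entry in the returned list.
#   reviews = reviewToList('sorted_data/books/positive.review')
#   print(len(reviews), reviews[0][:80])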
|
"""
Lab 4
"""
import re
from ngrams.ngram_trie import NGramTrie
def tokenize_by_sentence(text: str) -> tuple:
if not isinstance(text, str):
raise ValueError
sents = re.split(r'[.?!]', text)
tokenized_sent = []
for sent in sents:
tokens = re.sub(r'[^a-z \n]', '', sent.lower()).split()
if tokens:
tokenized_sent += tokens + ['<END>']
return tuple(tokenized_sent)
class WordStorage:
def __init__(self):
self.storage = {}
def _put_word(self, word: str):
if not isinstance(word, str) or not word:
raise ValueError
if word not in self.storage:
self.storage[word] = len(self.storage) + 1
return self.storage[word]
def get_id(self, word: str) -> int:
if not isinstance(word, str) or not word:
raise ValueError
if word not in self.storage:
raise KeyError
return self.storage[word]
def get_word(self, word_id: int) -> str:
if not isinstance(word_id, int):
raise ValueError
for key, value in self.storage.items():
if value == word_id:
return key
raise KeyError
def update(self, corpus: tuple):
if not isinstance(corpus, tuple):
raise ValueError
for word in corpus:
self._put_word(word)
def encode_text(storage: WordStorage, text: tuple) -> tuple:
if not isinstance(storage, WordStorage) or not isinstance(text, tuple):
raise ValueError
encoded_text = [storage.get_id(word) for word in text]
return tuple(encoded_text)
class NGramTextGenerator:
def __init__(self, word_storage: WordStorage, n_gram_trie: NGramTrie):
self._word_storage = word_storage
self._n_gram_trie = n_gram_trie
def _generate_next_word(self, context: tuple) -> int:
if not isinstance(context, tuple) or len(context) + 1 != self._n_gram_trie.size:
raise ValueError
top_word = ''
word_freq = 0
for n_gram, n_gram_freq in self._n_gram_trie.n_gram_frequencies.items():
if context == n_gram[:-1] and n_gram_freq > word_freq:
top_word = n_gram[-1]
word_freq = n_gram_freq
if not top_word:
top_word = max(self._n_gram_trie.uni_grams, key=self._n_gram_trie.uni_grams.get)[0]
return top_word
def _generate_sentence(self, context: tuple) -> tuple:
if not isinstance(context, tuple):
raise ValueError
sent = self.sent_is(context)
for _ in range(20):
sent.append(self._generate_next_word(context))
context = tuple(list(context) + sent)[-len(context):]
if sent[-1] == self._word_storage.get_id('<END>'):
return tuple(sent)
sent.append(self._word_storage.get_id('<END>'))
return tuple(sent)
def sent_is(self, context):
if context[-1] == self._word_storage.get_id('<END>'):
sent = []
else:
sent = list(context)
return sent
def generate_text(self, context: tuple, number_of_sentences: int) -> tuple:
if not isinstance(context, tuple) or not isinstance(number_of_sentences, int) \
or isinstance(number_of_sentences, bool):
raise ValueError
text = []
for _ in range(number_of_sentences):
sentence = self._generate_sentence(context)
text.extend(sentence)
context = tuple(text[-len(context):])
return tuple(text)
class LikelihoodBasedTextGenerator(NGramTextGenerator):
def _calculate_maximum_likelihood(self, word: int, context: tuple) -> float:
type_check = [isinstance(word, int),
isinstance(context, tuple)]
if not all(type_check) or word not in self._word_storage.storage.values() or \
len([wrd for wrd in context if wrd in self._word_storage.storage.values()]) != len(context):
raise ValueError
wrd_freq = 0
avrg_freq = 0
length = self._n_gram_trie.size - 1
for n_gram in self._n_gram_trie.n_grams:
if context == n_gram[:length]:
avrg_freq += 1
if word == n_gram[-1]:
wrd_freq += 1
try:
likelihood = wrd_freq / avrg_freq
except ZeroDivisionError:
likelihood = 0.0
return likelihood
def _generate_next_word(self, context: tuple) -> int:
if not isinstance(context, tuple) or \
len([w for w in context if w in self._word_storage.storage.values()]) != len(context):
raise ValueError
next_wrd = 0
word_freq = 0.0
for word in self._word_storage.storage.values():
frequency = self._calculate_maximum_likelihood(word, context)
if frequency > word_freq:
word_freq = frequency
next_wrd = word
next_word = self.if_not_freq(next_wrd, word_freq)
return next_word
def if_not_freq(self, next_wrd, word_freq):
if not word_freq:
next_wrd = max(self._n_gram_trie.uni_grams, key=self._n_gram_trie.uni_grams.get)[0]
return next_wrd
class BackOffGenerator(NGramTextGenerator):
def __init__(self, word_storage: WordStorage, n_gram_trie: NGramTrie, *args):
super().__init__(word_storage, n_gram_trie)
def _generate_next_word(self, context: tuple) -> int:
pass
def decode_text(storage: WordStorage, encoded_text: tuple) -> tuple:
if not isinstance(storage, WordStorage) or not isinstance(encoded_text, tuple) or not encoded_text:
raise ValueError
decoded_text = [[]]
for encoded_word in encoded_text:
decoded_word = storage.get_word(encoded_word)
if decoded_word == '<END>':
decoded_text.append([])
else:
decoded_text[-1].append(decoded_word)
decoded_text = [sentence[0][0].upper() + sentence[0][1:] + ' ' + ' '.join(sentence[1:])
for sentence in decoded_text if sentence]
return tuple(decoded_text)
def save_model(model: NGramTextGenerator, path_to_saved_model: str):
pass
def load_model(path_to_saved_model: str) -> NGramTextGenerator:
pass
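if __name__ == '__main__':
    # Minimal usage sketch: build a storage from a tokenized corpus and
    # round-trip it through encode_text (assumes the `ngrams` package imported
    # above is available on the path).
    corpus = tokenize_by_sentence('I have a cat. His name is Bruno.')
    storage = WordStorage()
    storage.update(corpus)
    encoded = encode_text(storage, corpus)
    print(encoded)
    # The generators additionally need a filled NGramTrie, e.g. (constructor
    # signature assumed from the lab materials):
    #   trie = NGramTrie(3, encoded)
    #   generator = NGramTextGenerator(storage, trie)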
|
from django.apps import AppConfig
class ConfConfig(AppConfig):
name = 'conf'
|
# Copyright 2017 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gdb
class HelloGdb(gdb.Command):
    def __init__(self):
        super(HelloGdb, self).__init__("hello-gdb", gdb.COMMAND_USER)
    def invoke(self, arg, from_tty):
        print("Hello GDB from Python!")
HelloGdb()
|
#!/usr/bin/env python
import vtk
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer = vtk.vtkRenderer()
renWin.AddRenderer(renderer)
src1 = vtk.vtkSphereSource()
src1.SetRadius(5)
src1.SetPhiResolution(20)
src1.SetThetaResolution(20)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(src1.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Load the material. Here, we are loading a material
# defined in the Vtk Library. One can also specify
# a filename to a material description xml.
actor.GetProperty().LoadMaterial("CgTwisted")
# Turn shading on. Otherwise, shaders are not used.
actor.GetProperty().ShadingOn()
# Pass a shader variable needed by CgTwisted.
actor.GetProperty().AddShaderVariable("Rate",1.0)
renderer.AddActor(actor)
renWin.Render()
renderer.GetActiveCamera().Azimuth(-50)
renderer.GetActiveCamera().Roll(70)
renWin.Render()
# --- end of script --
|
#! /usr/bin/env python
# Like mkdir, but also make intermediate directories if necessary.
# It is not an error if the given directory already exists (as long
# as it is a directory).
# Errors are not treated specially -- you just get a Python exception.
import sys, os
def main():
for p in sys.argv[1:]:
makedirs(p)
def makedirs(p):
if p and not os.path.isdir(p):
head, tail = os.path.split(p)
makedirs(head)
        os.mkdir(p, 0o777)
if __name__ == "__main__":
main()
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module holds util functions that are used in more than one test module.
from mobly import records
def validate_test_result(result):
"""Validate basic properties of a test result.
The records in each bucket of the test result should have the corresponding
result enum.
Args:
result: The `records.TestResult` object to validate.
"""
buckets = [
(result.passed, records.TestResultEnums.TEST_RESULT_PASS),
(result.failed, records.TestResultEnums.TEST_RESULT_FAIL),
(result.error, records.TestResultEnums.TEST_RESULT_ERROR),
(result.skipped, records.TestResultEnums.TEST_RESULT_SKIP),
]
for bucket_list, expected_enum in buckets:
for record in bucket_list:
if record.result != expected_enum:
raise AssertionError('Expected result %s, got %s.' %
(expected_enum, record.result))
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""ResNet models for Keras.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.keras.applications.resnet import ResNet101
from tensorflow.python.keras.applications.resnet import ResNet152
from tensorflow.python.keras.applications.resnet import ResNet50
from tensorflow.python.keras.applications.resnet import decode_predictions
from tensorflow.python.keras.applications.resnet import preprocess_input
del _print_function
|
################################################################
# System's dependencies
################################################################
import os
import sys
import time
import argparse
################################################################
# Local dependencies
################################################################
from org.gesis.lib import io
from org.gesis.lib import graph
from org.gesis.lib import homophily
################################################################
# Constants
################################################################
DATASETS = ['aps','hate','blogs','wikipedia']
################################################################
# Main
################################################################
def run(datapath, dataset, steps, njobs, output):
if dataset not in DATASETS:
raise Exception("dataset " + dataset +" does not exist.")
print(dataset, steps, njobs)
g = graph.get_graph(datapath, dataset)
N, fm, d, plo_M, plo_m, pli_M, pli_m, EMM, EMm, EmM, Emm, hMM, hmm, _N, _d, _mindiff = homophily.get_metadata(g, steps,
njobs=njobs, verbose=True, seed=None)
print("N:{}".format(N))
print("fm:{}".format(fm))
print("d:{}".format(d))
print("plo_M:{}".format(plo_M))
print("plo_m:{}".format(plo_m))
print("pli_M:{}".format(pli_M))
print("pli_m:{}".format(pli_m))
print("EMM:{}".format(EMM))
print("EMm:{}".format(EMm))
print("EmM:{}".format(EmM))
print("Emm:{}".format(Emm))
print("hMM:{}".format(hMM))
print("hmm:{}".format(hmm))
print("_N:{}".format(_N))
print("_d:{}".format(_d))
print("_mindiff:{}".format(_mindiff))
### Storing metadata info into .csv file
t1 = "dataset,N,fm,d,plo_M,plo_m,pli_M,pli_m,EMM,EMm,EmM,Emm,hMM,hmm,_N,_d,_mindiff"
t2 = ",".join([dataset, str(N), str(fm), str(d), str(plo_M), str(plo_m), str(pli_M), str(pli_m),
str(EMM), str(EMm), str(EmM), str(Emm), str(hMM), str(hmm), str(_N), str(_d), str(_mindiff)])
path = os.path.join(output,dataset,"network_metadata.csv")
io.save_text("{}\n{}".format(t1,t2), path)
################################################################
# Main
################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", help=",".join(DATASETS), type=str, required=True)
parser.add_argument("--steps", help="decimals (eg. 0.01, 0.05) to compute homophily", type=float, required=True)
parser.add_argument("--njobs", help="parallel jobs", type=int, default=1)
parser.add_argument("--datapath", help="path/folder where the .gpickle files are.", type=str, required=True)
parser.add_argument("--output", help="path/folder where to store csv file", type=str, default='.')
args = parser.parse_args()
start_time = time.time()
run(args.datapath, args.dataset, args.steps, args.njobs, args.output)
print("--- %s seconds ---" % (time.time() - start_time))
|
import json
import sys
from .client import JSONRPCException  # raised by load_config(); assumed to live in the sibling client module
from . import app
from . import bdev
from . import iscsi
from . import log
from . import lvol
from . import nbd
from . import net
from . import nvmf
from . import pmem
from . import subsystem
from . import vhost
def start_subsystem_init(client):
return client.call('start_subsystem_init')
def get_rpc_methods(client, args):
params = {}
if args.current:
params['current'] = args.current
return client.call('get_rpc_methods', params)
def save_config(client, args):
config = {
'subsystems': []
}
for elem in client.call('get_subsystems'):
cfg = {
'subsystem': elem['subsystem'],
'config': client.call('get_subsystem_config', {"name": elem['subsystem']})
}
config['subsystems'].append(cfg)
indent = args.indent
if args.filename is None:
if indent is None:
indent = 2
elif indent < 0:
indent = None
json.dump(config, sys.stdout, indent=indent)
sys.stdout.write('\n')
else:
if indent is None or indent < 0:
indent = None
with open(args.filename, 'w') as file:
json.dump(config, file, indent=indent)
file.write('\n')
def load_config(client, args):
if not args.filename or args.filename == '-':
json_config = json.load(sys.stdin)
else:
with open(args.filename, 'r') as file:
json_config = json.load(file)
subsystems = json_config['subsystems']
while subsystems:
allowed_methods = client.call('get_rpc_methods', {'current': True})
allowed_found = False
for subsystem in list(subsystems):
if not subsystem['config']:
subsystems.remove(subsystem)
continue
config = subsystem['config']
for elem in list(config):
if not elem or 'method' not in elem or elem['method'] not in allowed_methods:
continue
client.call(elem['method'], elem['params'])
config.remove(elem)
allowed_found = True
if not config:
subsystems.remove(subsystem)
if 'start_subsystem_init' in allowed_methods:
client.call('start_subsystem_init')
allowed_found = True
if subsystems and not allowed_found:
            raise JSONRPCException("Some configuration was left over, but no allowed method was found to execute it")
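# Minimal sketch of driving save_config() without a live SPDK target: any object
# exposing `.call(method, params=None)` works as `client`. The class and argparse
# fields below are illustrative assumptions, not part of the real CLI.
#
#   import argparse
#
#   class _RecordingClient:
#       def call(self, method, params=None):
#           return [] if method == 'get_subsystems' else None
#
#   args = argparse.Namespace(filename=None, indent=2)
#   save_config(_RecordingClient(), args)  # prints {"subsystems": []} to stdout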
|
import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import sklearn.metrics
import matplotlib.pyplot as plt
# Load the training and test data from the Pickle file
with open("../datasets/credit_card_default_dataset.pickle", "rb") as f:
train_data, train_labels, test_data, test_labels = pickle.load(f)
# Get some lengths
n_inputs = train_data.shape[1]
nsamples = train_data.shape[0]
# Training constants
n_nodes_l1 = 5
batch_size = 32
learning_rate = .001 # Initial rate for Adam
n_epochs = 1000
eval_step = 5
n_batches = int(np.ceil(nsamples / batch_size))
# Print the configuration
print("Batch size: {} Num batches: {} Num epochs: {} Learning rate: {}".format(batch_size, n_batches, n_epochs, learning_rate))
print("Num nodes in L1: {} Activation function: ELU".format(n_nodes_l1))
# TensorFlow placeholders
# Input vector placeholders. Length is unspecified.
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
Y = tf.placeholder(tf.float32, shape=(None, 1), name="Y")
# Hidden layer 1:
# Inputs: n_inputs
# Outputs: n_nodes_l1
# Activation: ELU
W_L1 = tf.Variable(tf.truncated_normal([n_inputs, n_nodes_l1], stddev=2/np.sqrt(n_inputs)))
b_L1 = tf.Variable(tf.zeros(n_nodes_l1))
Y_L1 = tf.nn.elu(tf.add(tf.matmul(X, W_L1), b_L1))
#Y_L1 = tf.nn.relu(tf.add(tf.matmul(X, W_L1), b_L1))
# Output layer:
# Inputs: n_nodes_l1
# Outputs: 1
# Activation: logistic
W_L2 = tf.Variable(tf.truncated_normal([n_nodes_l1, 1], stddev=1/np.sqrt(n_nodes_l1)))
b_L2 = tf.Variable(tf.zeros(1))
Y_L2_linear = tf.add(tf.matmul(Y_L1, W_L2), b_L2)
# Cost function, plus the sigmoid part of the prediction
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits = Y_L2_linear, labels = Y))
# Optimize cost through gradient descent
#optimizer = tf.train.GradientDescentOptimizer(learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate)
update_op = optimizer.minimize(cost)
# Prediction probability values
Y_pred_proba_calc = tf.nn.sigmoid(Y_L2_linear)
# Create TensorFlow session and initialize it
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Initialize lists to hold the history of metrics per epoch
trn_cost_hist = []
test_cost_hist = []
trn_auroc_hist = []
test_auroc_hist = []
epoch = 0
while epoch < n_epochs:
batch = 0
# Save a vector of cost values per batch
cost_vals = np.zeros(n_batches)
while batch < n_batches:
# Select the data for the next batch
dataidx = batch * batch_size
X_batch = train_data[dataidx:(dataidx+batch_size)]
Y_batch = train_labels[dataidx:(dataidx+batch_size)].values.reshape(-1,1)
feed_dict = {X: X_batch, Y: Y_batch}
# Run one iteration of the computation session to update coefficients
_, cost_vals[batch] = sess.run([update_op, cost], feed_dict=feed_dict)
batch += 1
# Evaluate and print the results so far
if (epoch % eval_step == 0):
# Compute the average cost for all mini-batches in this epoch
trn_cost_avg = np.mean(cost_vals)
# Compute the ROC AUC against the full training data
feed_dict = {X: train_data, Y: train_labels.values.reshape(-1,1)}
Y_pred_proba_train = sess.run(Y_pred_proba_calc, feed_dict=feed_dict)
train_auroc = sklearn.metrics.roc_auc_score(train_labels, Y_pred_proba_train)
# Compute the cost and ROC AUC against the test data
feed_dict = {X: test_data, Y: test_labels.values.reshape(-1,1)}
Y_pred_proba_test = sess.run(Y_pred_proba_calc, feed_dict=feed_dict)
test_cost = sess.run(cost, feed_dict=feed_dict)
test_auroc = sklearn.metrics.roc_auc_score(test_labels, Y_pred_proba_test)
print("Epoch: {:4d} trn_cost: {:.5f} test_cost: {:.5f} trn_auroc: {:.4f} test_auroc: {:.4f}".\
format(epoch, trn_cost_avg, test_cost, train_auroc, test_auroc))
# Save the metrics to the history
trn_cost_hist.append(trn_cost_avg)
test_cost_hist.append(test_cost)
trn_auroc_hist.append(train_auroc)
test_auroc_hist.append(test_auroc)
epoch += 1
# Print the best results (as if we had done early stopping)
epoch_hist = [i for i in range(0, n_epochs, eval_step)]
best_idx = test_auroc_hist.index(max(test_auroc_hist))
print("Max test ROC AUC: {:.4f} at epoch: {}".format(test_auroc_hist[best_idx], epoch_hist[best_idx]))
best_idx = trn_auroc_hist.index(max(trn_auroc_hist))
print("Max train ROC AUC: {:.4f} at epoch: {}".format(trn_auroc_hist[best_idx], epoch_hist[best_idx]))
best_idx = test_cost_hist.index(min(test_cost_hist))
print("Min test cost: {:.5f} at epoch: {}".format(test_cost_hist[best_idx], epoch_hist[best_idx]))
best_idx = trn_cost_hist.index(min(trn_cost_hist))
print("Min train cost: {:.5f} at epoch: {}".format(trn_cost_hist[best_idx], epoch_hist[best_idx]))
# Plot the metrics history
plt.plot(epoch_hist, trn_cost_hist, "b")
plt.plot(epoch_hist, test_cost_hist, "r")
plt.xlabel("epoch")
plt.ylabel("cost")
plt.title("Cost vs. epoch")
plt.figure()
plt.plot(epoch_hist, trn_auroc_hist, "b")
plt.plot(epoch_hist, test_auroc_hist, "r")
plt.xlabel("epoch")
plt.ylabel("ROC AUC")
plt.title("ROC AUC vs. epoch")
plt.show()
|
# flake8: noqa
from user.views.user_views import *
from user.views.gatekeeper_view import GatekeeperViewSet
from user.views.organization_view import OrganizationViewSet
|
import pytest
from dagster import DagsterInvalidConfigDefinitionError, Noneable, Selector, execute_solid, solid
def test_kitchen_sink():
@solid(
config_schema={
'str_field': str,
'int_field': int,
'list_int': [int],
'list_list_int': [[int]],
'dict_field': {'a_string': str},
'list_dict_field': [{'an_int': int}],
'selector_of_things': Selector(
{'select_list_dict_field': [{'an_int': int}], 'select_int': int}
),
# this is a good argument to use () instead of [] for type parameterization in
# the config system
'optional_list_of_optional_string': Noneable([Noneable(str)]),
}
)
def kitchen_sink(context):
return context.solid_config
solid_config_one = {
'str_field': 'kjf',
'int_field': 2,
'list_int': [3],
'list_list_int': [[1], [2, 3]],
'dict_field': {'a_string': 'kdjfkd'},
'list_dict_field': [{'an_int': 2}, {'an_int': 4}],
'selector_of_things': {'select_int': 3},
'optional_list_of_optional_string': ['foo', None],
}
assert (
execute_solid(
kitchen_sink, run_config={'solids': {'kitchen_sink': {'config': solid_config_one}}},
).output_value()
== solid_config_one
)
solid_config_two = {
'str_field': 'kjf',
'int_field': 2,
'list_int': [3],
'list_list_int': [[1], [2, 3]],
'dict_field': {'a_string': 'kdjfkd'},
'list_dict_field': [{'an_int': 2}, {'an_int': 4}],
'selector_of_things': {'select_list_dict_field': [{'an_int': 5}]},
'optional_list_of_optional_string': None,
}
assert (
execute_solid(
kitchen_sink, run_config={'solids': {'kitchen_sink': {'config': solid_config_two}}},
).output_value()
== solid_config_two
)
def test_bad_solid_config_argument():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config='dkjfkd')
def _bad_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: 'dkjfkd'. 'dkjfkd' cannot be resolved."
)
def test_bad_solid_config_argument_nested():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config={'field': 'kdjkfjd'})
def _bad_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'field': 'kdjkfjd'}. "
"Error at stack path :field. 'kdjkfjd' cannot be resolved."
)
def test_bad_solid_config_argument_list_wrong_length():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config={'bad_list': []})
def _bad_list_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'bad_list': []}. "
"Error at stack path :bad_list. [] cannot be resolved. "
"Reason: List must be of length 1."
)
def test_bad_solid_config_argument_list_bad_item():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config={'bad_list': ['kdjfkd']})
def _bad_list_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'bad_list': ['kdjfkd']}. "
"Error at stack path :bad_list. ['kdjfkd'] cannot be resolved. "
"Reason: List have a single item and contain a valid type i.e. [int]. "
"Got item 'kdjfkd'."
)
def test_bad_solid_config_argument_list_bad_nested_item():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config={'bad_nested_list': [{'bad_field': 'kjdkfd'}]})
def _bad_list_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'bad_nested_list': "
"[{'bad_field': 'kjdkfd'}]}. Error at stack path "
":bad_nested_list:bad_field. 'kjdkfd' cannot be resolved."
)
|
"""
1. Case order as written: add - divide - subtract - multiply
2. A fixture prints "start calculation" before each case and "calculation finished" afterwards
3. The fixture lives in conftest.py with scope=module
4. Force the execution order to: add - subtract - multiply - divide
5. Generate a local test report with allure
"""
import allure
import pytest
import yaml
from test_Calculator.src.calculator import Calculator
def get_data():
with open('./data.yml') as data_x:
data = yaml.safe_load(data_x)
data_data = data['datas']
data_name = data['ids']
return [data_data, data_name]
data = get_data()
get_cal = Calculator()
@pytest.mark.feature("Test methods")
class Test_Calculator:
    @pytest.mark.story('Addition test')
@pytest.mark.run(order=0)
@pytest.mark.usefixtures("prints")
@pytest.mark.parametrize("a, b, result", data[0]['data_add'], ids=data[1]['ids_add'])
def test_add(self, a, b, result):
assert get_cal.add(a, b) == result
    @pytest.mark.story('Division test')
@pytest.mark.run(order=3)
@pytest.mark.parametrize("a, b, result", data[0]['data_div'], ids=data[1]['ids_div'])
def test_div(self, a, b, result):
assert get_cal.div(a, b) == result
    @pytest.mark.story('Subtraction test')
@pytest.mark.run(order=1)
@pytest.mark.parametrize("a, b, result", data[0]['data_sub'], ids=data[1]['ids_sub'])
def test_sub(self, a, b, result):
assert get_cal.sub(a, b) == result
    @pytest.mark.story('Multiplication test')
@pytest.mark.run(order=2)
@pytest.mark.parametrize("a, b, result", data[0]['data_mul'], ids=data[1]['ids_mul'])
def test_mul(self, a, b, result):
assert get_cal.mul(a, b) == result
if __name__ == '__main__':
    pytest.main(['-vs', 'test_cal_plus.py'])
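# Hypothetical shape of ./data.yml as consumed by get_data() above (the concrete
# values are assumptions for illustration):
#
#   datas:
#     data_add: [[1, 1, 2], [2, 3, 5]]
#     data_sub: [[5, 3, 2]]
#     data_mul: [[2, 3, 6]]
#     data_div: [[6, 3, 2]]
#   ids:
#     ids_add: ['1+1=2', '2+3=5']
#     ids_sub: ['5-3=2']
#     ids_mul: ['2*3=6']
#     ids_div: ['6/3=2']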
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Define the siamese network for one-shot learning,
for french short labels
02/06/2021
@author: milena-git, from jeremylhour courtesy
"""
import torch
import torch.nn as nn
def _createEmbeddingLayer(weights_matrix, non_trainable=False):
"""
_createEmbeddingLayer:
create a layer from pre-trained embeddings
@param weights_matrix (np.array):
@param non_trainable (bool):
"""
weights_matrix = torch.tensor(weights_matrix)
num_embeddings, embedding_dim = weights_matrix.size()
emb_layer = nn.Embedding(num_embeddings, embedding_dim)
emb_layer.load_state_dict({'weight': weights_matrix})
if non_trainable:
emb_layer.weight.requires_grad = False
return emb_layer, num_embeddings, embedding_dim
class SiamesePreTrainedQuadruplet(nn.Module):
def __init__(self, weights_matrix, length, dim=100):
"""
Initialize the siamese network with pre-trained embeddings
@param weights_matrix (torch.tensor):
        @param length (int): length of the inputs
@param dim (int): dimension of the output embedding space
"""
super(SiamesePreTrainedQuadruplet, self).__init__()
self.dim = dim
self.length = length
self.embedding = nn.Embedding.from_pretrained(weights_matrix, padding_idx=0)
self.fc1 = nn.Sequential(
nn.Linear(self.length * weights_matrix.size()[1], 1000),
nn.ReLU(inplace=True),
nn.Linear(1000, 800),
nn.Dropout(0.2),
nn.Linear(800, 500),
nn.Dropout(0.2),
nn.Linear(500, self.dim)
)
def forward_once(self, x):
"""
        Run one branch of the network on a single input
        @param x (torch.Tensor): encoded input from SiameseNetworkDataset
"""
embedded = self.embedding(x)
embedded = torch.reshape(embedded, (embedded.size()[0], embedded.size()[1] * embedded.size()[2]))
output = self.fc1(embedded)
return output
def forward(self, anchor, positive, negative1, negative2):
"""
Run the model forward, by applying forward_once to each inputs
Main forward that is used during train, wraps forward_once().
@param anchor, positive, negative1, negative2 (): output from SiameseNetworkDataset
"""
anchor_o, positive_o, negative1_o, negative2_o = self.forward_once(anchor), self.forward_once(
positive), self.forward_once(negative1), self.forward_once(negative2)
return anchor_o, positive_o, negative1_o, negative2_o
if __name__ == '__main__':
pass
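    # Smoke-test sketch (shapes are illustrative assumptions): random
    # "pre-trained" weights for 5000 tokens of dimension 100, inputs of
    # length 12, and a batch of 4 quadruplets.
    weights = torch.randn(5000, 100)
    net = SiamesePreTrainedQuadruplet(weights_matrix=weights, length=12, dim=100)
    batch = [torch.randint(0, 5000, (4, 12)) for _ in range(4)]
    anchor_o, positive_o, negative1_o, negative2_o = net(*batch)
    print(anchor_o.shape)  # torch.Size([4, 100])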
|
# Copyright 2021 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import numpy as np
from fastestimator.op.numpyop.univariate import AutoContrast
class TestAutoContrast(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.single_input = [np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8)]
cls.single_output_shape = (28, 28, 3)
cls.multi_input = [
np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8),
np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8)
]
cls.multi_output_shape = (28, 28, 3)
def test_single_input(self):
autocontrast = AutoContrast(inputs='x', outputs='x')
output = autocontrast.forward(data=self.single_input, state={})
with self.subTest('Check output type'):
self.assertEqual(type(output), list)
with self.subTest('Check output image shape'):
self.assertEqual(output[0].shape, self.single_output_shape)
def test_multi_input(self):
autocontrast = AutoContrast(inputs='x', outputs='x')
output = autocontrast.forward(data=self.multi_input, state={})
with self.subTest('Check output type'):
self.assertEqual(type(output), list)
with self.subTest('Check output list length'):
self.assertEqual(len(output), 2)
for img_output in output:
with self.subTest('Check output image shape'):
self.assertEqual(img_output.shape, self.multi_output_shape)
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import face_pb2 as face__pb2
class FaceServiceStub(object):
"""faceRecognition.FaceService 人脸服务
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Compare = channel.unary_unary(
'/faceRecognition.FaceService/Compare',
request_serializer=face__pb2.CompareRequest.SerializeToString,
response_deserializer=face__pb2.CompareResponse.FromString,
)
self.Search = channel.unary_unary(
'/faceRecognition.FaceService/Search',
request_serializer=face__pb2.SearchRequest.SerializeToString,
response_deserializer=face__pb2.SearchResponse.FromString,
)
class FaceServiceServicer(object):
"""faceRecognition.FaceService 人脸服务
"""
def Compare(self, request, context):
"""Compare 实现两张人脸图片对比识别,返回两张人脸图片对比的可信度
开发管理平台功能参考: http://10.10.10.2/face/compare
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Search(self, request, context):
"""Search 从FaceSet中搜索近似人脸数据
若存在匹配数据时返回一个FaceDetail及可信度
开发管理平台功能参考: http://10.10.10.2/face/compare
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FaceServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Compare': grpc.unary_unary_rpc_method_handler(
servicer.Compare,
request_deserializer=face__pb2.CompareRequest.FromString,
response_serializer=face__pb2.CompareResponse.SerializeToString,
),
'Search': grpc.unary_unary_rpc_method_handler(
servicer.Search,
request_deserializer=face__pb2.SearchRequest.FromString,
response_serializer=face__pb2.SearchResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'faceRecognition.FaceService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class FaceService(object):
"""faceRecognition.FaceService 人脸服务
"""
@staticmethod
def Compare(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/faceRecognition.FaceService/Compare',
face__pb2.CompareRequest.SerializeToString,
face__pb2.CompareResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Search(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/faceRecognition.FaceService/Search',
face__pb2.SearchRequest.SerializeToString,
face__pb2.SearchResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
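# Minimal client sketch (hypothetical endpoint; the concrete request fields defined
# in face_pb2 are not shown in this file, so an empty CompareRequest is used purely
# for illustration):
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = FaceServiceStub(channel)
#   response = stub.Compare(face__pb2.CompareRequest())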
|
"""
CardIO is a library that works with electrocardiograms.
Documentation - https://analysiscenter.github.io/cardio/
"""
from setuptools import setup, find_packages
import re
with open('cardio/__init__.py', 'r') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
with open('docs/index.rst', 'r') as f:
long_description = f.read()
setup(
name='cardio',
packages=find_packages(exclude=['tutorials', 'examples', 'docs']),
version=version,
url='https://github.com/analysiscenter/cardio',
license='Apache License 2.0',
author='Data Analysis Center team',
author_email='cardio@analysiscenter.ru',
description='A framework for deep research of electrocardiograms',
long_description=long_description,
zip_safe=False,
platforms='any',
install_requires=[
'numpy>=1.13.1',
'scipy>=0.19.1',
'pandas>=0.21.1',
'scikit-learn==0.19.1',
'numba>=0.35.0',
'pywavelets>=0.5.2',
'matplotlib>=2.1.0',
'dill>=0.2.7.1',
'pydicom>=0.9.9',
'pyedflib>=0.1.11',
'wfdb==2.2.1',
'pint>=0.8.1',
],
extras_require={
'tensorflow': ['tensorflow>=1.4'],
'tensorflow-gpu': ['tensorflow-gpu>=1.4'],
'keras': ['keras>=2.0.0'],
'hmmlearn': ['hmmlearn==0.2.0']
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering'
],
)
|
#!/usr/bin/env python3.8
from account import Account
from credential import Credential
from termcolor import colored, cprint
import os
import time
import pickle
# Functions that implement the behaviours in account class.
def create_account(username, fname, lname, p_word):
'''
Function to create new account
'''
new_account = Account(username, fname, lname, p_word)
return new_account
def save_account(account):
'''
Function to save account
'''
account.save_account()
def delete_account(account):
'''
Function to delete an account
'''
account.delete_account()
def check_account_exists(username):
'''
    Function that checks whether an account with that username already exists and returns a Boolean
'''
return Account.account_exists(username)
def auth_user(username, password):
'''
    Function to authenticate a user during login
'''
return Account.auth_user(username, password)
# Functions that implement the behaviours in credential class.
def create_credential(page, username, password):
'''
Function to create credentials
'''
new_credential = Credential(page, username, password)
return new_credential
def save_credential(credential):
'''
Function to save credential
'''
credential.save_credential()
def delete_credential(credential):
'''
Function to delete credential
'''
credential.delete_credential()
def find_cred_by_pagename(pagename):
"""
Function that finds a credential by pagename and returns the credentials
"""
return Credential.find_by_pagename(pagename)
def copy_cred_pass(pagename):
'''
Function to copy credential password
'''
return Credential.copy_cred_password(pagename)
def check_credential_exists(pagename):
'''
    Function that checks whether a credential exists with that pagename and returns a Boolean
'''
return Credential.credential_exists(pagename)
def display_credentials():
'''
Function that returns all the saved credentials
'''
return Credential.display_credentials()
def generate_password(length):
'''
    Function that generates a random password
'''
return Credential.generate_password(length)
def main():
login = False # Set initial login value to false
sign_name = '' # Name of user currently logged in
logged = True
def load_pickles():
try:
file_object = open('accounts.pydata', 'rb')
Account.accounts_list = pickle.load(file_object)
file_object.close()
print("\nLOADED PICKLES ACCOUNTS")
except:
print("\nCLDN'T LOAD PICKLES ACCOUNTS")
Account.accounts_list = []
try:
file_objectt = open('credentials.pydata', 'rb')
Credential.credentials_list = pickle.load(file_objectt)
            file_objectt.close()
print("\nLOADED PICKLES CREDENTIALS")
except:
print("\nCLDN'T LOAD PICKLES CREDENTIALS")
Credential.credentials_list = []
def pickle_save():
try:
file_object = open('accounts.pydata', 'wb')
pickle.dump(Account.accounts_list, file_object)
file_object.close()
print("\nSAVED ACCOUNTS TO PICKLE")
except Exception as e:
print(e)
print("\nCOULDN'T ACCOUNTS SAVE TO PICKLES.")
try:
file_objectt = open('credentials.pydata', 'wb')
pickle.dump(display_credentials(), file_objectt)
file_objectt.close()
print("\nSAVED CREDENTIALS TO PICKLE")
except Exception as e:
print(e)
print("\nCOULDN'T CREDENTIALS SAVE TO PICKLES.")
    def display_title():
        '''
        Function to display the app title bar
        '''
        os.system('clear')
cprint("""
\n\t\t\t\t**********************************************
\t\t**************************************************************************
\t*******************************************************************************************
\n
\t\t\t\t
\t\t\t\t
\t\t\t\t |\ /|
\t\t\t\t | \ / |
\t\t\t\t | \/ |
\n\t\t\t\t*** WELCOME TO PASSWORD LOCKER ***
\n`\t\t\t******************************************************************
""", "magenta")
while logged:
display_title()
load_pickles()
while login == False:
cprint("""
Use the following short codes to manage your password locker account
'ln' - Login
'xx' - Close app
""", "blue")
s_code = input(
colored('\tWhat would you like to do? >> ', 'cyan')).lower()
if s_code == 'ln':
acc_code = input(
colored('\tDo you have an account? Y/N >> ', 'cyan')).upper()
if acc_code == 'Y':
cprint(
                        '\tEnter your username and password to login >>>\n', 'blue')
login_user_name = input(
colored('\tEnter username >> ', 'cyan'))
login_password = input(
colored('\tEnter password >> ', 'cyan'))
print("\n\t\tSigning in...")
time.sleep(1.5)
if auth_user(login_user_name, login_password):
cprint('\n\t\tLOGIN SUCCESSFUL',
'green', attrs=['bold'])
sign_name = login_user_name
login = True
else:
cprint('\n\t\tSORRY COULD NOT VERIFY',
'red', attrs=['bold'])
elif acc_code == 'N':
cprint(
'\tEnter your username,firstname,lastname and password to register account >>>\n', 'blue')
reg_user_name = input(
colored('\tEnter username >> ', 'cyan'))
reg_f_name = input(
colored('\tEnter firstname >> ', 'cyan'))
reg_l_name = input(colored('\tEnter lastname >> ', 'cyan'))
reg_password = input(
colored('\tEnter password >> ', 'cyan'))
print("\n\t\tRegistering ...")
time.sleep(1.5)
if check_account_exists(reg_user_name):
cprint(
f"\n\t\tACCOUNT WITH, {reg_user_name.upper()} USERNAME ALREADY CREATED", "red", attrs=['bold'])
else:
new_acc = create_account(
reg_user_name, reg_f_name, reg_l_name, reg_password)
save_account(new_acc)
cprint(
"\n\t\tCONGRATULATIONS, YOUR ACCOUNT HAS BEEN CREATED", "green", attrs=['bold'])
cprint("\n\tSign into your new account", "blue")
sign_username = input(
colored('\n\tEnter username >> ', 'cyan'))
sign_password = input(
colored('\n\tEnter password >> ', 'cyan'))
print("\n\t\tSigning in ...")
time.sleep(1.5)
if auth_user(sign_username, sign_password):
cprint("\n\t\tLOGIN SUCCESSFUL",
"green", attrs=['bold'])
sign_name = sign_username
login = True
else:
cprint('\n\t\tSORRY COULD NOT VERIFY USER',
'red', attrs=['bold'])
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
elif s_code == 'xx':
cprint(f"""\n\t\tTHANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tClosing App >>>>>
""", "red", attrs=['bold'])
pickle_save()
time.sleep(1.5)
logged = False
break
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
while login == True:
time.sleep(1.5)
cprint(f"""
{sign_name.upper()}, WELCOME TO YOUR PASSWORD LOCKER:
Use the following commands to navigate the application:
'sc' >> Save existing page credentials
'cc' >> Create new page credentials
'dc' >> Display all credentials saved
'fc' >> Find credential saved by page name
'cp' >> Copy pagename credential password to clipboard
'dl' >> Delete page credential
'lgo' >> Log out
'ex' >> Close App
""", "blue")
app_code = input(
colored('\tWhat would you like to do? >> ', 'cyan')).lower()
if app_code == 'sc':
cprint(
'\tEnter pagename,username and password to save credentials >>>\n', 'blue')
page_name = input(
colored('\n\tEnter pagename >> ', 'cyan')).lower()
user_name = input(
colored('\n\tEnter page username >> ', 'cyan'))
pass_word = input(
colored('\n\tEnter page password >> ', 'cyan'))
print("\n\t\tSaving credentials ...")
time.sleep(1.5)
if check_credential_exists(page_name):
cprint('\n\t\tCREDENTIALS FOR '+page_name.upper() +
' ALREADY EXISTS', 'red', attrs=['bold'])
else:
new_credential = create_credential(
page_name, user_name, pass_word)
save_credential(new_credential)
cprint("\n\t\t"+page_name.upper() +
", CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'cc':
cprint(
'\tEnter pagename,username and password to create and save new page credentials >>>\n', 'blue')
page_name = input(
colored('\n\tEnter pagename >> ', 'cyan')).lower()
user_name = input(
colored('\n\tEnter page username >> ', 'cyan'))
gen_pass_code = input(colored(
'\tWould you like to generate a random password? Y/N >> ', 'cyan')).upper()
pass_word = ''
if gen_pass_code == 'Y':
pass_len = int(input(colored(
'\tHow long would you like your password? Provide numbers only >> ', 'cyan')))
pass_word = generate_password(pass_len)
else:
pass_word = input(
colored('\n\tEnter page password >> ', 'cyan'))
print("\n\t\tCreating and Saving credentials ...")
time.sleep(1.5)
if check_credential_exists(page_name):
cprint('\n\t\tCREDENTIALS FOR '+page_name.upper() +
' ALREADY EXISTS', 'red', attrs=['bold'])
else:
new_credential = create_credential(
page_name, user_name, pass_word)
save_credential(new_credential)
cprint("\n\t\t"+page_name.upper() +
", CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'dc':
if len(display_credentials()) > 0:
cprint("\n\t\t"+sign_name.upper() +
", CREDENTIALS", "green", attrs=['bold'])
for credential in display_credentials():
cprint(f'''
-------------------------------------------------------
Page Name >>>> {credential.page_name.upper()}
Page Username >>>> {credential.user_name}
Page Password >>>> {credential.pass_word}
-------------------------------------------------------
''', 'green')
else:
cprint("\n\t\t"+sign_name.upper() +
",HAS NO CREDENTIALS SAVED", "green", attrs=['bold'])
elif app_code == 'fc':
search_page = input(
colored('\n\tEnter page name to search credentials >> ', 'cyan')).lower()
print("\n\t\tLoading ...")
time.sleep(1.5)
if check_credential_exists(search_page):
found_credential = find_cred_by_pagename(search_page)
cprint(f'''
-------------------------------------------------------
Page Name >>>> {found_credential.page_name.upper()}
Page Username >>>> {found_credential.user_name}
Page Password >>>> {found_credential.pass_word}
-------------------------------------------------------
''', 'green')
else:
cprint(
                        f'\n\t\t{search_page.upper()} DOES NOT EXIST', 'red', attrs=['bold'])
elif app_code == 'cp':
search_page = input(colored(
'\n\tEnter page name to copy password to clipboard >> ', 'cyan')).lower()
print("\n\t\tSearching ...")
time.sleep(1.5)
if check_credential_exists(search_page):
copy_cred_pass(search_page)
cprint("\n\t\t"+search_page.upper() +
", PASSWORD COPIED TO CLIPBOARD", "green", attrs=['bold'])
else:
cprint(
                        f'\n\t\t{search_page.upper()} DOES NOT EXIST', 'red', attrs=['bold'])
elif app_code == 'dl':
del_page = input(
colored('\n\tEnter page name you want to delete >> ', 'cyan')).lower()
print("\n\t\tDeleting ...")
time.sleep(1.5)
if check_credential_exists(del_page):
found_page = find_cred_by_pagename(del_page)
found_page.delete_credential()
cprint("\n\t\t"+del_page.upper() +
", CREDENTIALS DELETED", "green", attrs=['bold'])
else:
cprint(
                        f'\n\t\t{del_page.upper()} DOES NOT EXIST', 'red', attrs=['bold'])
elif app_code == 'lgo':
cprint(f"""\n\t\t{sign_name.upper()}, THANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
                \t\t\t\t\tLogging out >>>>>
""", "green", attrs=['bold'])
time.sleep(1.5)
login = False
elif app_code == 'ex':
cprint(f"""\n\t\t{sign_name.upper()}, THANK YOU FOR USING PASSWORD LOCKER
\t\tBye...
\t\t\t\t\tClosing App >>>>>
""", "red", attrs=['bold'])
pickle_save()
time.sleep(1.5)
login = False
logged = False
else:
cprint('\n\t\tPLEASE USE THE GIVEN SHORT CODES',
'red', attrs=['bold'])
if __name__ == '__main__':
main()
|
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
template = """
# Verify the latest-ci version from the {{branch}} branch of kops
# Runs a small subset of the e2e tests.
# Publishes the version to latest-ci-updown-green on success.
- interval: 60m
name: {{name}}
decorate: true
decoration_config:
timeout: 45m
labels:
preset-service-account: "true"
preset-aws-ssh: "true"
preset-aws-credential: "true"
spec:
containers:
- image: {{e2e_image}}
command:
- runner.sh
- kubetest
args:
# Generic e2e test args
- --up
- --test
- --down
- --dump=$(ARTIFACTS)
- --timeout=45m
- --gcp-service-account=$(E2E_GOOGLE_APPLICATION_CREDENTIALS)
# kops-specific test args
- --deployment=kops
- --provider=aws
- --cluster={{name}}.test-cncf-aws.k8s.io
- --kops-ssh-user={{ssh_user}}
- --kops-nodes=4
- --extract={{extract}}
- --kops-state=s3://k8s-kops-prow/
- --kops-ssh-key=$(AWS_SSH_PRIVATE_KEY_FILE)
- --kops-ssh-public-key=$(AWS_SSH_PUBLIC_KEY_FILE)
- --kops-publish=gs://k8s-staging-kops/kops/releases/markers/{{branch}}/latest-ci-updown-green.txt
- --kops-version=https://storage.googleapis.com/k8s-staging-kops/kops/releases/markers/{{branch}}/latest-ci.txt
#- --kops-kubernetes-version should be inferred by kubetest from --extract
#- --kops-zone should be randomized by kubetest
# Specific test args
- --test_args=--ginkgo.focus=\\[k8s.io\\]\\sNetworking.*\\[Conformance\\] --ginkgo.skip=\\[Slow\\]|\\[Serial\\]
- --ginkgo-parallel
annotations:
testgrid-dashboards: sig-cluster-lifecycle-kops, google-aws, kops-misc, kops-k8s-{{k8s_version}}
testgrid-tab-name: {{tab}}
"""
def build_tests(branch, k8s_version, ssh_user):
def expand(s):
subs = {}
if k8s_version:
subs['k8s_version'] = k8s_version
if branch:
subs['branch'] = branch
return s.format(**subs)
if branch == 'master':
extract = "release/latest-1.19"
e2e_image = "gcr.io/k8s-testimages/kubekins-e2e:v20200713-e9b3d9d-1.19"
else:
extract = expand("release/stable-{k8s_version}")
# Hack to stop the autobumper getting confused
e2e_image = "gcr.io/k8s-testimages/kubekins-e2e:v20200713-e9b3d9d-1.18"
e2e_image = e2e_image[:-4] + k8s_version
tab = expand('kops-pipeline-updown-{branch}')
# Names must be valid pod and DNS names
name = expand('e2e-kops-pipeline-updown-kops{branch}')
name = name.replace('.', '')
y = template
y = y.replace('{{extract}}', extract)
y = y.replace('{{e2e_image}}', e2e_image)
y = y.replace('{{k8s_version}}', k8s_version)
y = y.replace('{{name}}', name)
y = y.replace('{{ssh_user}}', ssh_user)
y = y.replace('{{tab}}', tab)
if branch == 'master':
y = y.replace('{{branch}}', "master")
else:
y = y.replace('{{branch}}', "release-" + branch)
spec = {
'branch': branch,
'k8s_version': k8s_version,
}
jsonspec = json.dumps(spec, sort_keys=True)
print("")
print("# " + jsonspec)
print(y.strip())
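# Worked example of the substitutions above, derived directly from the code:
# for branch="1.18" (k8s_version="1.18", ssh_user="ubuntu") the job name
# becomes "e2e-kops-pipeline-updown-kops118" (dots stripped), extract is
# "release/stable-1.18", the tab is "kops-pipeline-updown-1.18" and the
# {{branch}} placeholder is replaced with "release-1.18".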
branches = [
"master",
"1.16",
"1.17",
"1.18",
]
def generate():
print("# Test scenarios generated by build-pipeline.py (do not manually edit)")
print("periodics:")
for branch in branches:
k8s_version = "1.19" if branch == "master" else branch
ssh_user = "admin" if branch in ("1.16", "1.17") else "ubuntu"
build_tests(branch=branch, k8s_version=k8s_version, ssh_user=ssh_user)
generate()
|
from rest_framework import permissions
class IsAuthenticated(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.user
and request.user.is_authenticated
and request.user.is_email_verified
)
class IsAuthenticatedOrReadOnly(permissions.BasePermission):
def has_permission(self, request, view):
return (
request.method in permissions.SAFE_METHODS
or request.user
and request.user.is_authenticated
and request.user.is_email_verified
)
class IsOwnerOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.is_owner(request.user)
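# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module): shows
# how one of the permission classes above is typically wired into a DRF view.
# ExamplePingView is an invented name; only rest_framework itself is assumed.
# ---------------------------------------------------------------------------
from rest_framework.response import Response
from rest_framework.views import APIView


class ExamplePingView(APIView):
    # Anyone may read; writes require an authenticated, email-verified user.
    permission_classes = [IsAuthenticatedOrReadOnly]

    def get(self, request):
        return Response({"ping": "pong"})

    def post(self, request):
        return Response({"echo": request.data})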
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#~ The MIT License (MIT)
#~ Copyright 2018 ©klo86min
#~ Permission is hereby granted, free of charge, to any person obtaining a copy
#~ of this software and associated documentation files (the "Software"), to deal
#~ in the Software without restriction, including without limitation the rights
#~ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#~ copies of the Software, and to permit persons to whom the Software is
#~ furnished to do so, subject to the following conditions:
#~ The above copyright notice and this permission notice shall be included in
#~ all copies or substantial portions of the Software.
#~ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#~ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#~ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#~ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#~ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#~ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#~ SOFTWARE.
import argparse
import csv
import cv2
import mvnc.mvncapi as mvnc
import numpy as np
import os.path
# image settings
IMAGE_DIM = 299
###############################################################################
#
# Modified code from https://github.com/ashwinvijayakumar/ncappzoo/apps/
# rapid-image-classifier/rapid-image-classifier.py
# also under the MIT License
#
###############################################################################
# ---- Step 1: Open the enumerated device and get a handle to it -------------
def open_ncs_device(verbose=False):
if verbose:
mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)
# Look for enumerated NCS device(s); quit program if none found.
devices = mvnc.EnumerateDevices()
if len( devices ) == 0:
print( 'No devices found' )
quit()
# Get a handle to the first enumerated device and open it
device = mvnc.Device( devices[0] )
device.OpenDevice()
return device
# ---- Step 2: Load a graph file onto the NCS device -------------------------
def load_graph( device, graph_file):
# Read the graph file into a buffer
with open( graph_file, mode='rb' ) as f:
blob = f.read()
# Load the graph buffer into the NCS
graph = device.AllocateGraph( blob )
return graph
# ---- Step 5: Unload the graph and close the device -------------------------
def close_ncs_device( device, graph ):
graph.DeallocateGraph()
device.CloseDevice()
##################### End of ncappzoo code ################################
class MovidiusImage(object):
"""Image metadata and loader for Movidius NCS
Args:
name (str): image reference name as used in CSV files
path (str): image path
class_index (int): 1-based class label index
Attributes:
top_k (list): list of predicted (class_index, proba)
inference_time (float): computation time in ms
"""
def __init__(self, name, path, class_index = None):
self.name = name
self.path = path
self.class_index = class_index
self.top_k = None
self.inference_time = None
def load_BGR(self, dim, dtype=np.float16):
"""Return image data in BGR order
Args:
dim (tuple): image dimensions
dtype (numpy.dtype): new type for the BGR blob
Returns:
numpy.ndarray: the transformed BGR blob
"""
mean = 128
std = 1/128
img = cv2.imread(self.path).astype(np.float32)
dx,dy,dz= img.shape
delta=float(abs(dy-dx))
if dx > dy: #crop the x dimension
img=img[int(0.5*delta):dx-int(0.5*delta),0:dy]
else:
img=img[0:dx,int(0.5*delta):dy-int(0.5*delta)]
img = cv2.resize(img, (dim, dim))
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
for i in range(3):
img[:,:,i] = (img[:,:,i] - mean) * std
img = img.astype(dtype)
return img
def save_top_k(self, predictions, labels, k=5):
"""Save the top_k predicted probabilities
Args:
predictions (numpy.ndarray): the probabilities for each class
labels (dict): maps a prediction position to its 1-based class index
k (int): number of top-k probabilities to keep
"""
order_k = predictions.argsort()[::-1][:k]
# class_index is 1-based
self.top_k = [(labels[pos], np.float(predictions[pos]))
for pos in order_k]
def result_string(self):
""" Return image results with the following fields:
[name, top1, proba1, ... top5, proba5, time]
Returns:
str: formatted CSV string
"""
res = [ self.name, ]
for k, prob in self.top_k:
res += [k, prob]
res += [self.inference_time]
pattern = "%s," + "%d,%.9f," * len(self.top_k) + "%.9f"
return pattern % tuple(res)
def init_images(data_dir, images_file):
"""Parse image_file CSV and create one MovidiusImage per row.
Args:
data_dir (str): path of the folder containing images
images_file (str): CSV file with one "name,class_index" row per image (plus a header)
Returns:
list: list of MovidiusImage instances
"""
images_dir = {}
images = []
for file in sorted(os.listdir(data_dir)):
if file.endswith(".jpg"):
image = MovidiusImage(file, os.path.join(os.path.realpath(data_dir), file), -1)
images_dir[file] = image
images.append(image)
if os.path.isfile(images_file):
images = []
with open(images_file, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
# skip header
next(reader)
for row_pos, row in enumerate(reader):
name = row[0]
truth = int(row[1])
img = images_dir[name]
img.class_index = truth
images.append(img)
return images
def write_inferences_csv(output_path, images):
""" For each image, retrieve and write results.
Args:
output_path (str): path for the CSV output
images (list): list of processed MovidiusImage instances
"""
with open(output_path, 'w') as output_file:
for image in images:
output_file.write(image.result_string() + '\n')
def score_inferences(images, min_proba = 1e-15, mult = 100, n_classes=200,
log_loss_max=15.0, time_limit=1000.0):
""" Compute the logLoss and reference computation time
Args:
images (list): list of processed MovidiusImage instances
min_proba (float): minimum probability to be used in logLoss
mult (int): number of images used for the reference time
n_classes (int): total number of classes
log_loss_max (float): maximum allowed log_loss
time_limit (float): maximum time per image (in ms)
Returns:
tuple: LogLoss and reference_time float values
"""
min_proba = np.float(min_proba)
max_proba = 1.0 - min_proba
n_images = len(images)
probas = np.zeros(n_images, dtype=np.float)
image_time = 0.0
top_1_accuracy = 0.0
top_k_accuracy = 0.0
for i, image in enumerate(images):
class_probas = dict(image.top_k)
if image.class_index == image.top_k[0][0]:
top_1_accuracy += 1.0
if image.class_index in class_probas:
top_k_accuracy += 1.0
probas[i] = class_probas[image.class_index]
if probas[i] > 0:
sum_probas = sum(class_probas.values())
probas[i] /= sum_probas
probas[i] = max(min_proba, min(max_proba, probas[i]))
image_time += image.inference_time
log_loss = np.mean(-np.log(probas))
top_1_accuracy /= n_images
top_k_accuracy /= n_images
image_time /= n_images
t = mult * image_time
print("top_1_accuracy = %.9f" % top_1_accuracy)
print("top_k_accuracy = %.9f" % top_k_accuracy )
print("log_loss = %.9f" % log_loss)
print("image_time = %.9f" % image_time)
if image_time > time_limit or log_loss > log_loss_max:
score = 0.0
else:
t_max = mult * time_limit
score = 1e6 * (1.0 - log_loss * np.log(t) / (log_loss_max * np.log(t_max)))
print("score = %.2f" % score)
return score
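# Worked example of the score formula above (illustrative numbers only):
# with log_loss = 1.0, image_time = 100 ms, mult = 100, log_loss_max = 15 and
# time_limit = 1000 ms, we get t = 10000 and t_max = 100000, so
# score = 1e6 * (1 - 1.0 * ln(10000) / (15 * ln(100000)))
#       ~= 1e6 * (1 - 9.21 / 172.69) ~= 946667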
def main(args):
parser = argparse.ArgumentParser(description='TopCoder Movidius MM')
parser.add_argument(
"-images-dir",
dest="images_dir",
help="""Folder containing images to classify"""
)
parser.add_argument(
"-output-file",
dest="output_file",
default="",
help="""Output CSV file to save inference results"""
)
parser.add_argument(
"-graph-file",
dest="graph_file",
default="",
help="""Movidius graph file path"""
)
parser.add_argument(
"-labels-map-file",
dest="labels_map_file",
default="",
help="""Labels map file"""
)
parser.add_argument(
"-images-file",
dest="images_file",
default="",
help="""CSV file containing list of images filenames to classify in images-dir folder, only filenames listed here will be processed"""
)
args = parser.parse_args()
if not os.path.isdir(args.images_dir):
print("data is not a directory: %s" % args.images_dir)
print("Please use the right path as argument, and/or change the Makefile MOVIDIUSDIR variable")
return 0
print("IMAGE_DIM", IMAGE_DIM)
# start NCS
device = open_ncs_device()
graph = load_graph(device, args.graph_file)
# prepare images
images = init_images(args.images_dir, args.images_file)
n_images = len(images)
info_frequency = 100
print("n_images = %d" % n_images)
# load labels map file
labelsLines = [line.rstrip('\n') for line in open(args.labels_map_file)]
labels = {}
for label in labelsLines:
split = label.split(":")
labels[int(split[0])] = int(split[1])
# process images
for i, image in enumerate(images):
if (i+1) % info_frequency == 0:
print("progess %d/%d ..." % (i+1, n_images), flush=True)
bgr_blob = image.load_BGR(IMAGE_DIM)
graph.LoadTensor(bgr_blob, 'user object')
output, userobj = graph.GetResult()
#print(output)
image.inference_time = np.sum(
graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN ) )
image.save_top_k(output, labels, 5)
# stop NCS
close_ncs_device(device, graph)
# process results
write_inferences_csv(args.output_file, images)
if os.path.isfile(args.images_file):
score_inferences(images)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
# -*- coding: utf-8 -*-
# Copyright 2020 Tomaz Muraus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from radio_bridge.dtmf import FFTDTMFDecoderImplementation
__all__ = ["TestFFTDTMFDecoder"]
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.abspath(os.path.join(BASE_DIR, "../fixtures/dtmf"))
class TestFFTDTMFDecoder(unittest.TestCase):
def test_decode_anytone_578_dtmf_data(self):
values = [
("1.wav", "1"),
("2.wav", "2"),
("3.wav", "3"),
("4.wav", "4"),
("5.wav", "5"),
("6.wav", "6"),
("7.wav", "7"),
("8.wav", "8"),
("9.wav", "9"),
("*.wav", "*"),
("0.wav", "0"),
("#.wav", "#"),
]
for file_path, expected_code in values:
file_path = os.path.join(FIXTURES_DIR, "anytone_578/", file_path)
decoder = FFTDTMFDecoderImplementation(file_path=file_path)
self.assertEqual(decoder.decode(), expected_code)
def test_decode_audio_check_tone_generator_data(self):
values = [
("audiocheck.net_dtmf_1.wav", "1"),
("audiocheck.net_dtmf_2.wav", "2"),
("audiocheck.net_dtmf_3.wav", "3"),
("audiocheck.net_dtmf_4.wav", "4"),
("audiocheck.net_dtmf_5.wav", "5"),
("audiocheck.net_dtmf_6.wav", "6"),
("audiocheck.net_dtmf_7.wav", "7"),
("audiocheck.net_dtmf_8.wav", "8"),
("audiocheck.net_dtmf_9.wav", "9"),
("audiocheck.net_dtmf_*.wav", "*"),
("audiocheck.net_dtmf_0.wav", "0"),
("audiocheck.net_dtmf_#.wav", "#"),
]
for file_path, expected_code in values:
file_path = os.path.join(FIXTURES_DIR, "audiochecknet/", file_path)
decoder = FFTDTMFDecoderImplementation(file_path=file_path)
self.assertEqual(decoder.decode(), expected_code)
|
"""
Django settings for sharinator project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ofj2gu)@$2xahppvk%25217+y!-1d4#@1-*#)c6zssk%&s87ai'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'sharinator.administration',
'sharinator.dashboard',
'sharinator.equipment',
'sharinator.peers',
'sharinator.shares',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'sharinator.administration.middleware.ForceLogoutMiddleware',
]
ROOT_URLCONF = 'sharinator.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sharinator.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
MESSAGE_TAGS = {
messages.DEBUG: 'alert alert-dark',
messages.INFO: 'alert alert-info',
messages.SUCCESS: 'alert alert-success',
messages.WARNING: 'alert alert-warning',
messages.ERROR: 'alert alert-danger',
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static.dist')
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "sharinator", "static"),
]
LOGIN_URL = "/admin/dbadmin/login"
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-coverage',
'--cover-package=sharinator',
'--logging-level=WARN'
]
|
import numpy as np
from qmt.geometry import PropertyMap, MaterialPropertyMap
from qmt.materials import Materials
class DummyPartMap:
def __init__(self, part_ids):
assert len(part_ids) == 2
self.partIds = part_ids
def __call__(self, x):
assert np.ndim(x) >= 1
x = np.asanyarray(x)
if np.ndim(x) == 1:
return self.partIds[x[0] > 0]
else:
return np.where(x[..., 0] > 0, self.partIds[1], self.partIds[0])
def test_property_map():
int_map = DummyPartMap([0, 1])
str_map = DummyPartMap(['part1', 'part2'])
prop_map1 = PropertyMap(int_map, np.vectorize(lambda p: 'yes' if p > 0 else 'no'))
assert prop_map1.get_part((1., 2.)) == 1
assert np.all(prop_map1.get_part(-np.ones((2, 3))) == 0)
assert prop_map1((1., 2.)) == 'yes'
assert np.all(prop_map1(-np.ones((2, 3))) == 'no')
props = {'part1': 'yes', 'part2': 'no'}
prop_map2 = PropertyMap(str_map, np.vectorize(lambda p: props[p]))
assert prop_map2.get_part((1., 2.)) == 'part2'
assert np.all(prop_map2.get_part(-np.ones((2, 3))) == 'part1')
assert prop_map2((1., 2.)) == 'no'
assert np.all(prop_map2(-np.ones((2, 3))) == 'yes')
def test_materials_property_map():
int_map = DummyPartMap([0, 1])
str_map = DummyPartMap(['part1', 'part2'])
part_materials1 = {0: 'InAs', 1: 'GaSb'}
part_materials2 = {'part1': 'InAs', 'part2': 'Al'}
mat_lib = Materials(matDict={})
mat_lib.add_material('InAs', 'semi', electronMass=0.026, directBandGap=417.,
valenceBandOffset=-590.)
mat_lib.add_material('GaSb', 'semi', electronMass=.039, directBandGap=812.,
valenceBandOffset=-30.)
mat_lib.add_material('Al', 'metal', workFunction=4280.)
prop_map1 = MaterialPropertyMap(int_map, part_materials1, mat_lib, 'electronMass')
assert prop_map1.get_part((1., 2.)) == 1
assert np.all(prop_map1.get_part(-np.ones((2, 3))) == 0)
assert prop_map1((1., 2.)) == mat_lib['GaSb']['electronMass']
assert np.all(prop_map1(-np.ones((2, 3))) == mat_lib['InAs']['electronMass'])
prop_map2 = MaterialPropertyMap(str_map, part_materials2, mat_lib, 'directBandGap', eunit='eV',
fill_value=0.)
assert prop_map2.get_part((1., 2.)) == 'part2'
assert np.all(prop_map2.get_part(-np.ones((2, 3))) == 'part1')
assert prop_map2((1., 2.)) == 0.
assert np.all(prop_map2(-np.ones((2, 3))) == mat_lib.find('InAs', 'eV')['directBandGap'])
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
indx = i[:-1] + (indx,)
ref[indx] = updates[i]
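# Worked example of the reference update above (illustrative only): with
# ref = np.zeros((2, 3)), indices = [[0, 2], [1, 0]] and
# updates = [[10, 20], [30, 40]], each row of `indices` scatters into the
# matching row of `ref`, giving ref == [[10, 0, 20], [40, 30, 0]].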
_TF_OPS_TO_NUMPY = {
state_ops.batch_scatter_update: _NumpyUpdate,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False,
method=False):
np.random.seed(8)
with self.cached_session(use_gpu=False):
for indices_shape in (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
sparse_dim = len(indices_shape) - 1
indices = np.random.randint(
indices_shape[sparse_dim], size=indices_shape, dtype=itype)
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
old = _AsType(np.random.randn(*(indices_shape + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.Variable(old)
ref.initializer.run()
if method:
ref.batch_scatter_update(ops.IndexedSlices(indices, updates))
else:
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
@test_util.run_deprecated_v1
def testVariableRankUpdate(self):
vtypes = [np.float32, np.float64]
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(
state_ops.batch_scatter_update, vtype, itype)
@test_util.run_deprecated_v1
def testBooleanScatterUpdate(self):
with self.session(use_gpu=False) as session:
var = variables.Variable([True, False])
update0 = state_ops.batch_scatter_update(var, [1], [True])
update1 = state_ops.batch_scatter_update(
var, constant_op.constant(
[0], dtype=dtypes.int64), [False])
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
@test_util.run_deprecated_v1
def testScatterOutOfRange(self):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.session(use_gpu=False):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
state_ops.batch_scatter_update(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = \[-1\] does not index into shape \[6\]'):
state_ops.batch_scatter_update(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = \[6\] does not index into '
r'shape \[6\]'):
state_ops.batch_scatter_update(ref, indices, updates).eval()
if __name__ == '__main__':
test.main()
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
#
from oslo_log import log as logging
from nova.i18n import _LI
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class RetryFilter(filters.BaseHostFilter):
"""Filter out nodes that have already been attempted for scheduling
purposes
"""
# NOTE(danms): This does not affect _where_ an instance lands, so not
# related to rebuild.
RUN_ON_REBUILD = False
def host_passes(self, host_state, spec_obj):
"""Skip nodes that have already been attempted."""
retry = spec_obj.retry
if not retry:
# Re-scheduling is disabled
LOG.debug("Re-scheduling is disabled")
return True
# TODO(sbauza): Once the HostState is actually a ComputeNode, we could
# easily get this one...
host = [host_state.host, host_state.nodename]
# TODO(sbauza)... and we wouldn't need to primitive the hosts into
# lists
hosts = [[cn.host, cn.hypervisor_hostname] for cn in retry.hosts]
passes = host not in hosts
if not passes:
LOG.info(_LI("Host %(host)s fails. Previously tried hosts: "
"%(hosts)s"), {'host': host, 'hosts': hosts})
msg = ('Previously tried: %(hosts)s' % {'hosts': hosts})
self.filter_reject(host_state, spec_obj, msg, append=True)
# Host passes if it's not in the list of previously attempted hosts:
return passes
|
# Copyright (C) 2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import os
from hamcrest import assert_that, empty, equal_to, contains_exactly
from ycmd.user_options_store import DefaultOptions
from ycmd.completers.all import identifier_completer as ic
from ycmd.completers.all.identifier_completer import IdentifierCompleter
from ycmd.request_wrap import RequestWrap
from ycmd.tests import PathToTestFile
from ycmd.tests.test_utils import BuildRequest
def BuildRequestWrap( contents, column_num, line_num = 1 ):
return RequestWrap( BuildRequest( column_num = column_num,
line_num = line_num,
contents = contents ) )
def GetCursorIdentifier_StartOfLine_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
1 ) ) ) )
assert_that( 'fooBar', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'fooBar',
1 ) ) ) )
def GetCursorIdentifier_EndOfLine_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
3 ) ) ) )
def GetCursorIdentifier_PastEndOfLine_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
11 ) ) ) )
def GetCursorIdentifier_NegativeColumn_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo',
-10 ) ) ) )
def GetCursorIdentifier_StartOfLine_StopsAtNonIdentifierChar_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo(goo)',
1 ) ) ) )
def GetCursorIdentifier_AtNonIdentifier_test():
assert_that( 'goo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo(goo)',
4 ) ) ) )
def GetCursorIdentifier_WalksForwardForIdentifier_test():
assert_that( 'foo', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( ' foo',
1 ) ) ) )
def GetCursorIdentifier_FindsNothingForward_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foo ()***()',
5 ) ) ) )
def GetCursorIdentifier_SingleCharIdentifier_test():
assert_that( 'f', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( ' f ',
1 ) ) ) )
def GetCursorIdentifier_StartsInMiddleOfIdentifier_test():
assert_that( 'foobar', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( 'foobar',
4 ) ) ) )
def GetCursorIdentifier_LineEmpty_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( '',
12 ) ) ) )
def GetCursorIdentifier_IgnoreIdentifierFromCommentsAndStrings_test():
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( '"foobar"',
4 ) ) ) )
assert_that( '', equal_to(
ic._GetCursorIdentifier( False,
BuildRequestWrap( '/*\n' ' * foobar\n' ' */',
5,
2 ) ) ) )
def GetCursorIdentifier_CollectIdentifierFromCommentsAndStrings_test():
assert_that( 'foobar', equal_to(
ic._GetCursorIdentifier( True,
BuildRequestWrap( '"foobar"',
4 ) ) ) )
assert_that( 'foobar', equal_to(
ic._GetCursorIdentifier( True,
BuildRequestWrap( '/*\n' ' * foobar\n' ' */',
5,
2 ) ) ) )
def PreviousIdentifier_Simple_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo',
4 ) ) ) )
def PreviousIdentifier_WholeIdentShouldBeBeforeColumn_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foobar',
column_num = 4 ) ) ) )
def PreviousIdentifier_DoNotWrap_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foobar\n bar',
column_num = 4 ) ) ) )
def PreviousIdentifier_IgnoreForwardIdents_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo bar zoo',
4 ) ) ) )
def PreviousIdentifier_IgnoreTooSmallIdent_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 4,
False,
BuildRequestWrap( 'foo',
4 ) ) ) )
def PreviousIdentifier_IgnoreTooSmallIdent_DontContinueLooking_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 4,
False,
BuildRequestWrap( 'abcde foo',
10 ) ) ) )
def PreviousIdentifier_WhitespaceAfterIdent_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo ',
6 ) ) ) )
def PreviousIdentifier_JunkAfterIdent_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo ;;()** ',
13 ) ) ) )
def PreviousIdentifier_IdentInMiddleOfJunk_test():
assert_that( 'aa', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo ;;(aa)** ',
13 ) ) ) )
def PreviousIdentifier_IdentOnPreviousLine_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo\n ',
column_num = 3,
line_num = 2 ) ) ) )
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo\n',
column_num = 1,
line_num = 2 ) ) ) )
def PreviousIdentifier_IdentOnPreviousLine_JunkAfterIdent_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( 'foo **;()\n ',
column_num = 3,
line_num = 2 ) ) ) )
def PreviousIdentifier_NoGoodIdentFound_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 5,
False,
BuildRequestWrap( 'foo\n ',
column_num = 2,
line_num = 2 ) ) ) )
def PreviousIdentifier_IgnoreIdentifierFromCommentsAndStrings_test():
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( '"foo"\n',
column_num = 1,
line_num = 2 ) ) ) )
assert_that( '', equal_to(
ic._PreviousIdentifier( 2,
False,
BuildRequestWrap( '/*\n' ' * foo\n' ' */',
column_num = 2,
line_num = 3 ) ) ) )
def PreviousIdentifier_CollectIdentifierFromCommentsAndStrings_test():
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
True,
BuildRequestWrap( '"foo"\n',
column_num = 1,
line_num = 2 ) ) ) )
assert_that( 'foo', equal_to(
ic._PreviousIdentifier( 2,
True,
BuildRequestWrap( '/*\n' ' * foo\n' ' */',
column_num = 2,
line_num = 3 ) ) ) )
def FilterUnchangedTagFiles_NoFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
assert_that( list( ident_completer._FilterUnchangedTagFiles( [] ) ),
empty() )
def FilterUnchangedTagFiles_SkipBadFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
assert_that( list( ident_completer._FilterUnchangedTagFiles(
[ '/some/tags' ] ) ),
empty() )
def FilterUnchangedTagFiles_KeepGoodFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
tag_file = PathToTestFile( 'basic.tags' )
assert_that( ident_completer._FilterUnchangedTagFiles( [ tag_file ] ),
contains_exactly( tag_file ) )
def FilterUnchangedTagFiles_SkipUnchangesFiles_test():
ident_completer = IdentifierCompleter( DefaultOptions() )
# simulate an already open tags file that didn't change in the meantime.
tag_file = PathToTestFile( 'basic.tags' )
ident_completer._tags_file_last_mtime[ tag_file ] = os.path.getmtime(
tag_file )
assert_that( list( ident_completer._FilterUnchangedTagFiles( [ tag_file ] ) ),
empty() )
|
"""Edge weights.
"""
__author__ = "Rémi Barat"
__version__ = "1.0"
import math
import random
from crack.models.weights import condition_models, format_crit
#####################################################
### Format the models for init_EWeights functions ###
#####################################################
def _init_EWeights(init_fct):
"""Decorator that prepares the [models] to the [init_fct].
"""
def wrapper(models, records, crit=0, key_out="eweights", **kwargs):
condition_models(init_fct, models, records, crit, key_out, "eweights", **kwargs)
return wrapper
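# Hedged, self-contained illustration (not part of crack): the decorator above
# follows a common "wrap an init function behind a dispatcher" pattern. The
# _demo_* names below are invented for this sketch, and the fake dispatcher
# only mimics what condition_models is assumed to do (store the produced weights).
def _demo_wrapping_pattern():
    def _demo_condition(init_fct, models, key_out, **kwargs):
        models[key_out] = {"weights": init_fct(models, **kwargs)}

    def _demo_decorator(init_fct):
        def wrapper(models, key_out="eweights", **kwargs):
            _demo_condition(init_fct, models, key_out, **kwargs)
        return wrapper

    @_demo_decorator
    def _demo_unit(models, nbr_e=3):
        return [1] * nbr_e

    models = {}
    _demo_unit(models, nbr_e=4)
    assert models["eweights"]["weights"] == [1, 1, 1, 1]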
######################
### Initialization ###
######################
def init_EWeights_from_args(models, records, wgts, key_in=None, key_out="eweights"):
if isinstance(key_in, str):
key_in = [key_in]
nbr_n = len(wgts)
nbr_c = len(wgts[0])
models[key_out] = {
"entity" : "eweights",
"nbr_n" : nbr_n,
"nbr_c" : nbr_c,
"weights": wgts,
"totals" : [sum(w[c] for w in wgts) for c in range(nbr_c)],
"keys" : key_in,
}
@_init_EWeights
def init_EWeights_from_HWeights(models, records, key_out="eweights", key_graph="graph", key_hypergraph="hypergraph", key_hweights="hweights", f=None, f_args="sum_centers"):
# Arguments #
nbr_e = models[key_graph]["nbr_e"]
edges = models[key_graph]["edges"]
hwgts = models[key_hweights]["weights"]
hedges = models[key_hypergraph]["edges"]
if f is None:
def f(*hwgts): return sum(hwgt[0] for hwgt in hwgts)
#############
if f_args == "sum_centers":
wgts = [f(hwgts[i], hwgts[j]) for i, j in edges]
else:
crack_error(
ValueError, "init_EWeights_from_HWeights",
"Unknown 'f_args'. Possible values are: 'sum_centers'."
)
return wgts
@_init_EWeights
def init_EWeights_from_NWeights(
models, records,
key_out="eweights", key_in="graph",
key_nweights="nweights", nweights_crit=0,
f=None, f_args="all_ends",
):
"""Returns Weights based on the weights of the nodes for a
given criterion.
"""
nbr_e = models[key_in]["nbr_e"]
edges = models[key_in]["edges"]
nwgts = models[key_nweights]["weights"]
crit = format_crit(nweights_crit)
if f is None:
def f(*nwgts): return sum(nwgt[c] for c in crit for nwgt in nwgts)
if f_args == "all_ends":
wgts = [f(*[nwgts[i] for i in edges[e]]) for e in range(nbr_e)]
else:
crack_error(
ValueError, "init_EWeights_from_NWeights",
"Unknown 'f_args'. Possible values are: 'all_ends'."
)
return wgts
@_init_EWeights
def init_EWeights_random(models, records, key_in=None, nbr_e=None, inf=1, sup=100, **kwargs):
"""Generates (uniformly) random eweights.
"""
if nbr_e is None:
nbr_e = models[key_in]["nbr_e"]
return [random.randint(inf, sup) for e in range(nbr_e)]
@_init_EWeights
def init_EWeights_unit(models, records, key_in=None, nbr_e=None):
"""Give a unit weight to every element.
Options:
key_in: str: Key of the entity the weights will correspond to.
"""
if nbr_e is None:
nbr_e = models[key_in]["nbr_e"]
return [1] * nbr_e
@_init_EWeights
def init_EWeights_topologic_mountains(structs, inf=1, sup=100, npeaks=2):
"""Some Edges are picked randomly to serve as peaks. The more an
Edge is close to a peak, the higher is its weight.
"""
# TODO
pass
###############
### Coarsen ###
###############
def coarsen_EWeights(models, records, c_models, key_eweights, aggregation):
"""Add the coarsen edge weights to [c_models], under [key_weights].
"""
nbr_c = models[key_eweights]["nbr_c"]
ewgts = models[key_eweights]["weights"]
key_in = models[key_eweights]["keys"]
key_topo = key_in[0]
edges = models[key_topo]["edges"]
nbr_e_ = c_models[key_topo]["nbr_e"]
edges_ = c_models[key_topo]["edges"]
nodes_ = c_models[key_topo]["nodes"]
ewgts_ = [[0] * nbr_c for _ in range(nbr_e_)]
tots_ = [0] * nbr_c
for e, edge in enumerate(edges):
i = aggregation[edge[0]]
j = aggregation[edge[1]]
if i != j:
e_ = nodes_[i][1][
next(f for f, j_ in enumerate(nodes_[i][0]) if j_ == j)
]
for c in range(nbr_c):
ewgts_[e_][c] += ewgts[e][c]
tots_[c] += ewgts[e][c]
c_models[key_eweights] = {
"entity" : "eweights",
"nbr_n" : nbr_e_,
"nbr_c" : nbr_c,
"weights": ewgts_,
"totals" : models[key_eweights]["totals"],
"keys" : models[key_eweights]["keys"],
}
####################
### Function IDs ###
####################
INIT_EWGT_FCTS = {
"init_EWeights_from_HWeights" : init_EWeights_from_HWeights,
"init_EWeights_from_NWeights" : init_EWeights_from_NWeights,
"init_EWeights_topologic_mountains": init_EWeights_topologic_mountains,
"init_EWeights_random" : init_EWeights_random,
"init_EWeights_unit" : init_EWeights_unit,
}
|
# This file defines the back end of the Tetris game
#
# GameState is the base class of GameClient.
#
# GameClient.Run() will start two threads:
# - _ProcessActions: Process the action list every x seconds
# - _AutoDrop: Auto drops the current piece.
#
# GameClient:
# - current piece
# - held piece
# - piece list
# - color_map: game board
# - InputActions(...): Inputs a list of actions.
# - ProcessActions(...): Lets the game client process a list of actions
# directly
# - ProcessAction(...): Lets the game client process one action directly
# - PutPiece(...): Puts the current piece if the position is valid.
# - GetState(...): Gets game state, useful to AI
# - CheckValidity(...): Checks if a move is valid
# - SpawnPiece(...): Sets the current piece.
# - Restart(...): Restarts the game.
# - Rotate(...): Alternatively, callers can directly call Rotate to rotate
# current_piece
# - Move(...): Alternatively, callers can directly call Move to move the
# current_piece
#
import copy
import queue
import threading
import time
from threading import Lock
from typing import Tuple, List
import numpy as np
import actions
import shape
# Some global settings
DEFAULT_LENGTH = 20
DEFAULT_WIDTH = 10
MAP_PADDING_SIZE = 4
# When there are fewer than REFILL_THRESHOLD pieces left, spawn a new bag.
REFILL_THRESHOLD = 5
# Disable the auto drop in next few seconds
MAXIMUM_LOCK_TIME = 4
INCREMENTAL_LOCK_TIME = 1
# Scores
SINGLE = 5
DOUBLE = 10
TSS = 20
TRIPLE = 40
QUAD = 50
TSD = 60
TST = 80
PC = 120
# ATTACKS
ATTACK_DOUBLE = 1
ATTACK_TSS = 2
ATTACK_TRIPLE = 2
ATTACK_QUAD = 4
ATTACK_TSD = 4
ATTACK_TST = 6
ATTACK_PC = 10
class InternalError(Exception):
"""Any internal errors."""
class GameState:
def __init__(self):
self.height = 0
self.width = 0
self.color_map = np.array([])
self.current_piece = None
self.held_piece = None
self.score = 0
self.piece_list = []
self.is_gameover = False
self.can_swap = True
self.accumulated_lines_eliminated = 0
self.piece_dropped = 0
self.blevel_increase = False
self.level = 0
self.line_sent = 0
self.line_received = 0
def __deepcopy__(self, memodict=None):
if memodict is None:
memodict = dict()
another = copy.copy(self)
another.color_map = self.color_map.copy()
if self.current_piece is not None:
another.current_piece = self.current_piece.copy()
if self.held_piece is not None:
another.held_piece = self.held_piece.copy()
another.piece_list = copy.deepcopy(self.piece_list)
return another
def copy(self):
return self.__deepcopy__()
def __str__(self):
ret = ""
ret += f"""height: {self.height}
width: {self.width}
color_map: {self.color_map}
current_piece: {self.current_piece}
held_piece: {self.held_piece}
score: {self.score}
piece_list: {self.piece_list}
is_gameover: {self.is_gameover}
can_swap: {self.can_swap}
piece_dropped: {self.piece_dropped}
level: {self.level}
"""
class GameClient(GameState):
def __init__(self, height: int = DEFAULT_LENGTH, width: int = DEFAULT_WIDTH, map_height_padding=MAP_PADDING_SIZE,
map_side_padding=MAP_PADDING_SIZE):
super().__init__()
self.height = height
self.width = width
self.map_height_padding = map_height_padding
self.map_side_padding = map_side_padding
self.dtype = np.uint8
self.dtype_length = 8
if self.width + 2 * map_side_padding > 8:
self.dtype = np.uint16
self.dtype_length = 16
if self.width + 2 * map_side_padding > 16:
self.dtype = np.uint32
self.dtype_length = 32
if self.width + 2 * map_side_padding > 32:
self.dtype = np.uint64
self.dtype_length = 64
if self.width + 2 * map_side_padding > 64:
# NumPy has no uint128 dtype, so rows wider than 64 bits cannot be packed
# into the bit map.
raise InternalError(
"width too long to support bit map. Consider changing it to a smaller value.")
# Lock time settings
# When the lock is enabled, count the lock time.
# When the accumulated lock time is greater than the current maximum lock time,
# force the auto drop; otherwise auto drop is disabled for this turn.
# When the current lock time is reached but a refresh-lock-time request is generated,
# increase the current maximum lock time by the incremental lock time.
self.maximum_lock_time = MAXIMUM_LOCK_TIME
self.current_maximum_lock_time = 0
self.incremental_lock_time = INCREMENTAL_LOCK_TIME
self.accumulate_lock_time = 0
# Only when move or rotate at bottom locks the auto drop
self._enable_lock_time = False
# Color map marks the color for each cell.
self.color_map = np.array([[]], dtype=self.dtype)
# Bit map for a better performance in some calculation.
self.bit_map = np.array([], dtype=self.dtype)
# Lock for current_piece
self.mutex_current_piece = Lock()
self.last_put_piece = None
# List of actions to process
self.action_list = queue.Queue()
self._init_spawn_interval = 500 # 500 ms at level 0
self._current_spawn_interval = 500
# actions.Action
self.last_action = None
self.disable_autodrop = False
self.line_tobesent = 0
# Used to calculate the auto-drop interval decrease based on the current level.
# Generated from the sigmoid function
# x = np.linspace(0, 40, 40)
# interval_decrease = 110 / (1 + np.exp(0.16 * x))
# interval_decrease = np.cumsum(interval_decrease)
# print(repr(np.cumsum(interval_decrease)))
self.interval_decrease = np.array(
[55., 100.49727968, 150.55179446, 190.28030383,
230.85041422, 260.47244367, 290.38990828, 320.86947489,
345.19115272, 350.63934095, 380.49515164, 400.03022699,
410.5020957, 420.15098155, 430.19789113, 440.8437644,
450.26946046, 455.63636342, 461.08741849, 465.74844074,
469.72957119, 473.12678557, 476.02338748, 478.4914391,
480.59310001, 482.38185737, 483.90364044, 485.19781892,
486.29808909, 487.23325451, 488.02790975, 488.70303602,
489.27651798, 489.76359062, 490.17722443, 490.52845671,
490.82667585, 491.07986489, 491.2948099, 491.47727802])
self._RefillPieces()
self._TakePieceFromList()
self.accumulated_lines_eliminated = 0
# When soft-dropping, temporarily disable auto-drop
self.soft_drop = False
self.piece_dropped = 0
# Must be put after the initializations above
self._InitMap()
def _InitMap(self):
side_padding = (1 << self.map_side_padding) - 1
init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
bottom_padding = (1 << (self.width + 2 * self.map_side_padding)) - 1
self.bit_map = np.concatenate((
np.array((self.map_height_padding + self.height) * [init_row], dtype=self.dtype),
np.array(self.map_height_padding * [bottom_padding], dtype=self.dtype)), dtype=self.dtype)
self.color_map = np.array([[0 for i in range(self.width)] for x in range(self.height + self.map_height_padding)],
dtype=self.dtype)
def Restart(self):
self._InitMap()
self.piece_list = []
self.held_piece = None
self.current_piece = None
# Lock of the game state
self.mutex_current_piece = Lock()
self.is_gameover = False
self.last_put_piece = None
# List of actions to process
self.action_list = queue.Queue()
self._init_spawn_interval = 500.0
self._current_spawn_interval = 500.0
# actions.Action
self.last_action = []
self.can_swap = True
self.score = 0
self.accumulate_lock_time = 0
self.accumulated_lines_eliminated = 0
self.soft_drop = False
self.piece_dropped = 0
self.line_sent = 0
self.line_received = 0
self.line_tobesent = 0
self._enable_lock_time = False
self._RefillPieces()
self._TakePieceFromList()
def Run(self):
auto_drop_th = threading.Thread(target=self.AutoDrop, name="auto_drop", daemon=True)
process_input_th = threading.Thread(target=self._ProcessActionsThread, daemon=True)
if not self.disable_autodrop:
auto_drop_th.start()
process_input_th.start()
if not self.disable_autodrop:
auto_drop_th.join()
process_input_th.join()
print("game ends")
def GetState(self) -> GameState:
"""Gets game state.
Returns a copy of the game state rather than a reference, so callers cannot mutate the live game.
"""
return copy.deepcopy(super())
def GetCell(self, i: int, j: int) -> int:
"""Gets cell at [i,j].
Notes: This function doesn't check the index out of boundary error.
"""
return self.color_map[i, j]
def GetMap(self):
"""Gets whole color_map."""
return self.color_map
def GetMapArea(self, corner: Tuple[int, int],
size: Tuple[int, int]) -> np.array:
"""Gets an area of
:param top_left:
:param bottom_right:
:return: The area of the color_map.
"""
size = (np.min([size[0], self.color_map.shape[0] - corner[0]]),
np.min([size[1], self.color_map.shape[1] - corner[1]]))
return self.color_map[corner[0]: corner[0] + size[0],
corner[1]: corner[1] + size[1]]
def SetMap(self, pos: Tuple[int, int], v: int, map: np.array = None):
"""Sets the cell at [i,j] to value v."""
(i, j) = pos
bit_map = self.bit_map.copy()
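# When a different (e.g. preview) map is passed in, bit updates go to this
# throwaway copy so the live bit_map stays untouched.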
if map is None or map is self.color_map:
map = self.color_map
bit_map = self.bit_map
map[i, j] = v
# Set a bit to value: Clear to bit to 0 and then set to value
bit_v = 0 if v == 0 else 1
bit_j_pos = self.width + self.map_side_padding - 1 - j
bit_map[i] = (bit_map[i] & ~(1 << bit_j_pos)) | (bit_v << bit_j_pos)
def SetWholeMap(self, map: np.array):
if map.shape != self.color_map.shape:
raise InternalError(
f"Map shape {map.shape}"
f" must match the color_map shape: {self.color_map.shape}")
self.color_map = map
# Convert the map to a boolean map
bit_color_map = map != 0
# Reverse the column order, add the side padding, then call np.packbits(..., bitorder="little")
bit_color_map = bit_color_map[:, ::-1]
bit_color_map = np.pad(
bit_color_map,
((0, 0), (self.map_side_padding, self.map_side_padding)),
"constant", constant_values=(1,))
padding0_len = self.dtype_length - bit_color_map.shape[1]
bit_color_map = np.pad(bit_color_map, ((0, 0), (0, padding0_len)),
"constant", constant_values=(0,))
int_color_map = np.packbits(bit_color_map, bitorder="little").view(self.dtype)
self.bit_map[0:self.map_height_padding + self.height] = int_color_map
print(int_color_map)
print(self.bit_map)
def copy(self):
another = copy.copy(self)
another.last_action = copy.copy(self.last_action)
if self.last_put_piece is not None:
another.last_put_piece = self.last_put_piece.copy()
another.color_map = np.copy(self.color_map)
another.bit_map = np.copy(self.bit_map)
another.action_list = copy.copy(self.action_list)
another.piece_list = self.piece_list.copy()
another.current_piece = self.current_piece.copy()
if self.held_piece is None:
another.held_piece = None
else:
another.held_piece = self.held_piece.copy()
return another
def AutoDrop(self):
while True:
if self.soft_drop:
# If it is soft dropping, we don't perform auto drop.
self.soft_drop = False
else:
if self.CheckValidity(self.current_piece, offset=(1, 0)):
self.Move(actions.Action(down=True, source_user_or_ai=False))
else:
if (not self._enable_lock_time or
self.accumulate_lock_time >= self.current_maximum_lock_time):
self.PutPiece()
else:
self.accumulate_lock_time += self._current_spawn_interval / 1000
time.sleep(self._current_spawn_interval / 1000)
def InputActions(self, acts: List[actions.Action]):
if self.is_gameover:
return
if len(acts) > 30:
print("len:", len(acts))
acts = acts[-30:]
for act in acts:
if self.action_list.qsize() > 50:
break
self.action_list.put(act)
def ProcessActions(self, actions: List[actions.Action], post_processing=True):
for a in actions:
self.ProcessAction(a, post_processing=post_processing)
def ProcessAction(self, action: actions.Action, post_processing=True):
if self.is_gameover:
return
# print(f"Processed action: {action.direction}, {action.rotation}, {action.swap}")
# self.test += 1
# print(self.test)
if action.swap:
self.Swap()
self.Rotate(action.rotation)
self.Move(action, post_processing=post_processing)
def _ProcessActionsThread(self):
while True:
while not self.action_list.empty():
act = self.action_list.get()
self.ProcessAction(act)
self.action_list.task_done()
time.sleep(0.001)
def SetLevel(self, level: int = 0):
"""Let the front end set!"""
self.level = level
i = min(len(self.interval_decrease), self.level)
self._current_spawn_interval = max(
10, self._init_spawn_interval - self.interval_decrease[i])
def IncreaseLevel(self, inc: int = 1):
"""Let the front end decide!"""
self.level += inc
self.SetLevel(self.level)
def Move(self, action: actions.Action, post_processing=True) -> bool:
"""Moves the current piece.
:param action: The action to apply (left/right/down/drop flags).
:param post_processing: if True, put the piece onto the color_map and
apply line elimination. Otherwise just update the current_piece's state.
:return True if moved; False otherwise
"""
if (action.direction == actions.NONE and
not action.down):
return False
moved = False
if action.down:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (1, 0)):
self.current_piece.x += 1
moved = True
self.soft_drop = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.LEFT:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (0, -1)):
self.current_piece.y += -1
moved = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.RIGHT:
try:
self.mutex_current_piece.acquire()
if self.CheckValidity(self.current_piece, (0, 1)):
self.current_piece.y += 1
moved = True
finally:
self.mutex_current_piece.release()
if action.direction == actions.HARD_DROP or action.direction == actions.SOFT_DROP:
try:
self.mutex_current_piece.acquire()
while self.CheckValidity(self.current_piece, (1, 0)):
self.current_piece.x += 1
moved = True
finally:
self.mutex_current_piece.release()
if post_processing and action.direction == actions.HARD_DROP:
self.PutPiece()
if moved:
self.last_action = action
at_bottom = not self.CheckValidity(self.current_piece, (1, 0))
if (at_bottom and action.direction != actions.HARD_DROP and
action.source_user):
self._RefreshLockTime()
return moved
def _RefreshLockTime(self):
self._enable_lock_time = True
if self.accumulate_lock_time >= self.current_maximum_lock_time:
self.current_maximum_lock_time = min(
self.current_maximum_lock_time + self.incremental_lock_time,
self.maximum_lock_time)
def _ResetLockTime(self):
self._enable_lock_time = False
self.accumulate_lock_time = 0
self.current_maximum_lock_time = 0
def Swap(self):
"""Swaps the held piece and the current if its swappable"""
if not self.can_swap:
return
try:
self.mutex_current_piece.acquire()
t = self.held_piece
self.held_piece = self.current_piece
self.current_piece = t
if not self.current_piece:
self._TakePieceFromList()
self.current_piece.Init()
self.held_piece.Init()
self.can_swap = False
finally:
self.mutex_current_piece.release()
def CheckGameOver(self):
self.is_gameover = np.any(
self.GetMapArea((0, 0), (self.map_height_padding, self.width)) != 0)
return self.is_gameover
def _AnalyzeElimination(self, n_eliminate: int) -> int:
ret = 0
is_last_put_t = isinstance(self.last_put_piece, shape.T)
if n_eliminate == 1:
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSS")
ret += TSS
self.line_tobesent += ATTACK_TSS
else:
ret += SINGLE
if n_eliminate == 2:
# TSD
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TSD")
ret += TSD
self.line_tobesent += ATTACK_TSD
# Normal Double
else:
ret += DOUBLE
self.line_tobesent += ATTACK_DOUBLE
if n_eliminate == 3:
# TST
if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
print("TST")
ret += TST
self.line_tobesent += ATTACK_TST
else:
ret += TRIPLE
self.line_tobesent += ATTACK_TRIPLE
if n_eliminate == 4:
ret += QUAD
self.line_tobesent += ATTACK_QUAD
# Checks for PC
if np.all(self.color_map == 0):
print("PC")
ret += PC
self.line_tobesent += ATTACK_PC
return ret * (self.level + 3)
def _LineClear(self):
elimated_lines = []
elimated_cnt = 0
# Check the 4 rows the last piece can span. This does not handle shapes
# taller than 4 rows, but such shapes are not part of this game and custom
# shapes are not planned.
for row in range(4):
if not (self.last_put_piece.x + row >= 0 and
self.last_put_piece.x + row < self.height + self.map_height_padding):
continue
if np.all(self.color_map[self.last_put_piece.x + row, :] != 0):
elimated_lines.append(row + self.last_put_piece.x)
elimated_cnt += 1
self.color_map = np.vstack((np.zeros((elimated_cnt, self.width),
dtype=self.dtype),
np.delete(self.color_map, elimated_lines, axis=0)))
# Updates the bit_map
side_padding = (1 << self.map_side_padding) - 1
init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
self.bit_map = np.concatenate((elimated_cnt * [init_row],
np.delete(self.bit_map, elimated_lines))).astype(self.dtype)
self.accumulated_lines_eliminated += elimated_cnt
self.score += self._AnalyzeElimination(n_eliminate=elimated_cnt)
def _SendAttack(self):
"""Send attack to target."""
# This feature has not been implemented yet.
self.line_sent += self.line_tobesent
self.line_tobesent = 0
def PutPiece(self, piece: shape.Shape = None):
""" Puts a piece to color_map if it is a valid placement then execute the post processing.
:param piece: The piece to put, if None, put the self.current_piece
:param color_map: The color_map where the piece puts, if None, self.color_map will be used.
:returns: True if the piece has been put. False otherwise.
"""
if self._PrePutPiece(piece):
self._PostPutPiece(piece)
return True
else:
return False
def _PrePutPiece(self, piece: shape.Shape = None, map: np.array = None):
""" Puts a piece to color_map if it is a valid placement.
Post put processing such as self._LineClear will not be executed
:param piece: The piece to put, if None, put the self.current_piece
:param map: The color_map where the piece puts, if None, self.color_map will be used.
:returns: True if the piece has been put. False otherwise.
"""
try:
if not piece:
self.mutex_current_piece.acquire()
piece = self.current_piece
if map is None:
map = self.color_map
if not self.CheckValidity(piece):
return False
for (i, j) in piece.GetShape():
self.SetMap((piece.x + i, piece.y + j), piece.id, map)
return True
finally:
if self.mutex_current_piece.locked():
self.mutex_current_piece.release()
def _PostPutPiece(self, piece: shape.Shape = None):
if piece is not None:
self.last_put_piece = piece
else:
self.last_put_piece = self.current_piece
# LineClear should be called prior to SendAttack
self._LineClear()
if piece is None:
self._TakePieceFromList()
self.CheckGameOver()
self._ResetLockTime()
self._SendAttack()
self.can_swap = True
self.piece_dropped += 1
def TextDraw(self):
preview_map = self.color_map.copy()
self._PrePutPiece(self.current_piece, preview_map)
for i in preview_map:
print(i)
print()
def SpawnPiece(self, piece: shape.Shape = None) -> bool:
if not piece:
self._TakePieceFromList()
else:
self.current_piece = piece.copy()
return self.CheckValidity(self.current_piece)
def _FindFittedPiece(self, piece: shape.Shape = None, num_90rotations: int = 0):
"""Finds a location that fits this piece with n 90rotations.
Ref: https://tetris.fandom.com/wiki/SRS
:param piece: The piece to be put in the color_map. If none, it will be set to the current_piece
:param num_90rotations: How many 90 rotations
:return: piece - shape.Shape: the piece with rotations that fits the color_map.
"""
if not piece:
piece = self.current_piece
def _IsJLSTZ(piece: shape.Shape):
jlstz = [shape.J, shape.L, shape.S, shape.T, shape.Z]
for s in jlstz:
if isinstance(piece, s):
return True
return False
# The 180 rotation wall kick table is copied from
# https://tetris.fandom.com/wiki/SRS#180.C2.B0_rotation
# which originates from
# https://github.com/JoshuaWebb/nullpomino/blob/master/src/mu/nu/nullpo/game/subsystem/wallkick/StandardWallkick.java
offset_map_jlstz = [
# state 0
([(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)], # 0>>1
# 0>>2, 180 rotation
# [(0,0), (1, 0), (2, 0), (1, 1), (2, 1), (-1, 0), (-2, 0), (-1, 1), (-2, 1), (0, -1), (3, 0), (-3, 0)],
[(0, 0)],
[(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)]), # 0>>3
# state 1
([(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)], # 1>>2
# l>>3, 180 rotation
# [(0,0), (0, 1), (0, 2), (-1, 1), (-1, 2), (0, -1), (0, -2), (-1, -1), (-1, -2), (1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)]), # 1>>0
# state 2
([(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)], # 2>>3
# [(0,0), (-1, 0), (-2, 0), (-1, -1), (-2, -1), (1, 0), (2, 0), (1, -1), (2, -1), (0, 1), (-3, 0), (3, 0)], # 2>>0,
[(0, 0)],
[(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)]), # 2>>1
# state 3
([(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)], # 3>>0
# 3>>1, 180 rotation
# [(0,0), (0, 1), (0, 2), (1, 1), (1, 2), (0, -1), (0, -2), (1, -1), (1, -2), (-1, 0), (0, 3), (0, -3)],
[(0, 0)],
[(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)]), # 3>>2
]
offset_map_i = [
# state 0
[[(0, 0), (0, -2), (0, 1), (1, -2), (-2, 1), ], # 0>>1
# [(0,0), (-1, 0), (-2, 0), (1, 0), (2, 0), (0, 1)], # 0>>2, 180 rotation
[(0, 0)],
[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)]], # 0>>3
# state 1
[[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)], # 1>>2
# [(0,0), (0, 1), (0, 2), (0, -1), (0, -2), (-1, 0)], # 1>>3, 180 rotation,
[(0, 0)],
[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)]], # 1>>0
# state 2
[[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)], # 2>>3
# [(0, 0), (1, 0), (2, 0), (-1, 0), (-2, 0), (0, -1)], # 2>>0, 180 rotation
[(0, 0)],
[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)]], # 2>>1
# state 3
[[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)], # 3>>0
# [(0, 0), (0, 1), (0, 2), (0, -1), (0, -2), (1, 0)], # 3>>1, 180 rotation
[(0, 0)],
[(0, 0), (0, -2), (0, 1), (1, -2), (2, 1)]], # 3>>2
]
state = piece.state
num_90rotations %= 4
offset_piece = piece.copy()
ori_x = offset_piece.x
ori_y = offset_piece.y
for _ in range(num_90rotations):
offset_piece.Rotate90()
if num_90rotations == 0:
if self.CheckValidity(offset_piece):
return offset_piece
num_90rotations -= 1
if _IsJLSTZ(piece):
for (offset_x, offset_y) in offset_map_jlstz[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
else:
for (offset_x, offset_y) in offset_map_i[state][num_90rotations]:
offset_piece.x = ori_x + offset_x
offset_piece.y = ori_y + offset_y
if (offset_piece.y >= self.width or
offset_piece.x >= self.height + self.map_height_padding):
continue
if self.CheckValidity(offset_piece):
return offset_piece
return None
def Rotate(self, n: int) -> bool:
"""Rotates the current piece.
:param n: rotations, in range [0,4)
:return: True if the current piece can be rotated. False otherwise.
"""
n %= 4
if n == 0:
return False
fitted_piece = self._FindFittedPiece(num_90rotations=n)
if fitted_piece:
self.current_piece = fitted_piece
self.last_action = actions.Action(dir=0, rotation=n)
if not self.CheckValidity(self.current_piece, (1, 0)):
self._RefreshLockTime()
return fitted_piece is not None
def CheckValidity(self, piece: shape.Shape, offset: Tuple[int, int] = (0, 0)):
"""Checks if the piece with offset can be put in the color_map
:param piece: The piece to be put.
:param offset: The initial (x, y) offset applied to the piece before the check.
:return: True if the current state can fit into the color_map. False otherwise.
"""
(ox, oy, os) = (piece.x, piece.y, piece.state)
piece.x += offset[0]
piece.y += offset[1]
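# Bitboard collision test (explanatory note): take the piece's (assumed 4x4)
# bitmap, shift it so its columns line up with board column piece.y, and AND it
# with the board rows the piece spans. Any set bit means the piece overlaps an
# occupied cell (bit_map presumably also encodes out-of-bounds cells as set
# bits, so wall/floor collisions are caught by the same test).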
a = self.bit_map[piece.x: piece.x + 4]
b = self.width - piece.y
c = piece.GetBitMap().astype(self.dtype)
d = c << b
e = a & d
check_rst = e == 0
(piece.x, piece.y, piece.state) = (ox, oy, os)
return np.all(check_rst)
def _GetNextBag(self):
start_y = int((self.width - 3) / 2)
assert start_y >= 0
bag = [shape.I(start_y=start_y),
shape.J(start_y=start_y),
shape.L(start_y=start_y),
shape.O(start_y=start_y),
shape.S(start_y=start_y),
shape.T(start_y=start_y),
shape.Z(start_y=start_y)]
np.random.shuffle(bag)
return bag
def _RefillPieces(self):
"""
When there are REFILL_THRESHOLD or fewer pieces left in the list,
refill it with a new 7-piece bag.
"""
if len(self.piece_list) <= REFILL_THRESHOLD:
self.piece_list.extend(self._GetNextBag())
def _TakePieceFromList(self):
self._RefillPieces()
self.current_piece = self.piece_list[0].copy()
self.piece_list = self.piece_list[1:]
def CreateGameFromState(state: GameState) -> GameClient:
game = GameClient(height=state.height, width=state.width)
game.color_map = np.copy(state.color_map)
game.current_piece = state.current_piece.copy()
if state.held_piece is not None:
game.held_piece = state.held_piece.copy()
else:
game.held_piece = None
game.score = state.score
game.piece_list = state.piece_list.copy()
game.can_swap = state.can_swap
game.is_gameover = state.is_gameover
game.accumulated_lines_eliminated = state.accumulated_lines_eliminated
game.piece_dropped = state.piece_dropped
game.line_sent = state.line_sent
game.line_received = state.line_received
return game
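# Minimal usage sketch (hedged): assumes GameClient, defined earlier in this
# module, initializes its board and piece bag in __init__, and that a standard
# 20x10 playfield is a valid size. Guarded so importing the module stays
# side-effect free.
if __name__ == '__main__':
    demo = GameClient(height=20, width=10)
    if demo.SpawnPiece():   # draw the next piece from the 7-bag
        demo.Rotate(1)      # attempt one SRS quarter-turn with wall kicks
        demo.TextDraw()     # print the color_map with the piece overlaid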
|
# -*- coding: utf-8 -*-
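# English gloss of this Python 2 snippet: a "clap" game over 1..100 -- print
# the number followed by "짝짝" ("clap clap") when both its ones and tens
# digits are prime (2, 3, 5, 7), "짝" ("clap") when exactly one is, and just
# the number otherwise. The birthday section prints
# "<name> <year>년(year) <month>월(month) <day>일생(born)" for each entry.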
def needsclap(x):
return x==2 or x==3 or x==5 or x==7
for i in range(1,101):
one = needsclap(i%10)
ten = needsclap(i/10)
if one and ten: print i,"짝짝"
elif one or ten: print i,"짝"
else: print i
print [1,2,3] + ["ff"]
print [1,2] * 3
a = [1,2,3,4,5,6,7,8,9]
a[1] = 5
print a[-2]
print a[3:5]
print a[::-1]
birthdays = [
("한재규", 1981, 8, 26),
("박현규", 1981, 6,6),
("장기호", 1980, 9, 2)
]
print birthdays
for name, year, month, day in birthdays:
print name, year,"년", month, "월", day, "일생"
print "%s - %s월 %s일생" % (name,month,day)
print "==========================="
formatfun = "{0} - {1}월 {2}일생".format
macro = "{0} - {1}월 {2}일생"
for name, _, month, day in birthdays:
print formatfun(name, month, day)
print macro.format(name, month, day)
print "4",type(4)
print "hello?", type("hello")
print "[1,2,3]", type([1,2,3])
print "(1,2,3)", type((1,2,3))
print type(formatfun)
print type(macro)
print type(birthdays)
print type(needsclap)
print "hello".upper()
print "".join(["i"," ","am"])
print "i am".split(" ")
for i in "i am a boy".split(" "):
print i
print repr(1/5.0)
print str(1/5.0)
def names(birthdays):
result = []
for name,_,_,_ in birthdays:
result.append(name)
result.sort()
return result
print names(birthdays)
for name in names(birthdays):
print name
a,b,c = [1,2,3]
print a,b,c
|
"""
Will open a port in your router for Home Assistant and provide statistics.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/upnp/
"""
import asyncio
from ipaddress import ip_address
import aiohttp
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import dispatcher
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.components.discovery import DOMAIN as DISCOVERY_DOMAIN
from .const import (
CONF_ENABLE_PORT_MAPPING, CONF_ENABLE_SENSORS,
CONF_HASS, CONF_LOCAL_IP, CONF_PORTS,
CONF_UDN, CONF_SSDP_DESCRIPTION,
SIGNAL_REMOVE_SENSOR,
)
from .const import DOMAIN
from .const import LOGGER as _LOGGER
from .config_flow import ensure_domain_data
from .device import Device
REQUIREMENTS = ['async-upnp-client==0.12.4']
DEPENDENCIES = ['http']
NOTIFICATION_ID = 'upnp_notification'
NOTIFICATION_TITLE = 'UPnP/IGD Setup'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_ENABLE_PORT_MAPPING, default=False): cv.boolean,
vol.Optional(CONF_ENABLE_SENSORS, default=True): cv.boolean,
vol.Optional(CONF_LOCAL_IP): vol.All(ip_address, cv.string),
vol.Optional(CONF_PORTS):
vol.Schema({
vol.Any(CONF_HASS, cv.positive_int):
vol.Any(CONF_HASS, cv.positive_int)
})
}),
}, extra=vol.ALLOW_EXTRA)
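# Illustrative configuration.yaml shape for the schema above (keys shown
# symbolically because the actual strings live in the CONF_* constants of
# .const; CONF_HASS appears to be the literal 'hass' placeholder used below):
#
#   upnp:
#     <CONF_ENABLE_PORT_MAPPING>: false
#     <CONF_ENABLE_SENSORS>: true
#     <CONF_PORTS>:
#       hass: hass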
def _substitute_hass_ports(ports, hass_port):
"""Substitute 'hass' for the hass_port."""
ports = ports.copy()
# substitute 'hass' for hass_port, both keys and values
if CONF_HASS in ports:
ports[hass_port] = ports[CONF_HASS]
del ports[CONF_HASS]
for port in ports:
if ports[port] == CONF_HASS:
ports[port] = hass_port
return ports
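# For example (illustrative, assuming hass_port == 8123): a ports mapping of
# {CONF_HASS: CONF_HASS, 80: 8080} becomes {8123: 8123, 80: 8080}; the 'hass'
# placeholder is replaced on both the key side and the value side.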
# config
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Register a port mapping for Home Assistant via UPnP."""
ensure_domain_data(hass)
# ensure sane config
if DOMAIN not in config:
return True
if DISCOVERY_DOMAIN not in config:
_LOGGER.warning('UPNP needs discovery, please enable it')
return False
# overridden local ip
upnp_config = config[DOMAIN]
if CONF_LOCAL_IP in upnp_config:
hass.data[DOMAIN]['local_ip'] = upnp_config[CONF_LOCAL_IP]
# determine ports
ports = {CONF_HASS: CONF_HASS} # default, port_mapping disabled by default
if CONF_PORTS in upnp_config:
# copy from config
ports = upnp_config[CONF_PORTS]
hass.data[DOMAIN]['auto_config'] = {
'active': True,
'enable_sensors': upnp_config[CONF_ENABLE_SENSORS],
'enable_port_mapping': upnp_config[CONF_ENABLE_PORT_MAPPING],
'ports': ports,
}
return True
# config flow
async def async_setup_entry(hass: HomeAssistantType,
config_entry: ConfigEntry):
"""Set up UPnP/IGD-device from a config entry."""
ensure_domain_data(hass)
data = config_entry.data
# build UPnP/IGD device
ssdp_description = data[CONF_SSDP_DESCRIPTION]
try:
device = await Device.async_create_device(hass, ssdp_description)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error('Unable to create upnp-device')
return False
hass.data[DOMAIN]['devices'][device.udn] = device
# port mapping
if data.get(CONF_ENABLE_PORT_MAPPING):
local_ip = hass.data[DOMAIN].get('local_ip')
ports = hass.data[DOMAIN]['auto_config']['ports']
_LOGGER.debug('Enabling port mappings: %s', ports)
hass_port = hass.http.server_port
ports = _substitute_hass_ports(ports, hass_port)
await device.async_add_port_mappings(ports, local_ip=local_ip)
# sensors
if data.get(CONF_ENABLE_SENSORS):
_LOGGER.debug('Enabling sensors')
# register sensor setup handlers
hass.async_create_task(hass.config_entries.async_forward_entry_setup(
config_entry, 'sensor'))
async def unload_entry(event):
"""Unload entry on quit."""
await async_unload_entry(hass, config_entry)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, unload_entry)
return True
async def async_unload_entry(hass: HomeAssistantType,
config_entry: ConfigEntry):
"""Unload a config entry."""
data = config_entry.data
udn = data[CONF_UDN]
if udn not in hass.data[DOMAIN]['devices']:
return True
device = hass.data[DOMAIN]['devices'][udn]
# port mapping
if data.get(CONF_ENABLE_PORT_MAPPING):
_LOGGER.debug('Deleting port mappings')
await device.async_delete_port_mappings()
# sensors
if data.get(CONF_ENABLE_SENSORS):
_LOGGER.debug('Deleting sensors')
dispatcher.async_dispatcher_send(hass, SIGNAL_REMOVE_SENSOR, device)
# clear stored device
del hass.data[DOMAIN]['devices'][udn]
return True
|
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, NamedTuple, Optional
def run_diagnose_me(
bucket: str,
execution_mode: str,
project_id: str,
target_apis: str,
quota_check: Optional[List[Any]] = None,
) -> NamedTuple('Outputs', [('bucket', str), ('project_id', str)]):
""" Performs environment verification specific to this pipeline.
Args:
bucket:
string name of the bucket to be checked. Must be of the format
gs://bucket_root/any/path/here/is/ignored where any path beyond root
is ignored.
execution_mode:
If set to HALT_ON_ERROR, any error will raise an exception; this is
intended to stop the data processing of a pipeline. Set it to any
other value to only report errors/warnings.
project_id:
GCP project ID which is assumed to be the project under which
the current pod is executing.
target_apis:
String consisting of a comma-separated list of APIs to be verified.
quota_check:
List of entries describing how much quota is required. Each entry
has three fields: region, metric and quota_needed. All
string-typed.
Raises:
RuntimeError: If the configuration is not set up properly and the
HALT_ON_ERROR flag is set.
"""
# Installing pip3 and kfp, since the base image 'google/cloud-sdk:279.0.0'
# does not come with pip3 pre-installed.
import subprocess
subprocess.run([
'curl', 'https://bootstrap.pypa.io/get-pip.py', '-o', 'get-pip.py'
],
capture_output=True)
subprocess.run(['apt-get', 'install', 'python3-distutils', '--yes'],
capture_output=True)
subprocess.run(['python3', 'get-pip.py'], capture_output=True)
subprocess.run(['python3', '-m', 'pip', 'install', 'kfp>=0.1.31', '--quiet'],
capture_output=True)
import sys
from kfp.cli.diagnose_me import gcp
config_error_observed = False
quota_list = gcp.get_gcp_configuration(
gcp.Commands.GET_QUOTAS, human_readable=False
)
if quota_list.has_error:
print('Failed to retrieve project quota with error %s\n' % (quota_list.stderr))
config_error_observed = True
else:
# Check quota.
quota_dict = {} # Mapping from region to dict[metric, available]
for region_quota in quota_list:
quota_dict[region_quota['name']] = {}
for quota in region_quota['quotas']:
quota_dict[region_quota['name']][quota['metric']
] = quota['limit'] - quota['usage']
quota_check = quota_check or []  # tolerate quota_check=None
for single_check in quota_check:
if single_check['region'] not in quota_dict:
print(
'Regional quota for %s does not exist in current project.\n' %
(single_check['region'])
)
config_error_observed = True
else:
if quota_dict[single_check['region']][single_check['metric']
] < single_check['quota_needed']:
print(
'Insufficient quota observed for %s at %s: %s is needed but only %s is available.\n'
% (
single_check['metric'], single_check['region'],
str(single_check['quota_needed']
), str(quota_dict[single_check['region']][single_check['metric']])
)
)
config_error_observed = True
# Get the project ID
# from project configuration
project_config = gcp.get_gcp_configuration(
gcp.Commands.GET_GCLOUD_DEFAULT, human_readable=False
)
if not project_config.has_error:
auth_project_id = project_config.parsed_output['core']['project']
print(
'GCP credentials are configured with access to project: %s ...\n' %
(project_id)
)
print('Following account(s) are active under this pipeline:\n')
subprocess.run(['gcloud', 'auth', 'list', '--format', 'json'])
print('\n')
else:
print(
'Project configuration is not accessible with error %s\n' %
(project_config.stderr),
file=sys.stderr
)
config_error_observed = True
if not project_config.has_error and auth_project_id != project_id:
print(
'User provided project ID %s does not match the configuration %s\n' %
(project_id, auth_project_id),
file=sys.stderr
)
config_error_observed = True
# Get project buckets
get_project_bucket_results = gcp.get_gcp_configuration(
gcp.Commands.GET_STORAGE_BUCKETS, human_readable=False
)
if get_project_bucket_results.has_error:
print(
'could not retrieve project buckets with error: %s' %
(get_project_bucket_results.stderr),
file=sys.stderr
)
config_error_observed = True
# Get the root of the user provided bucket i.e. gs://root.
bucket_root = '/'.join(bucket.split('/')[0:3])
print(
'Checking to see if the provided GCS bucket\n %s\nis accessible ...\n' %
(bucket)
)
if bucket_root in get_project_bucket_results.json_output:
print(
'Provided bucket \n %s\nis accessible within the project\n %s\n' %
(bucket, project_id)
)
else:
print(
'Could not find the bucket %s in project %s. ' % (bucket, project_id) +
'Please verify that you have provided the correct GCS bucket name.\n' +
'Only the following buckets are visible in this project:\n%s' %
(get_project_bucket_results.parsed_output),
file=sys.stderr
)
config_error_observed = True
# Verify APIs that are required are enabled
api_config_results = gcp.get_gcp_configuration(gcp.Commands.GET_APIS)
api_status = {}
if api_config_results.has_error:
print(
'could not retrieve API status with error: %s' %
(api_config_results.stderr),
file=sys.stderr
)
config_error_observed = True
print('Checking APIs status ...')
for item in api_config_results.parsed_output:
api_status[item['config']['name']] = item['state']
# printing the results in stdout for logging purposes
print('%s %s' % (item['config']['name'], item['state']))
# Check if target apis are enabled
api_check_results = True
for api in target_apis.replace(' ', '').split(','):
if 'ENABLED' != api_status.get(api, 'DISABLED'):
api_check_results = False
print(
'API \"%s\" is not accessible or not enabled. To enable this api go to '
% (api) +
'https://console.cloud.google.com/apis/library/%s?project=%s' %
(api, project_id),
file=sys.stderr
)
config_error_observed = True
if 'HALT_ON_ERROR' in execution_mode and config_error_observed:
raise RuntimeError(
'There was an error in your environment configuration.\n' +
'Note that resolving such issues generally requires a deep knowledge of Kubernetes.\n'
+ '\n' +
'We highly recommend that you recreate the cluster and check "Allow access ..." \n'
+
'checkbox during cluster creation to have the cluster configured automatically.\n'
+
'For more information on this and other troubleshooting instructions refer to\n'
+ 'our troubleshooting guide.\n' + '\n' +
'If you have intentionally modified the cluster configuration, you may\n'
+
'bypass this error by removing the execution_mode HALT_ON_ERROR flag.\n'
)
return (project_id, bucket)
if __name__ == '__main__':
import kfp.components as comp
comp.func_to_container_op(
run_diagnose_me,
base_image='google/cloud-sdk:279.0.0',
output_component_file='component.yaml',
)
|
import torch
import numpy as np
import itertools
from itertools import product
import math
import random
import unittest
import warnings
import operator
from functools import partial
from torch._six import inf, nan
from torch.testing._internal.common_utils import (
TestCase, iter_indices, TEST_WITH_ASAN, run_tests,
torch_to_numpy_dtype_dict, make_tensor, TEST_SCIPY, set_default_dtype)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA,
dtypesIfCPU, deviceCountAtLeast, precisionOverride, onlyOnCPUAndCUDA,
skipCUDAIfRocm, skipIf)
from torch.testing import all_types_and_complex_and
if TEST_SCIPY:
import scipy.special
# TODO: remove this
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float('nan')
x[torch.randn(*shape) > 0.5] = float('inf')
x[torch.randn(*shape) > 0.5] = float('-inf')
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex('nan')
x[torch.randn(*shape) > 0.5] = complex('inf')
x[torch.randn(*shape) > 0.5] = complex('-inf')
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
# TODO: refactor this out
# Converts half/bfloat16 dtype to float when device is cpu
def _convert_t(dtype, device):
if device == 'cpu' and dtype in {torch.half, torch.bfloat16}:
return torch.float
return dtype
# TODO: revise the tests to use make_tensor in common_utils.py instead
# Returns a tensor of the requested shape, dtype, and device
# Requesting a half CPU tensor returns a float CPU tensor with
# values representable by a half.
# Initialization uses randint for non-float types and randn for float types.
def _make_tensor(shape, dtype, device, fill_ones=False) -> torch.Tensor:
# Returns a tensor filled with ones
if fill_ones:
return torch.ones(*shape, dtype=_convert_t(dtype, device), device=device)
# Returns a tensor with random integer values
if not (dtype.is_floating_point or dtype.is_complex):
t = torch.randint(0, 10, shape, device=device)
if dtype != torch.uint8:
t = t - 5 # generate negative values also
return t.to(_convert_t(dtype, device))
# Populates the CPU tensor with floats representable as half/bfloat16
if dtype == torch.half and device == 'cpu':
return torch.randn(*shape, dtype=torch.float, device=device).half().float()
if dtype == torch.bfloat16 and device == 'cpu':
return torch.randn(*shape, dtype=torch.float, device=device).bfloat16().float()
# Default: returns a tensor with random float values
return torch.randn(shape, dtype=dtype, device=device).to(dtype=dtype)
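# For instance, per the branches above: _make_tensor((3, 3), torch.half, 'cpu')
# returns a float32 tensor whose values round-trip exactly through float16,
# while _make_tensor((3, 3), torch.int64, 'cuda') returns random integers in
# [-5, 5).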
# TODO: update to use opinfos consistently
class TestBinaryUfuncs(TestCase):
def test_add_broadcast_empty(self, device):
# empty + empty
self.assertRaises(RuntimeError, lambda: torch.randn(5, 0, device=device) + torch.randn(0, 5, device=device))
self.assertEqual(torch.randn(5, 0, device=device), torch.randn(0, device=device) + torch.randn(5, 0, device=device))
self.assertEqual(torch.randn(5, 0, 0, device=device), torch.randn(0, device=device) + torch.randn(5, 0, 1, device=device))
# scalar + empty
self.assertEqual(torch.randn(5, 0, 6, device=device), torch.randn((), device=device) + torch.randn(5, 0, 6, device=device))
# non-empty, empty
self.assertEqual(torch.randn(0, device=device), torch.randn(0, device=device) + torch.randn(1, device=device))
self.assertEqual(torch.randn(0, 7, 0, 6, 5, 0, 7, device=device),
torch.randn(0, 7, 0, 6, 5, 0, 1, device=device) + torch.randn(1, 1, 5, 1, 7, device=device))
self.assertRaises(RuntimeError, lambda: torch.randn(7, 0, device=device) + torch.randn(2, 1, device=device))
def test_addcmul_scalars_as_floats(self, device):
# zero-dim variables that don't require grad should bind to scalar arguments
x = torch.tensor(2.)
y = torch.tensor(3., device=device)
# 3 + (3 * 3) * 2
self.assertEqual(y.addcmul(y, y, value=x), 21)
x = torch.tensor(2., requires_grad=True)
self.assertRaises(Exception, lambda: y.addcmul(y, y, value=x))
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops(self, device):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
eq = x == y
for idx in iter_indices(x):
self.assertEqual(x[idx] == y[idx], eq[idx] == 1)
ne = x != y
for idx in iter_indices(x):
self.assertEqual(x[idx] != y[idx], ne[idx] == 1)
lt = x < y
for idx in iter_indices(x):
self.assertEqual(x[idx] < y[idx], lt[idx] == 1)
le = x <= y
for idx in iter_indices(x):
self.assertEqual(x[idx] <= y[idx], le[idx] == 1)
gt = x > y
for idx in iter_indices(x):
self.assertEqual(x[idx] > y[idx], gt[idx] == 1)
ge = x >= y
for idx in iter_indices(x):
self.assertEqual(x[idx] >= y[idx], ge[idx] == 1)
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops_must_take_bool_output(self, device):
for op in [torch.lt, torch.le, torch.gt, torch.ge, torch.eq, torch.ne,
torch.logical_and, torch.logical_or, torch.logical_xor]:
self.assertEqual(op(torch.tensor([True]), torch.tensor([False])).dtype, torch.bool)
# TODO: update to work on CUDA, too
@onlyCPU
def test_inplace_comparison_ops_require_inputs_have_same_dtype(self, device):
with self.assertRaisesRegex(RuntimeError, 'Expected object of scalar type'):
for op in ['lt_', 'le_', 'gt_', 'ge_', 'eq_', 'ne_', 'logical_xor_', 'logical_and_', 'logical_or_']:
x = torch.tensor([1], dtype=torch.int)
y = torch.tensor([2], dtype=torch.long)
in_place_method = getattr(x, op)
in_place_method(y)
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops_check_for_scalar_overflow(self, device):
s = 1 << 20
t = torch.tensor([1 << 5], dtype=torch.uint8)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t < s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s < t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t <= s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s <= t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t > s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s > t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t >= s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s >= t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t == s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s == t)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t != s)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(s != t)
# TODO: update to work on CUDA, too
@onlyCPU
def test_comparison_ops_check_for_zerodim_tensor_overflow(self, device):
t1 = torch.tensor([1 << 5], dtype=torch.uint8)
t2 = torch.tensor([1 << 30], dtype=torch.int32)
ts1 = torch.tensor(1 << 20, dtype=torch.int32)
ts2 = torch.tensor(1 << 40, dtype=torch.int64)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 < ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 < t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 <= ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 <= t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 > ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 > t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 >= ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 >= t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 == ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 == t2)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(t1 != ts1)
with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):
self.assertTrue(ts2 != t2)
# TODO: update to work on CUDA, too
@onlyCPU
def test_bitwise_ops(self, device):
x = torch.randn(5, 5).gt(0)
y = torch.randn(5, 5).gt(0)
and_result = x & y
for idx in iter_indices(x):
if and_result[idx]:
self.assertTrue(x[idx] and y[idx])
else:
self.assertFalse(x[idx] and y[idx])
or_result = x | y
for idx in iter_indices(x):
if or_result[idx]:
self.assertTrue(x[idx] or y[idx])
else:
self.assertFalse(x[idx] or y[idx])
xor_result = x ^ y
for idx in iter_indices(x):
if xor_result[idx]:
self.assertTrue(x[idx] ^ y[idx])
else:
self.assertFalse(x[idx] ^ y[idx])
x_clone = x.clone()
x_clone &= y
self.assertEqual(x_clone, and_result)
x_clone = x.clone()
x_clone |= y
self.assertEqual(x_clone, or_result)
x_clone = x.clone()
x_clone ^= y
self.assertEqual(x_clone, xor_result)
def test_inplace_division(self, device):
t = torch.rand(5, 5, device=device)
id_before = id(t)
t /= 2
id_after = id(t)
self.assertEqual(id_before, id_after)
@dtypes(*torch.testing.get_all_dtypes(include_bool=False, include_complex=False))
def test_div_rounding_modes(self, device, dtype):
if dtype.is_floating_point:
low, high = -10.0, 10.0
else:
info = torch.iinfo(dtype)
low, high = info.min, info.max
a = make_tensor((100,), device, dtype, low=low, high=high)
b = make_tensor((100,), device, dtype, low=low, high=high)
# Avoid division by zero so we can test (a / b) * b == a
if dtype.is_floating_point:
eps = 0.1
b[(-eps < b) & (b < eps)] = eps
else:
b[b == 0] = 1
if not dtype.is_floating_point:
# floor(a / b) * b can be < a, so fixup slightly to avoid underflow
a = torch.where(a < 0, a + b, a)
d_true = torch.divide(a, b, rounding_mode=None)
self.assertTrue(d_true.is_floating_point())
self.assertEqual(d_true * b, a.to(d_true.dtype))
d_floor = torch.divide(a, b, rounding_mode='floor')
if dtype not in (torch.bfloat16, torch.half):
self.assertEqual(d_floor * b + torch.remainder(a, b), a)
else:
self.assertEqual(d_floor * b + torch.remainder(a.float(), b.float()), a,
exact_dtype=False)
d_trunc = torch.divide(a, b, rounding_mode='trunc')
rounding_unsupported = (
dtype == torch.half and device != 'cuda' or
dtype == torch.bfloat16 and device != 'cpu')
d_ref = d_true.float() if rounding_unsupported else d_true
self.assertEqual(d_trunc, d_ref.trunc().to(dtype))
@dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
def test_div_rounding_nonfinite(self, device, dtype):
# Compare division of special floating point values against NumPy
num = torch.tensor([1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
dtype=dtype)
# Division by zero is tested separately
denom = num[num != 0]
a, b = num[None, :].clone(), denom[:, None].clone()
# Compare bfloat16 against NumPy float
exact_dtype = dtype != torch.bfloat16
if exact_dtype:
an, bn = a.cpu().numpy(), b.cpu().numpy()
else:
an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
for mode, np_ref in ((None, np.true_divide), ("floor", np.floor_divide)):
with np.errstate(all='ignore'):
expect = np_ref(an, bn)
kwargs = dict(rounding_mode=mode) if mode is not None else {}
with set_default_dtype(torch.double):
actual = torch.divide(a, b, **kwargs)
self.assertEqual(actual, torch.from_numpy(expect),
exact_device=False, exact_dtype=exact_dtype)
# Compare contiguous (likely vectorized) against non-contiguous (not vectorized)
a_noncontig = torch.empty([2 * i for i in a.shape], dtype=dtype, device=device)[::2, ::2]
a_noncontig[:] = a
b_noncontig = torch.empty([2 * i for i in b.shape], dtype=dtype, device=device)[::2, ::2]
b_noncontig[:] = b
for rounding_mode in (None, "trunc", "floor"):
expect = torch.divide(a_noncontig, b_noncontig, rounding_mode=rounding_mode)
actual = torch.divide(a, b, rounding_mode=rounding_mode)
self.assertEqual(actual, expect)
@dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)
def test_divide_by_zero_rounding(self, device, dtype):
a = torch.tensor([1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],
dtype=dtype)
exact_dtype = (dtype != torch.bfloat16)
if exact_dtype:
an = a.cpu().numpy()
else:
an = a.float().cpu().numpy()
zero = torch.zeros_like(a)
# NOTE: NumPy's floor_divide rounding changed in 1.20.0 to be consistent with divide
expect = np.divide(an, 0)
for rounding_mode in (None, 'floor'):
# CPU scalar
actual = torch.divide(a, 0, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
# Device tensor
actual = torch.divide(a, zero, rounding_mode=rounding_mode)
self.assertEqual(actual, expect, exact_dtype=exact_dtype)
@dtypes(*torch.testing.get_all_dtypes(
include_bool=False, include_complex=False, include_bfloat16=False))
def test_div_rounding_numpy(self, device, dtype):
info = (torch.finfo(dtype) if dtype.is_floating_point
else torch.iinfo(dtype))
low, high = info.min, info.max
# Compare division of random values against NumPy
a = make_tensor((4096,), device, dtype, low=low, high=high)
b = make_tensor((4096,), device, dtype, low=low, high=high)
# Avoid division by zero: it raises for integers, and for floats the
# NumPy reference changed in 1.20, where floor_divide now follows IEEE
# rules for the inf/nan results of dividing by zero.
b[b == 0] = 1
# Compare bfloat16 against NumPy float
exact_dtype = dtype != torch.bfloat16
if exact_dtype:
an, bn = a.cpu().numpy(), b.cpu().numpy()
else:
an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()
for mode, np_ref in (
(None, np.true_divide),
("floor", np.floor_divide),
("trunc", lambda a, b: np.trunc(np.true_divide(a, b)).astype(a.dtype))
):
with np.errstate(all='ignore'):
expect = torch.from_numpy(np_ref(an, bn))
kwargs = dict(rounding_mode=mode) if mode is not None else {}
# Contiguous (likely vectorized)
with set_default_dtype(torch.double):
actual = torch.divide(a, b, **kwargs)
self.assertEqual(actual, expect, exact_device=False, exact_dtype=exact_dtype)
# Non-contiguous (not vectorized)
expect = expect[::2]
with set_default_dtype(torch.double):
actual = torch.divide(a[::2], b[::2], **kwargs)
self.assertEqual(actual, expect, exact_device=False, exact_dtype=exact_dtype)
# Tests that trying to add, inplace, a CUDA tensor to a CPU tensor
# throws the correct error message
@onlyCUDA
def test_cross_device_inplace_error_msg(self, device):
a = torch.tensor(2.)
b = torch.tensor(2., device=device)
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
a += b
# TODO: refactor this test into a more generic one, it's parked here currently
@onlyOnCPUAndCUDA
def test_out_resize_warning(self, device):
a = torch.tensor((1, 2, 3), device=device, dtype=torch.float32)
b = torch.tensor((4, 5, 6), device=device, dtype=torch.float32)
unary_inputs = (a,)
binary_inputs = (a, b)
unary_ops = (torch.ceil, torch.exp)
binary_ops = (torch.add, torch.sub)
for op in (unary_ops + binary_ops):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
inputs = unary_inputs if op in unary_ops else binary_inputs
# No warnings
op(*inputs, out=torch.empty(3, device=device))
op(*inputs, out=torch.empty(0, device=device))
self.assertEqual(len(w), 0)
# Cases that throw warnings
op(*inputs, out=torch.empty(2, device=device))
self.assertEqual(len(w), 1)
# Verifies that the inplace dunders (like idiv) actually are in place
@onlyOnCPUAndCUDA
def test_inplace_dunders(self, device):
t = torch.randn((1,), device=device)
expected = t.data_ptr()
t += 1
t -= 1
t *= 1
t /= 1
with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
t //= 1
t %= 1
self.assertEqual(expected, t.data_ptr())
def check_internal_mem_overlap(self, inplace_op, num_inputs,
dtype, device,
expected_failure=False):
if isinstance(inplace_op, str):
inplace_op = getattr(torch.Tensor, inplace_op)
input = torch.randn(1, dtype=dtype, device=device).expand(3, 3)
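# `expand` produces a 3x3 view whose nine elements all alias one storage
# element, i.e. the internal overlap the in-place op is expected to reject
# (hence the 'single memory location' error checked below).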
inputs = [input] + [torch.randn_like(input)
for i in range(num_inputs - 1)]
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, 'single memory location'):
inplace_op(*inputs)
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, 'single memory location'):
inplace_op(*inputs)
def unary_check_input_output_mem_overlap(self, data, sz, op,
expected_failure=False):
def _test(op, output, input):
output_exp = torch.empty_like(output)
op(input, out=output_exp)
self.assertEqual(op(input, out=output), output_exp, msg=op.__name__)
# output is identical to input:
_test(op, output=data[0:sz], input=data[0:sz])
# output and input are independent:
_test(op, output=data[0:sz], input=data[sz:2 * sz])
# output partially overlaps with input:
if not expected_failure:
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
_test(op, data[0:sz], data[1:sz + 1])
else:
with self.assertRaises(AssertionError):
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
_test(op, data[0:sz], data[1:sz + 1])
def binary_check_input_output_mem_overlap(self, op, device,
expected_failure=False):
sz = 3
data = torch.randn(2 * sz, device=device)
other = torch.randn(sz, device=device)
self.unary_check_input_output_mem_overlap(
data, sz, lambda input, out: op(other, input, out=out),
expected_failure=expected_failure)
self.unary_check_input_output_mem_overlap(
data, sz, lambda input, out: op(input, other, out=out),
expected_failure=expected_failure)
@dtypes(torch.double)
def test_binary_op_mem_overlap(self, device, dtype):
ops = [
("add", True, True, 'cpu'),
("add", True, True, 'cuda'),
("mul", True, True, 'cpu'),
("mul", True, True, 'cuda'),
("sub", True, True, 'cpu'),
("sub", True, True, 'cuda'),
("div", True, True, 'cpu'),
("div", True, True, 'cuda'),
("pow", True, True, 'cpu'),
("pow", True, True, 'cuda'),
("fmod", True, True, 'cpu'),
("fmod", True, True, 'cuda'),
("atan2", True, True, 'cpu'),
("atan2", True, True, 'cuda'),
("hypot", True, True, 'cpu'),
("hypot", True, True, 'cuda'),
("igamma", True, True, 'cpu'),
("igamma", True, True, 'cuda'),
("igammac", True, True, 'cpu'),
("igammac", True, True, 'cuda'),
("nextafter", True, True, 'cpu'),
("nextafter", True, True, 'cuda'),
("le", True, True, 'cpu'),
("le", True, True, 'cuda'),
("lt", True, True, 'cpu'),
("lt", True, True, 'cuda'),
("ge", True, True, 'cpu'),
("ge", True, True, 'cuda'),
("gt", True, True, 'cpu'),
("gt", True, True, 'cuda'),
("eq", True, True, 'cpu'),
("eq", True, True, 'cuda'),
("ne", True, True, 'cpu'),
("ne", True, True, 'cuda'),
("logical_and", True, True, 'cpu'),
("logical_and", True, True, 'cuda'),
("logical_or", True, True, 'cpu'),
("logical_or", True, True, 'cuda'),
("logical_xor", True, True, 'cpu'),
("logical_xor", True, True, 'cuda'),
]
for (fn, has_input_output_mem_overlap_check,
has_internal_mem_overlap_check, dev) in ops:
if dev != device:
continue
out_op = getattr(torch, fn)
inplace_op = getattr(torch.Tensor, fn + '_')
self.check_internal_mem_overlap(
inplace_op, 2, dtype, device,
expected_failure=not has_internal_mem_overlap_check)
self.binary_check_input_output_mem_overlap(out_op, device,
expected_failure=not has_input_output_mem_overlap_check)
def _do_pow_for_exponents(self, m1, exponents, pow_fn, atol):
for num in exponents:
if isinstance(num, int) and num < 0 and not m1.is_floating_point() and not m1.is_complex():
with self.assertRaisesRegex(RuntimeError,
r'Integers to negative integer powers are not allowed\.'):
torch.pow(m1[4], num)
else:
# base - tensor, exponent - number
# contiguous
res1 = torch.pow(m1[4], num)
res2 = res1.clone().zero_()
# `math.pow` has issues with complex exponentiation so we need to resort to normal `pow`.
for i in range(res2.size(0)):
res2[i] = pow_fn(m1[4][i], num)
rtol = 0 if atol is not None else None
self.assertEqual(res1, res2, atol=atol, rtol=rtol)
# non-contiguous
res1 = torch.pow(m1[:, 4], num)
res2 = res1.clone().zero_()
for i in range(res2.size(0)):
res2[i] = pow_fn(m1[i, 4], num)
self.assertEqual(res1, res2, atol=atol, rtol=rtol)
# scalar ** tensor to enforce correct handling of dtypes for __rpow__().
expected_dtype = torch.result_type(num, m1)
res1 = num ** m1[4]
res2 = torch.tensor(num, dtype=expected_dtype, device=m1.device) ** m1[4]
self.assertEqual(res1, res2)
self.assertEqual(res1.dtype, expected_dtype)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_pow(self, device, dtype):
m1 = torch.empty(0, dtype=dtype, device=device)
if m1.is_floating_point() or m1.is_complex():
m1 = make_tensor((100, 100), low=0, high=1, dtype=dtype, device=device) + 0.5
else:
# math.pow will overflow and throw exceptions for large integers
range_high = 4 if dtype in (torch.int8, torch.uint8) else 10
m1 = make_tensor((100, 100), low=1, high=range_high, dtype=dtype, device=device)
exponents = [-2.8, -2, -1, -0.5, 0, 0.5, 1, 2, 3, 4, 3.3]
complex_exponents = [-2.5j, -1.0j, 0j, 1.0j, 2.5j, 1.0 + 1.0j, -1.0 - 1.5j, 3.3j]
if m1.is_complex():
self._do_pow_for_exponents(m1, exponents + complex_exponents, pow, 10e-4)
else:
self._do_pow_for_exponents(m1, exponents, math.pow, None)
self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4)
# base - number, exponent - tensor
# contiguous
res1 = torch.pow(3, m1[4])
res2 = res1.clone().zero_()
for i in range(res2.size(0)):
res2[i] = pow(3, m1[4, i])
self.assertEqual(res1, res2)
# non-contiguous
res1 = torch.pow(3, m1[:, 4])
res2 = res1.clone().zero_()
for i in range(res2.size(0)):
res2[i] = pow(3, m1[i][4])
self.assertEqual(res1, res2)
# TODO: refactor all these tests using opinfos properly
def _test_pow(self, base, exponent, np_exponent=None):
if np_exponent is None:
np_exponent = exponent
def to_np(value):
if isinstance(value, torch.Tensor):
return value.cpu().numpy()
return value
try:
np_res = np.power(to_np(base), to_np(np_exponent))
expected = torch.from_numpy(np_res) if isinstance(np_res, np.ndarray) else torch.tensor(np_res, dtype=base.dtype)
except ValueError as e:
err_msg = "Integers to negative integer powers are not allowed."
self.assertEqual(str(e), err_msg)
out = torch.empty_like(base)
test_cases = [
lambda: base.pow(exponent),
lambda: base.pow_(exponent),
lambda: torch.pow(base, exponent),
lambda: torch.pow(base, exponent, out=out)
]
for test_case in test_cases:
self.assertRaisesRegex(RuntimeError, err_msg, test_case)
else:
if isinstance(base, torch.Tensor):
actual = base.pow(exponent)
self.assertEqual(actual, expected.to(actual))
actual = base.clone()
# When base is a 0-dim cpu tensor and exp is a cuda tensor, we expect `pow` to work but `pow_` to fail, since
# `pow` will try to create the output tensor on a cuda device, but `pow_` needs to use the cpu tensor as the output
if (isinstance(exponent, torch.Tensor) and base.dim() == 0 and base.device.type == 'cpu' and
exponent.device.type == 'cuda'):
regex = 'Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!'
self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
elif torch.can_cast(torch.result_type(base, exponent), base.dtype):
actual2 = actual.pow_(exponent)
self.assertEqual(actual, expected)
self.assertEqual(actual2, expected)
else:
self.assertRaisesRegex(RuntimeError, "Found dtype \\w+ but expected \\w+", lambda: actual.pow_(exponent))
actual = torch.pow(base, exponent)
self.assertEqual(actual, expected.to(actual))
actual2 = torch.pow(base, exponent, out=actual)
self.assertEqual(actual, expected.to(actual))
self.assertEqual(actual2, expected.to(actual))
# Tests pow() for integral, floating-type tensors, with integral, floating-type
# exponents (tensor or scalar), respectively. Noncontiguous tensors are also tested.
def test_int_and_float_pow(self, device):
def _test_int_and_float_pow(dt, low, high, dev):
test_cases = (
((4, 4), 0, (4, 1)),
((3, 1), 4, (3, 1)),
((2,), 4, (1,)),
((1,), 2, ()),
((513, 513), 4, (513,)),
((5, 5, 5), 5, (5,)),
((), 2, ()),
)
for base_shape, exp_scalar, exp_shape in test_cases:
base_tensor = make_tensor(base_shape, dtype=dt, device=dev, low=low, high=high)
# int tensors don't take negative exponents
if dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=0, high=high)
else:
exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=low, high=high)
self._test_pow(base_tensor, exp_scalar)
self._test_pow(base_tensor, exp_tensor)
# test non-contiguous tensors as well
base_tensor = make_tensor(base_shape, dtype=dt, device=dev, low=low, high=high,
noncontiguous=True)
if dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=0, high=high,
noncontiguous=True)
else:
exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=low, high=high,
noncontiguous=True)
self._test_pow(base_tensor, exp_scalar)
self._test_pow(base_tensor, exp_tensor)
_test_int_and_float_pow(torch.int8, -2, 2, device)
_test_int_and_float_pow(torch.uint8, 0, 3, device)
_test_int_and_float_pow(torch.int16, -5, 5, device)
_test_int_and_float_pow(torch.int64, -10, 10, device)
_test_int_and_float_pow(torch.int32, -10, 10, device)
_test_int_and_float_pow(torch.float16, 0., 5., device)
_test_int_and_float_pow(torch.float32, 0., 10., device)
_test_int_and_float_pow(torch.float64, 0., 10., device)
# pow's output would have some NaNs as well
_test_int_and_float_pow(torch.float32, -10., 10., device)
_test_int_and_float_pow(torch.float64, -10., 10., device)
# Tests that a RuntimeError occurs when a base tensor cannot be resized
# by pow's inplace variant due to PyTorch's broadcasting semantics.
def test_pow_inplace_resizing_exception(self, device):
test_cases = (
((), (3,)),
((2,), (2, 1)),
((2, 1), (2, 2)),
((2, 2), (2, 1, 1)),
)
test_inputs = list((make_tensor(base_size, dtype=torch.float64, device=device,
high=10., low=0.),
make_tensor(exp_size, dtype=torch.float64, device=device,
high=10., low=0.))
for base_size, exp_size in test_cases)
for base, exponent in test_inputs:
regex = "doesn't match the broadcast shape"
self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)
def test_int_tensor_pow_neg_ints(self, device):
ints = [torch.iinfo(torch.int32).min,
-3, -2, -1, 0, 1, 2, 3,
torch.iinfo(torch.int32).max]
neg_ints = [torch.iinfo(torch.int32).min, -3, -2, -1]
tensor = torch.tensor(ints, dtype=torch.int32, device=device)
for pow in neg_ints:
self._test_pow(tensor, pow)
def test_long_tensor_pow_floats(self, device):
ints = [0, 1, 23, 4567]
floats = [0.0, 1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
tensor = torch.tensor(ints, dtype=torch.int64, device=device)
for pow in floats:
self._test_pow(tensor, pow)
@dtypes(*[torch.float32, torch.float64])
def test_float_scalar_pow_float_tensor(self, device, dtype):
floats = [2.0, -3 / 2, -1.0, -1 / 2, -1 / 3, 0.0,
1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]
exponent_shapes = (
(1,),
(2, 2),
(2, 1),
(2, 2, 2),
)
tensors = list(make_tensor(shape, dtype=dtype, device=device, low=0)
for shape in exponent_shapes)
floats_tensor = torch.tensor(floats, dtype=dtype, device=device)
for base in floats:
self._test_pow(base, floats_tensor)
for tensor in tensors:
self._test_pow(base, tensor)
@onlyCUDA
def test_cuda_tensor_pow_scalar_tensor(self, device):
cuda_tensors = [torch.randn((3, 3), device=device), torch.tensor(3.0, device=device)]
scalar_tensors = [torch.tensor(5.0, device='cpu'), torch.tensor(-3), torch.tensor(1)]
for base, exp in product(cuda_tensors, scalar_tensors):
self._test_pow(base, exp)
@onlyCUDA
def test_cpu_tensor_pow_cuda_scalar_tensor(self, device):
cuda_tensors = [torch.tensor(5.0, device='cuda'), torch.tensor(-3, device='cuda')]
for exp in cuda_tensors:
base = torch.randn((3, 3), device='cpu')
regex = 'Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!'
self.assertRaisesRegex(RuntimeError, regex, torch.pow, base, exp)
for exp in cuda_tensors:
# Binary ops with a cpu + cuda tensor are allowed if the cpu tensor has 0 dimension
base = torch.tensor(3.0, device='cpu')
self._test_pow(base, exp)
@onlyCUDA
@dtypes(torch.complex64, torch.complex128)
def test_pow_cuda_complex_extremal_failing(self, device, dtype):
t = torch.tensor(complex(-1., float('inf')), dtype=dtype, device=device)
with self.assertRaises(AssertionError):
cuda_out = t.pow(2)
cpu_out = t.cpu().pow(2)
self.assertEqual(cpu_out, cuda_out)
@onlyOnCPUAndCUDA
@dtypes(*(torch.testing.get_all_dtypes(include_bool=False, include_bfloat16=False)))
def test_complex_scalar_pow_tensor(self, device, dtype):
complexes = [0.5j, 1. + 1.j, -1.5j, 2.2 - 1.6j, 1 + 0j]
first_exp = make_tensor((100,), device, dtype, low=-2, high=2)
second_exp = make_tensor((100,), device, dtype, low=-2, high=2, noncontiguous=True)
first_exp[0] = first_exp[10] = first_exp[20] = 0
second_exp[0] = second_exp[10] = second_exp[20] = 0
for base in complexes:
self._test_pow(base, first_exp)
self._test_pow(base, second_exp)
@onlyOnCPUAndCUDA
def test_pow_scalar_type_promotion(self, device):
# Test against a scalar and non-scalar input
inputs = [17, [17]]
for input in inputs:
# We expect the computation to be performed in uint8 (overflowing to 0), and then cast to int64
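# Worked example for input 17: in uint8 the value 2 ** 17 wraps to 0
# (131072 % 256 == 0), while in int64 it stays 131072, so the two results
# below differ until both are cast back down to uint8.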
input_tensor_uint8 = torch.tensor(input, dtype=torch.uint8, device=device)
out_uint8_computation = torch.pow(2, input_tensor_uint8, out=torch.tensor(0, dtype=torch.int64, device=device))
# Computation should run in int64, and not overflow
input_tensor_int64 = torch.tensor(input, dtype=torch.int64, device=device)
out_int64_computation = torch.pow(2, input_tensor_int64, out=torch.tensor(0, dtype=torch.int64, device=device))
self.assertNotEqual(out_uint8_computation, out_int64_computation)
self.assertEqual(out_uint8_computation.to(dtype=torch.uint8), out_int64_computation.to(dtype=torch.uint8))
def test_tensor_pow_tensor(self, dev):
def rotate(l, n):
return l[-n:] + l[:-n]
def test_tensor_pow_tensor(values, torch_type, numpy_type):
vals_tensor = torch.tensor(values, dtype=torch_type, device=dev)
for i in range(len(values)):
pows = rotate(values, i)
pows_tensor = torch.tensor(pows, dtype=torch_type, device=dev)
self._test_pow(vals_tensor, pows_tensor)
ints = [0, 1, 2, 3]
test_tensor_pow_tensor(ints, torch.uint8, np.uint8)
test_tensor_pow_tensor(ints, torch.int8, np.int8)
test_tensor_pow_tensor(ints, torch.int16, np.int16)
test_tensor_pow_tensor(ints, torch.int32, np.int32)
test_tensor_pow_tensor(ints, torch.int64, np.int64)
floats = [-3.0, -2.0, -1.0, -1 / 2, -1 / 3,
0.0, 1 / 3, 1 / 2, 1.0, 2.0, 3.0]
test_tensor_pow_tensor(floats, torch.float16, np.float16)
test_tensor_pow_tensor(floats, torch.float32, np.float32)
test_tensor_pow_tensor(floats, torch.float64, np.float64)
def test_logical_xor_with_nontrivial_alignment(self, device):
# test tensor that is not aligned to multiple of 16 bytes
size = 128
a = (torch.randn(size, device=device) > 0)
b = (torch.randn(size, device=device) > 0)
c = (torch.randn(size, device=device) > 0)
non_trivial_alignment = [1, 2, 4, 8, 15]
for i in non_trivial_alignment:
for j in non_trivial_alignment:
for k in non_trivial_alignment:
a_ = a[i: 100 + i]
b_ = b[j: 100 + j]
c_ = c[k: 100 + k]
torch.logical_xor(a_, b_, out=c_)
for x, y, z in zip(a_.tolist(), b_.tolist(), c_.tolist()):
self.assertEqual(x ^ y, z)
@dtypes(torch.float)
def test_add_with_tail(self, device, dtype):
# test tensor where there is a tail which is not a multiple
# of GPU warp size
for tail_size in [1, 63, 67, 130]:
size = 4096 + tail_size
a = torch.randn(size, device=device, dtype=dtype)
b = torch.randn(size, device=device, dtype=dtype)
c = a + b
for x, y, z in zip(a.tolist(), b.tolist(), c.tolist()):
self.assertEqual(x + y, z)
# Tests that CUDA tensors on different devices cannot be used in the same
# binary operation, and that CUDA "scalars" cannot be used in the same
# binary operation as non-scalar CPU tensors.
@deviceCountAtLeast(2)
@onlyCUDA
def test_cross_device_binary_ops(self, devices):
vals = (1., (2.,))
cpu_tensor = torch.randn(2, 2)
def do_test(op, a, b):
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, b)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(b, a)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(a, cpu_tensor)
with self.assertRaisesRegex(RuntimeError, "Expected all tensors.+"):
op(cpu_tensor, a)
for op in (operator.add, torch.add,
operator.sub, torch.sub,
operator.mul, torch.mul,
operator.truediv, torch.true_divide,
operator.floordiv, torch.floor_divide):
for a, b in product(vals, vals):
a = torch.tensor(a, device=devices[0])
b = torch.tensor(b, device=devices[1])
do_test(op, a, b)
# This test ensures that a scalar Tensor can be safely used
# in a binary operation in conjunction with a Tensor on all
# available CUDA devices
@deviceCountAtLeast(2)
@onlyCUDA
def test_binary_op_scalar_device_unspecified(self, devices):
scalar_val = torch.tensor(1.)
for default_device in devices:
with torch.cuda.device(default_device):
for device in devices:
device_obj = torch.device(device)
x = torch.rand(3, device=device)
y0 = x * scalar_val
self.assertEqual(y0.device, device_obj)
y1 = scalar_val * x
self.assertEqual(y1.device, device_obj)
self.assertEqual(y0, y1)
def test_div_and_floordiv_vs_python(self, device):
# Tests torch division ops which can handle both arguments being
# scalars.
# NOTE: torch.floor_divide currently truncates instead of flooring
# the quotient. See https://github.com/pytorch/pytorch/issues/43874.
def _scalar_helper(python_op, torch_op):
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * .5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0:
continue
expected_div = python_op(a, b)
for op in (operator.truediv, torch.true_divide):
actual_scalar = torch_op(a, b)
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
actual_tensor = torch_op(a_t, b_t)
actual_first_tensor = torch_op(a_t, b)
actual_second_tensor = torch_op(a, b_t)
self.assertEqual(actual_scalar, expected_div)
self.assertEqual(actual_tensor.item(), expected_div)
self.assertEqual(actual_first_tensor, actual_tensor)
self.assertEqual(actual_second_tensor, actual_tensor)
_scalar_helper(operator.truediv, operator.truediv)
_scalar_helper(operator.truediv, torch.true_divide)
with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
_scalar_helper(lambda a, b: math.trunc(a / b), operator.floordiv)
_scalar_helper(lambda a, b: math.trunc(a / b), torch.floor_divide)
# NOTE: torch.floor_divide currently truncates instead of flooring.
# See https://github.com/pytorch/pytorch/issues/43874.
@onlyOnCPUAndCUDA
def test_div_and_floordiv_script_vs_python(self, device):
# Creates jitted functions of two tensors
def _wrapped_div(a, b):
return a / b
def _wrapped_floordiv(a, b):
return a // b
scripted_div = torch.jit.script(_wrapped_div)
scripted_floordiv = torch.jit.script(_wrapped_floordiv)
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * .5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0:
continue
expected_div = a / b
expected_truncdiv = math.trunc(a / b)
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
self.assertEqual(scripted_div(a_t, b_t), expected_div)
with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
self.assertEqual(scripted_floordiv(a_t, b_t), expected_truncdiv)
# Creates jitted functions of one tensor
def _wrapped_div_scalar(a):
return a / 5
# NOTE: the JIT implements division as torch.reciprocal(a) * 5
def _wrapped_rdiv_scalar(a):
return 5 / a
def _wrapped_floordiv_scalar(a):
return a // 5
# NOTE: this fails if the input is not an integer tensor
# See https://github.com/pytorch/pytorch/issues/45199
def _wrapped_rfloordiv_scalar(a):
return 5 // a
scripted_div_scalar = torch.jit.script(_wrapped_div_scalar)
scripted_rdiv_scalar = torch.jit.script(_wrapped_rdiv_scalar)
scripted_floordiv_scalar = torch.jit.script(_wrapped_floordiv_scalar)
scripted_rfloordiv_scalar = torch.jit.script(_wrapped_rfloordiv_scalar)
for a in range(-10, 10):
for op in (lambda x: x * .5, lambda x: math.floor(x)):
a = op(a)
a_t = torch.tensor(a, device=device)
self.assertEqual(a / 5, scripted_div_scalar(a_t))
with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
self.assertEqual(math.trunc(a / 5), scripted_floordiv_scalar(a_t))
# Skips zero divisors
if a == 0:
continue
self.assertEqual(5 / a, scripted_rdiv_scalar(a_t))
# Handles Issue 45199 (see comment above)
if a_t.is_floating_point():
with self.assertRaises(RuntimeError):
scripted_rfloordiv_scalar(a_t)
else:
# This should emit a UserWarning, why doesn't it?
# See issue gh-52387
self.assertEqual(5 // a, scripted_rfloordiv_scalar(a_t))
# NOTE: torch.floor_divide currently truncates instead of flooring
# the quotient. See https://github.com/pytorch/pytorch/issues/43874.
@onlyOnCPUAndCUDA
def test_idiv_and_ifloordiv_vs_python(self, device):
def _wrapped_idiv_tensor(a, b):
a /= b
return a
def _wrapped_idiv_scalar(a):
a /= 5
return a
def _wrapped_true_divide__tensor(a, b):
a.true_divide_(b)
return a
def _wrapped_true_divide__scalar(a):
a.true_divide_(5)
return a
def _wrapped_floor_divide__tensor(a, b):
a.floor_divide_(b)
return a
def _wrapped_floor_divide__scalar(a):
a.floor_divide_(5)
return a
# The following functions are unsupported by the JIT
def _wrapped_ifloordiv_tensor(a, b):
a //= b
return a
def _wrapped_ifloordiv_scalar(a):
a //= 5
return a
with self.assertRaises(torch.jit.frontend.NotSupportedError):
scripted_ifloordiv_tensor = torch.jit.script(_wrapped_ifloordiv_tensor)
with self.assertRaises(torch.jit.frontend.NotSupportedError):
scripted_ifloordiv_scalar = torch.jit.script(_wrapped_ifloordiv_scalar)
scripted_idiv_tensor = torch.jit.script(_wrapped_idiv_tensor)
scripted_idiv_scalar = torch.jit.script(_wrapped_idiv_scalar)
scripted_true_divide__tensor = torch.jit.script(_wrapped_true_divide__tensor)
scripted_true_divide__scalar = torch.jit.script(_wrapped_true_divide__scalar)
scripted_floor_divide__tensor = torch.jit.script(_wrapped_floor_divide__tensor)
scripted_floor_divide__scalar = torch.jit.script(_wrapped_floor_divide__scalar)
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * .5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0:
continue
expected_idiv = a / b
expected_ifloordiv = a // b
expected_itruncdiv = math.trunc(a / b)
a_t = torch.tensor(a, device=device)
b_t = torch.tensor(b, device=device)
if a_t.is_floating_point():
tmp0 = a_t.clone()
tmp0 /= b
tmp1 = a_t.clone()
tmp1 /= b_t
self.assertEqual(tmp0.item(), expected_idiv)
self.assertEqual(tmp1.item(), expected_idiv)
self.assertEqual(scripted_true_divide__tensor(a_t.clone(), b_t).item(), expected_idiv)
self.assertEqual(scripted_true_divide__scalar(a_t.clone()).item(), a / 5)
else:
tmp = a_t.clone()
with self.assertRaises(RuntimeError):
tmp /= b
with self.assertRaises(RuntimeError):
tmp /= b_t
with self.assertRaises(RuntimeError):
scripted_true_divide__tensor(tmp, b_t)
with self.assertRaises(RuntimeError):
scripted_true_divide__scalar(tmp)
if not a_t.is_floating_point() and b_t.is_floating_point():
# Inplace modification fails because a float tensor is required
# if the divisor is a float tensor
with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
a_t.clone().floor_divide_(b_t)
with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
scripted_floor_divide__tensor(a_t.clone(), b_t)
tmp = a_t.clone()
with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
tmp //= b_t
else:
# Inplace modification is OK when both or neither tensor is
# a float tensor
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
self.assertEqual(a_t.clone().floor_divide_(b_t).item(), expected_itruncdiv)
self.assertEqual(scripted_floor_divide__tensor(a_t.clone(), b_t).item(), expected_itruncdiv)
tmp = a_t.clone()
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
tmp //= b_t
self.assertEqual(tmp.item(), expected_itruncdiv)
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
self.assertEqual(scripted_floor_divide__scalar(a_t), math.trunc(a / 5))
# Tests binary op equivalence with Python builtin ops
# Also tests that reverse operations are equivalent to forward ops
# NOTE: division ops are tested separately above
def test_binary_ops_with_scalars(self, device):
for ops in ((operator.add, torch.add),
(operator.sub, torch.sub),
(operator.mul, torch.mul),
(operator.truediv, torch.div)):
python_op, torch_op = ops
for a, b in product(range(-10, 10), range(-10, 10)):
for op in (lambda x: x * .5, lambda x: math.floor(x)):
a = op(a)
b = op(b)
# Skips zero divisors
if b == 0 or a == 0:
continue
a_tensor = torch.tensor(a, device=device)
b_tensor = torch.tensor(b, device=device)
a_tensor_cpu = a_tensor.cpu()
b_tensor_cpu = b_tensor.cpu()
vals = (a, b, a_tensor, b_tensor, a_tensor_cpu, b_tensor_cpu)
for args in product(vals, vals):
first, second = args
first_scalar = first if not isinstance(first, torch.Tensor) else first.item()
second_scalar = second if not isinstance(second, torch.Tensor) else second.item()
expected = python_op(first_scalar, second_scalar)
self.assertEqual(expected, python_op(first, second))
self.assertEqual(expected, torch_op(first, second))
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False), torch.testing.get_all_dtypes(include_complex=False)))
def test_maximum_minimum_type_promotion(self, device, dtypes):
a = torch.tensor((0, 1), device=device, dtype=dtypes[0])
b = torch.tensor((1, 0), device=device, dtype=dtypes[1])
for op in (torch.maximum, torch.max, torch.fmax, torch.minimum, torch.min, torch.fmin):
result = op(a, b)
self.assertEqual(result.dtype, torch.result_type(a, b))
@dtypes(*(torch.testing.get_all_int_dtypes() + [torch.bool]))
def test_maximum_minimum_int_and_bool(self, device, dtype):
ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),
(torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))
rng = np.random.default_rng()
a_np = np.array(rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype])
b_np = np.array(rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype])
for torch_op, alias, numpy_op in ops:
a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
tensor_result = torch_op(a_tensor, b_tensor)
out = torch.empty_like(a_tensor)
torch_op(a_tensor, b_tensor, out=out)
numpy_result = numpy_op(a_np, b_np)
if alias is not None:
alias_result = alias(a_tensor, b_tensor)
self.assertEqual(alias_result, tensor_result)
self.assertEqual(tensor_result, numpy_result)
self.assertEqual(out, numpy_result)
@precisionOverride({torch.bfloat16: 1e-2})
@dtypes(*(torch.testing.get_all_fp_dtypes()))
def test_maximum_minimum_float(self, device, dtype):
ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),
(torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))
if dtype == torch.bfloat16:
a_np = np.random.randn(10).astype(np.float64)
b_np = np.random.randn(10).astype(np.float64)
else:
a_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])
b_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])
for torch_op, alias, numpy_op in ops:
numpy_result = numpy_op(a_np, b_np)
a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
tensor_result = torch_op(a_tensor, b_tensor)
out = torch.empty_like(a_tensor)
torch_op(a_tensor, b_tensor, out=out)
if alias is not None:
alias_result = alias(a_tensor, b_tensor)
self.assertEqual(alias_result, tensor_result, exact_dtype=False)
self.assertEqual(tensor_result, numpy_result, exact_dtype=False)
self.assertEqual(out, numpy_result, exact_dtype=False)
@dtypes(*(torch.testing.get_all_fp_dtypes()))
def test_maximum_minimum_float_nan_and_inf(self, device, dtype):
# np.maximum and np.minimum compare input arrays element-wise;
# if one of the elements being compared is a NaN, that element is returned.
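# For example, np.maximum(np.nan, 1.0) is nan, whereas np.fmax(np.nan, 1.0)
# returns 1.0 (np.fmax/np.fmin ignore a single NaN operand).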
ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),
(torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))
a_vals = (float('inf'), -float('inf'), float('nan'), float('inf'), float('nan'), float('nan'), 1, float('nan'))
b_vals = (-float('inf'), float('inf'), float('inf'), float('nan'), float('nan'), 0, float('nan'), -5)
if dtype == torch.bfloat16:
a_np = np.array(a_vals, dtype=np.float64)
b_np = np.array(b_vals, dtype=np.float64)
else:
a_np = np.array(a_vals, dtype=torch_to_numpy_dtype_dict[dtype])
b_np = np.array(b_vals, dtype=torch_to_numpy_dtype_dict[dtype])
for torch_op, alias, numpy_op in ops:
numpy_result = numpy_op(a_np, b_np)
a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)
b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)
tensor_result = torch_op(a_tensor, b_tensor)
out = torch.empty_like(a_tensor)
torch_op(a_tensor, b_tensor, out=out)
if alias is not None:
alias_result = alias(a_tensor, b_tensor)
self.assertEqual(alias_result, tensor_result)
if dtype == torch.bfloat16:
self.assertEqual(tensor_result, numpy_result, exact_dtype=False)
self.assertEqual(out, numpy_result, exact_dtype=False)
else:
self.assertEqual(tensor_result, numpy_result)
self.assertEqual(out, numpy_result)
@dtypes(*product(torch.testing.get_all_complex_dtypes(), torch.testing.get_all_dtypes()))
def test_maximum_minimum_complex(self, device, dtypes):
for torch_op in (torch.maximum, torch.minimum, torch.max, torch.min, torch.fmax, torch.fmin):
with self.assertRaisesRegex(RuntimeError, '.+not implemented for.+'):
torch_op(torch.ones(1, device=device, dtype=dtypes[0]),
torch.ones(1, device=device, dtype=dtypes[1]))
with self.assertRaisesRegex(RuntimeError, '.+not implemented for.+'):
torch_op(torch.ones(1, device=device, dtype=dtypes[1]),
torch.ones(1, device=device, dtype=dtypes[0]))
@onlyCUDA
def test_maximum_minimum_cross_device(self, device):
a = torch.tensor((1, 2, -1))
b = torch.tensor((3, 0, 4), device=device)
ops = (torch.maximum, torch.minimum)
for torch_op in ops:
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch_op(a, b)
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch_op(b, a)
# test cuda tensor and cpu scalar
ops = ((torch.maximum, np.maximum), (torch.minimum, np.minimum))
a_np = np.array(1)
b_np = np.array([3, 0, 4])
for torch_op, numpy_op in ops:
a_tensor = torch.from_numpy(a_np)
b_tensor = torch.from_numpy(b_np).to(device=device)
tensor_result_1 = torch_op(a_tensor, b_tensor)
numpy_result_1 = numpy_op(a_np, b_np)
tensor_result_2 = torch_op(b_tensor, a_tensor)
numpy_result_2 = numpy_op(b_np, a_np)
self.assertEqual(tensor_result_1, numpy_result_1)
self.assertEqual(tensor_result_2, numpy_result_2)
# TODO: tests like this should be generic
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_mul_intertype_scalar(self, device, dtype):
x = torch.tensor(1.5, dtype=dtype, device=device)
y = torch.tensor(3, dtype=torch.int32, device=device)
self.assertEqual(x * y, 4.5)
self.assertEqual(y * x, 4.5)
with self.assertRaisesRegex(RuntimeError, "can't be cast to the desired output type"):
y *= x
x *= y
self.assertEqual(x, 4.5)
@onlyCPU
@dtypes(*torch.testing.get_all_dtypes())
def test_sub(self, device, dtype):
m1 = torch.tensor([2.34, 4.44], dtype=dtype, device=device)
m2 = torch.tensor([1.23, 2.33], dtype=dtype, device=device)
if dtype == torch.bool:
self.assertRaises(RuntimeError, lambda: m1 - m2)
elif (dtype == torch.bfloat16 or dtype == torch.half):
# bfloat16 and half have lower precision, so they need a looser tolerance
self.assertEqual(m1 - m2, torch.tensor([1.11, 2.11], dtype=dtype), atol=0.01, rtol=0)
else:
self.assertEqual(m1 - m2, torch.tensor([1.11, 2.11], dtype=dtype))
# TODO: what is this test testing?
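# (As written, it checks that torch.add(a, b, alpha=-1) matches in-place
# subtraction via sub_, for both a tensor and a scalar second operand.)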
@onlyCPU
@dtypes(torch.float)
def test_csub(self, device, dtype):
# with a tensor
a = torch.randn(100, 90, dtype=dtype, device=device)
b = a.clone().normal_()
res_add = torch.add(a, b, alpha=-1)
res_csub = a.clone()
res_csub.sub_(b)
self.assertEqual(res_add, res_csub)
# with a scalar
a = torch.randn(100, 100, dtype=dtype, device=device)
scalar = 123.5
res_add = torch.add(a, -scalar)
res_csub = a.clone()
res_csub.sub_(scalar)
self.assertEqual(res_add, res_csub)
# TODO: reconcile with minimum/maximum tests
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_min_max_binary_op_nan(self, device, dtype):
a = torch.rand(1000, dtype=dtype, device=device)
b = torch.rand(1000, dtype=dtype, device=device)
# 0:250: a -- nan, b -- not nan
a[:250] = float('nan')
# 250:500: a -- not nan, b -- nan
b[250:500] = float('nan')
# 500:750: a and b both nan
a[500:750] = float('nan')
b[500:750] = float('nan')
# 750:1000: neither nan
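# torch.max/torch.min propagate NaN when either operand is NaN, so the first
# 750 entries should be NaN and the remaining 250 should not (see the asserts
# below).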
ma = torch.max(a, b)
mi = torch.min(a, b)
for i in range(750):
self.assertTrue(torch.isnan(ma[i]), "max(a, b): {}, a: {}, b: {}".format(ma[i], a[i], b[i]))
self.assertTrue(torch.isnan(mi[i]), "min(a, b): {}, a: {}, b: {}".format(mi[i], a[i], b[i]))
for i in range(750, 1000):
self.assertFalse(torch.isnan(ma[i]), "max(a, b): {}, a: {}, b: {}".format(ma[i], a[i], b[i]))
self.assertFalse(torch.isnan(mi[i]), "min(a, b): {}, a: {}, b: {}".format(mi[i], a[i], b[i]))
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False),
torch.testing.get_all_dtypes(include_complex=False)))
def test_copysign(self, device, dtypes):
def _test_copysign_numpy(a, b):
torch_result = torch.copysign(a, b)
if a.dtype == torch.bfloat16:
np_a = a.to(torch.float).cpu().numpy()
else:
np_a = a.cpu().numpy()
if b.dtype == torch.bfloat16:
np_b = b.to(torch.float).cpu().numpy()
else:
np_b = b.cpu().numpy()
expected = torch.from_numpy(np.copysign(np_a, np_b))
# Handle type promotion inconsistencies between PyTorch and NumPy,
# applied when either argument has a bool, integral, or bfloat16 dtype.
types = [torch.bool, torch.bfloat16] + torch.testing.get_all_int_dtypes()
if a.dtype in types or b.dtype in types:
promoted_type = torch.promote_types(torch_result.dtype, expected.dtype)
torch_result = torch_result.to(promoted_type)
expected = expected.to(promoted_type)
# Verify Value
self.assertEqual(torch_result, expected)
# Verify Sign
# Use a second copysign to verify the signs of 0.0 and -0.0, since
# self.assertEqual(0.0, -0.0) always passes. Applying copysign with a
# magnitude of 1 makes the signs of the torch and numpy results comparable
# elementwise.
# Special case: NaN conversions between FP32 and FP16 are not bitwise
# equivalent, so this assertion is skipped when either dtype is float16.
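# (For example, math.copysign(1.0, -0.0) == -1.0 while math.copysign(1.0, 0.0)
# == 1.0, so signed zeros become distinguishable after the copysign step.)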
if a.dtype != torch.float16 and b.dtype != torch.float16:
self.assertEqual(torch.copysign(torch.tensor(1.0), torch_result),
torch.copysign(torch.tensor(1.0), expected))
# Compare Result with NumPy
# Type promotion
a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
_test_copysign_numpy(a, b)
# Broadcast
a = make_tensor((10, 1, 10), device=device, dtype=dtypes[0], low=-9, high=9)
b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
_test_copysign_numpy(a, b)
a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
b = make_tensor((10, 1, 10), device=device, dtype=dtypes[1], low=-9, high=9)
_test_copysign_numpy(a, b)
# 0.0/-0.0/inf/-inf/nan
cases = [0.0, -0.0, float('inf'), float('-inf'), float('nan')]
# torch.bfloat16 cannot hold '-nan'
# torch.half cannot hold '-nan' on CUDA
types = [torch.float32, torch.float64]
if device == 'cpu':
types.append(torch.float16)
if dtypes[0] in types:
b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)
for case in cases:
_test_copysign_numpy(torch.tensor([case], device=device, dtype=dtypes[0]), b)
if dtypes[1] in torch.testing.get_all_fp_dtypes():
a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
for case in cases:
_test_copysign_numpy(a, torch.tensor([case], device=device, dtype=dtypes[1]))
@dtypes(torch.bfloat16, torch.float)
def test_div(self, device, dtype):
for op, method, inplace in ((torch.div, torch.Tensor.div, torch.Tensor.div_),
(torch.true_divide, torch.Tensor.true_divide,
torch.Tensor.true_divide_)):
m1 = torch.randn(10, 10, dtype=torch.float, device=device).to(dtype=dtype)
res1 = m1.clone()
inplace(res1[:, 3], 2)
res2 = m1.clone()
for i in range(m1.size(0)):
res2[i, 3] = res2[i, 3] / 2
self.assertEqual(res1, res2)
if dtype == torch.bfloat16:
a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
a2 = torch.tensor([2., 2.], dtype=dtype, device=device)
self.assertEqual(op(a1, a2),
torch.tensor([2.1, 3.1], dtype=dtype, device=device),
atol=0.01, rtol=0)
self.assertEqual(method(a1, a2), op(a1, a2))
@dtypes(torch.bfloat16, torch.float)
def test_true_divide_out(self, device, dtype):
a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)
a2 = torch.tensor([2., 2.], dtype=dtype, device=device)
res = torch.empty_like(a1)
self.assertEqual(torch.true_divide(a1, a2, out=res),
torch.tensor([2.1, 3.1], dtype=dtype, device=device),
atol=0.01, rtol=0)
@onlyCUDA
@dtypes(torch.half)
def test_divmul_scalar(self, device, dtype):
x = torch.tensor(100., device=device, dtype=dtype)
x_ref = x.float()
scale = 1e5
res = x.div(scale)
expected = x_ref.div(scale)
self.assertEqual(res, expected.to(dtype), atol=0., rtol=0.)
x = torch.tensor(1e-5, device=device, dtype=dtype)
x_ref = x.float()
res = x.mul(scale)
expected = x_ref.mul(scale)
self.assertEqual(res, expected.to(dtype), atol=0., rtol=0.)
res = scale * x
self.assertEqual(res, expected.to(dtype), atol=0., rtol=0.)
@dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')) - {torch.complex64, torch.complex128})
@dtypes(*set(torch.testing.get_all_math_dtypes('cpu')) - {torch.complex64, torch.complex128})
def test_floor_divide_tensor(self, device, dtype):
x = torch.randn(10, device=device).mul(30).to(dtype)
y = torch.arange(1, 11, dtype=dtype, device=device)
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
z = x // y
z_alt = torch.trunc(x.double() / y.double()).to(dtype)
self.assertEqual(z.dtype, x.dtype)
self.assertEqual(z, z_alt)
@dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')) - {torch.complex64, torch.complex128})
@dtypes(*set(torch.testing.get_all_math_dtypes('cpu')) - {torch.complex64, torch.complex128})
def test_floor_divide_scalar(self, device, dtype):
x = torch.randn(100, device=device).mul(10).to(dtype)
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
z = x // 3
z_alt = torch.tensor([math.trunc(v.item() / 3.) for v in x], dtype=x.dtype, device=device)
self.assertEqual(z.dtype, x.dtype)
self.assertEqual(z, z_alt)
# Note: this test fails on XLA
@onlyOnCPUAndCUDA
@dtypes(torch.float, torch.long)
def test_floor_divide_out(self, device, dtype):
x = torch.randn(10, device=device).mul(10).to(dtype)
y = torch.arange(1, 11, dtype=dtype, device=device)
o = torch.empty(10, dtype=dtype, device=device)
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
torch.floor_divide(x, y, out=o)
self.assertEqual(o, x // y)
# Tests scalar with out
torch.floor_divide(x, 2, out=o)
self.assertEqual(o, x // 2)
if dtype == torch.int:
o = torch.empty(10, dtype=torch.float, device=device)
torch.floor_divide(x, y, out=o)
self.assertEqual(o, torch.floor_divide(x.float(), y.float()))
@onlyCPU
@dtypes(*torch.testing.get_all_math_dtypes('cpu'))
def test_rdiv(self, device, dtype):
if dtype is torch.float16:
return
elif dtype.is_complex:
x = torch.rand(100, dtype=dtype, device=device).add(1).mul(4)
else:
x = torch.rand(100, device=device).add(1).mul(4).to(dtype)
y = 30 / x
z = torch.tensor([30 / v.item() for v in x], device=device)
self.assertEqual(y, z, exact_dtype=False)
@dtypes(*torch.testing.get_all_fp_dtypes(include_bfloat16=False))
def test_fmod_remainder_by_zero_float(self, device, dtype):
fn_list = (torch.fmod, torch.remainder)
for fn in fn_list:
# check that floating-point tensor fmod/remainder by zero is nan on both CPU and GPU
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
zero = torch.zeros_like(x)
self.assertTrue(torch.all(fn(x, 0.0).isnan()))
self.assertTrue(torch.all(fn(x, zero).isnan()))
@onlyOnCPUAndCUDA # Check Issue https://github.com/pytorch/pytorch/issues/48130
@skipCUDAIfRocm # Error happens on both ROCM and XLA
@dtypes(*torch.testing.get_all_int_dtypes())
def test_fmod_remainder_by_zero_integral(self, device, dtype):
fn_list = (torch.fmod, torch.remainder)
for fn in fn_list:
# check integral tensor fmod/remainder by zero
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
zero = torch.zeros_like(x)
# RuntimeError on CPU
if self.device_type == 'cpu':
with self.assertRaisesRegex(RuntimeError, "ZeroDivisionError"):
fn(x, zero)
# Different values for different dtypes on CUDA:
# Because this is undefined behavior, CUDA returns a pattern of all 1s
# for an integral dividend (other than int64) divided by zero. For int64,
# CUDA returns all 1s for a negative dividend and half 1s for a positive dividend.
# uint8: 0xff -> 255
# int32: 0xffffffff -> -1
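# int64: all 64 bits set (-1) for a negative dividend; only the lower 32 bits
# set (0x00000000ffffffff == 4294967295) for a non-negative dividend, which is
# what the int64 assertions below check.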
else:
if dtype == torch.int64:
self.assertEqual(fn(x, zero) == 4294967295, x >= 0)
self.assertEqual(fn(x, zero) == -1, x < 0)
else:
value = 255 if dtype == torch.uint8 else -1
self.assertTrue(torch.all(fn(x, zero) == value))
@dtypes(*torch.testing.get_all_dtypes(include_bfloat16=False, include_bool=False, include_complex=False))
def test_fmod_remainder(self, device, dtype):
# Use numpy as reference
def _helper(x, mod, fns_list):
for fn, inplace_fn, ref_fn in fns_list:
np_x = x.cpu().numpy() if torch.is_tensor(x) else x
np_mod = mod.cpu().numpy() if torch.is_tensor(mod) else mod
exp = ref_fn(np_x, np_mod)
exp = torch.from_numpy(exp)
res = fn(x, mod)
self.assertEqual(res, exp, exact_dtype=False)
if torch.is_tensor(x):
# out
out = torch.empty(0, device=device, dtype=res.dtype)
fn(x, mod, out=out)
self.assertEqual(out, exp, exact_dtype=False)
self.assertEqual(out.size(), torch.Size([10, 10]))
# in-place (Type cast runtime error)
try:
inplace_fn(x, mod)
self.assertEqual(x, exp, exact_dtype=False)
except RuntimeError as e:
self.assertRegex(str(e), "result type (Half|Float|Double) "
"can't be cast to the desired output "
"type (Byte|Char|Short|Int|Long)")
x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
# mod with same dtype as x
mod = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)
# Exclude 0
mod[mod == 0] = 1
# Mods: Integer, Float, Tensor, Non-contiguous Tensor
mods = [3, 2.3, mod, mod.t()]
# mod with floating-point dtype
if dtype in torch.testing.get_all_int_dtypes():
mod_float = make_tensor((10, 10), device=device, dtype=torch.float, low=-9, high=9)
mod_float[mod_float == 0] = 1
mods.append(mod_float)
for dividend, mod in product([x, x.t()], mods):
_helper(dividend, mod,
((torch.fmod, torch.Tensor.fmod_, np.fmod),
(torch.remainder, torch.Tensor.remainder_, np.remainder),))
# Tests for torch.remainder(scalar, tensor)
for dividend, mod in product([5, 3.14], mods):
if torch.is_tensor(mod):
_helper(dividend, mod,
((torch.remainder, torch.Tensor.remainder_, np.remainder),))
@dtypes(torch.float, torch.double)
def test_remainder_fmod_large_dividend(self, device, dtype):
alarge = 1e9
pi = 3.14159265358979
for avalue in [alarge, -alarge]:
for bvalue in [pi, -pi]:
a = torch.tensor([avalue], dtype=dtype, device=device)
b = torch.tensor([bvalue], dtype=dtype, device=device)
c = torch.remainder(a, b)
d = torch.fmod(a, b)
self.assertTrue((b[0] > 0) == (c[0] > 0)) # remainder has same sign as divisor
self.assertTrue((a[0] > 0) == (d[0] > 0)) # fmod has same sign as dividend
self.assertTrue(abs(c[0]) < abs(b[0])) # remainder is within range of divisor
self.assertTrue(abs(d[0]) < abs(b[0])) # fmod is within range of divisor
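# Worked example (consistent with the checks below): with a = -1e9 and b = pi,
# remainder is positive like b, fmod is negative like a, and the two results
# differ by exactly |b|.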
if ((a[0] > 0) == (b[0] > 0)):
self.assertTrue(c[0] == d[0]) # remainder is same as fmod
else:
self.assertTrue(abs(c[0] - d[0]) == abs(b[0])) # differ by one divisor
@dtypesIfCPU(torch.bfloat16, torch.float32, torch.float64)
@dtypes(torch.float32, torch.float64)
def test_hypot(self, device, dtype):
inputs = [
(torch.randn(10, device=device).to(dtype), torch.randn(10, device=device).to(dtype)),
(torch.randn((3, 3, 3), device=device).to(dtype), torch.randn((3, 3, 3), device=device).to(dtype)),
(torch.randn((10, 1), device=device).to(dtype), torch.randn((10, 1), device=device).to(dtype).transpose(0, 1)),
(torch.randint(100, (10, ), device=device, dtype=torch.long), torch.randn(10, device=device).to(dtype))
]
for input in inputs:
actual = torch.hypot(input[0], input[1])
if dtype == torch.bfloat16:
expected = torch.sqrt(input[0] * input[0] + input[1] * input[1])
else:
expected = np.hypot(input[0].cpu().numpy(), input[1].cpu().numpy())
self.assertEqual(actual, expected, exact_dtype=False)
@onlyOnCPUAndCUDA
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_gcd(self, device, dtype):
# Tests gcd(0, 0), gcd(0, a) cases
t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)
t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)
actual = torch.gcd(t1, t2)
expected = np.gcd([0, 10, 0], [0, 0, 10])
self.assertEqual(actual, expected, exact_dtype=False)
if dtype == torch.uint8:
# Test unsigned integers with potential sign issues (i.e., uint8 with value >= 128)
a = torch.tensor([190, 210], device=device, dtype=dtype)
b = torch.tensor([190, 220], device=device, dtype=dtype)
actual = torch.gcd(a, b)
expected = torch.tensor([190, 10], device=device, dtype=dtype)
self.assertEqual(actual, expected)
else:
# Compares with NumPy
a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
actual = torch.gcd(a, b)
expected = np.gcd(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(actual, expected)
@onlyOnCPUAndCUDA
@dtypes(torch.int16, torch.int32, torch.int64)
def test_lcm(self, device, dtype):
# Tests lcm(0, 0), lcm(0, a) cases
t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)
t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)
actual = torch.lcm(t1, t2)
expected = np.lcm([0, 10, 0], [0, 0, 10])
self.assertEqual(actual, expected, exact_dtype=False)
# Compares with NumPy
a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)
actual = torch.lcm(a, b)
expected = np.lcm(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(actual, expected, exact_dtype=False)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_nextafter(self, device, dtype):
# Test special cases
t1 = torch.tensor([0, 0, 10], device=device, dtype=dtype)
t2 = torch.tensor([inf, -inf, 10], device=device, dtype=dtype)
actual = torch.nextafter(t1, t2)
expected = np.nextafter(t1.cpu().numpy(), t2.cpu().numpy())
self.assertEqual(actual, expected, atol=0, rtol=0)
actual = torch.nextafter(t2, t1)
expected = np.nextafter(t2.cpu().numpy(), t1.cpu().numpy())
self.assertEqual(actual, expected, atol=0, rtol=0)
t1 = torch.tensor([0, nan], device=device, dtype=dtype)
t2 = torch.tensor([nan, 0], device=device, dtype=dtype)
self.assertTrue(torch.nextafter(t1, t2).isnan().all())
a = torch.randn(100, device=device, dtype=dtype)
b = torch.randn(100, device=device, dtype=dtype)
actual = torch.nextafter(a, b)
expected = np.nextafter(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(actual, expected, atol=0, rtol=0)
def _test_cop(self, torchfn, mathfn, dtype, device):
def reference_implementation(res2):
for i, j in iter_indices(sm1):
idx1d = i * sm1.size(0) + j
res2[i, j] = mathfn(sm1[i, j], sm2[idx1d])
return res2
# contiguous
m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
m2 = torch.randn(10, 10 * 10, dtype=dtype, device=device)
sm1 = m1[4]
sm2 = m2[4]
res1 = torchfn(sm1, sm2.view(10, 10))
res2 = reference_implementation(res1.clone())
self.assertEqual(res1, res2)
# non-contiguous
m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)
m2 = torch.randn(10 * 10, 10 * 10, dtype=dtype, device=device)
sm1 = m1[:, 4]
sm2 = m2[:, 4]
# view as sm1.size()
sm2.set_(sm2.storage(), sm2.storage_offset(), sm1.size(), (sm2.stride()[0] * 10, sm2.stride()[0]))
res1 = torchfn(sm1, sm2)
# reference_implementation assumes 1-d sm2
sm2.set_(sm2.storage(), sm2.storage_offset(), m2[:, 4].size(), m2[:, 4].stride())
res2 = reference_implementation(res1.clone())
self.assertEqual(res1, res2)
@onlyCPU
@dtypes(torch.float)
def test_cdiv(self, device, dtype):
self._test_cop(torch.div, lambda x, y: x / y, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cremainder(self, device, dtype):
self._test_cop(torch.remainder, lambda x, y: x % y, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cmul(self, device, dtype):
self._test_cop(torch.mul, lambda x, y: x * y, dtype, device)
@onlyCPU
@dtypes(torch.float)
def test_cpow(self, device, dtype):
self._test_cop(torch.pow, lambda x, y: nan if x < 0 else math.pow(x, y), dtype, device)
@onlyCPU
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_floor_divide_zero(self, device, dtype):
a = torch.tensor([0, 1], dtype=dtype, device=device)
b = torch.tensor([0, 1], dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, 'ZeroDivisionError'):
with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
a // b
@unittest.skipIf(TEST_WITH_ASAN, "Integer overflows are not allowed under ASAN")
@dtypes(*torch.testing.get_all_dtypes())
def test_muldiv_scalar(self, device, dtype):
x = make_tensor((10, 3), device, dtype, low=None, high=None)
s = make_tensor((1,), 'cpu', dtype, low=None, high=None).item()
y = torch.full_like(x, s)
self.assertEqual(x * s, x * y)
self.assertEqual(s * x, y * x)
self.assertEqual(x / s, x / y)
self.assertEqual(s / x, y / x)
@dtypes(*tuple(itertools.combinations_with_replacement(torch.testing.get_all_dtypes(), 2)))
def test_comparison_ops_type_promotion_and_broadcasting(self, device, dtypes):
# issue #42660
# testing all combinations of broadcasting and type promotion
# with a range of dtypes and input shapes, and with extremal values
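# (size2 below is size1 with a leading dimension of 2 prepended, so e.g. a
# (10,) input is compared against a (2, 10) input to exercise broadcasting.)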
def compare_with_numpy_bin_op(torch_fn, np_fn, x, y, out=None):
# work around the fact that numpy doesn't support bfloat16
# by converting bfloat16 inputs to float32 before handing them to numpy
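# (bfloat16 to float32 is a lossless widening, so the comparison itself is
# unaffected by this workaround.)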
x_np = x if x.dtype != torch.bfloat16 else x.to(torch.float32)
y_np = y.cpu().numpy() if y.dtype != torch.bfloat16 else y.to(torch.float32).cpu().numpy()
self.compare_with_numpy(lambda inp: torch_fn(inp, y, out=out) if out else torch_fn(inp, y),
lambda inp: np_fn(inp, y_np, out=out) if out else np_fn(inp, y_np),
x_np)
complex_op_denylist = [torch.lt, torch.le, torch.gt, torch.ge] # complex not supported
input_sizes = [
(1,),
(10,),
(10, 1),
(1, 10),
(4, 10),
(64, 10),
(12, 3)]
op_pairs = [(torch.lt, np.less),
(torch.le, np.less_equal),
(torch.gt, np.greater),
(torch.ge, np.greater_equal),
(torch.eq, np.equal),
(torch.ne, np.not_equal),
(torch.logical_and, np.logical_and),
(torch.logical_or, np.logical_or),
(torch.logical_xor, np.logical_xor)]
for size1 in input_sizes:
size2 = (2,) + size1 # perform broadcasting
for with_extremal in [False, True]:
a = _generate_input(size1, dtypes[0], device, with_extremal)
b = _generate_input(size2, dtypes[1], device, with_extremal)
for torch_op, numpy_op in op_pairs:
if (dtypes[0].is_complex or dtypes[1].is_complex) and torch_op in complex_op_denylist:
continue
# functional version of op
compare_with_numpy_bin_op(torch_op, numpy_op, a, b)
# functional comparison ops always return bool tensors
self.assertEqual(torch_op(a, b).dtype, torch.bool)
# out version of op
out = torch.zeros(1, dtype=torch.complex128) # all casts to complex128 are safe
compare_with_numpy_bin_op(torch_op, numpy_op, a, b, out=out)
@onlyOnCPUAndCUDA
@dtypes(torch.int8, torch.int16, torch.int32, torch.int64)
def test_signed_shift(self, device, dtype):
"Ensure that signed integer bit shifting works as expected."
a = torch.tensor([-10, 10], device=device, dtype=dtype) # [11...1110110, 1010]
expected_l = torch.tensor([-40, 40], device=device, dtype=dtype) # [11...11011000, 101000]
self.assertEqual(a << 2, expected_l)
self.compare_with_numpy(lambda x: x << 2, lambda x: np.left_shift(x, 2), a)
expected_r = torch.tensor([-5, 5], device=device, dtype=dtype) # [1111...111011, 101]
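# (The right shift is arithmetic for signed dtypes: the sign bit is preserved,
# so -10 >> 1 == -5 as asserted below.)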
self.assertEqual(a >> 1, expected_r)
self.compare_with_numpy(lambda x: x >> 1, lambda x: np.right_shift(x, 1), a)
def test_bitwise_and(self, device):
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
a = torch.tensor([1, -2, 3], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3], dtype=dtype, device=device)
expected_res = torch.tensor([0, 0, 3], dtype=dtype, device=device)
b_scalar = 2
expected_res_scalar = torch.tensor([0, 2, 2], dtype=dtype, device=device)
# standard version
self.assertEqual(torch.bitwise_and(a, b), expected_res)
self.assertEqual(torch.bitwise_and(a, b_scalar), expected_res_scalar)
# out
c = torch.empty(0, dtype=dtype, device=device)
torch.bitwise_and(a, b, out=c)
self.assertEqual(c, expected_res)
torch.bitwise_and(a, b_scalar, out=c)
self.assertEqual(c, expected_res_scalar)
# in-place
a1 = a.clone()
a1.bitwise_and_(b)
self.assertEqual(a1, expected_res)
a.bitwise_and_(b_scalar)
self.assertEqual(a, expected_res_scalar)
self.assertEqual(torch.tensor([False, True, False], device=device),
torch.bitwise_and(torch.tensor([True, True, False], device=device),
torch.tensor([False, True, False], device=device)))
def test_bitwise_or(self, device):
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
a = torch.tensor([1, -2, 3], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3], dtype=dtype, device=device)
expected_res = torch.tensor([3, -1, 3], dtype=dtype, device=device)
b_scalar = 2
expected_res_scalar = torch.tensor([3, -2, 3], dtype=dtype, device=device)
# standard version
self.assertEqual(torch.bitwise_or(a, b), expected_res)
self.assertEqual(torch.bitwise_or(a, b_scalar), expected_res_scalar)
# out
c = torch.empty(0, dtype=dtype, device=device)
torch.bitwise_or(a, b, out=c)
self.assertEqual(c, expected_res)
torch.bitwise_or(a, b_scalar, out=c)
self.assertEqual(c, expected_res_scalar)
# in-place
a1 = a.clone()
a1.bitwise_or_(b)
self.assertEqual(a1, expected_res)
a.bitwise_or_(b_scalar)
self.assertEqual(a, expected_res_scalar)
self.assertEqual(torch.tensor([True, True, False], device=device),
torch.bitwise_or(torch.tensor([True, True, False], device=device),
torch.tensor([False, True, False], device=device)))
def test_bitwise_xor(self, device):
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
a = torch.tensor([1, -2, 3], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3], dtype=dtype, device=device)
expected_res = torch.tensor([3, -1, 0], dtype=dtype, device=device)
b_scalar = 2
expected_res_scalar = torch.tensor([3, -4, 1], dtype=dtype, device=device)
# standard version
self.assertEqual(torch.bitwise_xor(a, b), expected_res)
self.assertEqual(torch.bitwise_xor(a, b_scalar), expected_res_scalar)
# out
c = torch.empty(0, dtype=dtype, device=device)
torch.bitwise_xor(a, b, out=c)
self.assertEqual(c, expected_res)
torch.bitwise_xor(a, b_scalar, out=c)
self.assertEqual(c, expected_res_scalar)
# in-place
a1 = a.clone()
a1.bitwise_xor_(b)
self.assertEqual(a1, expected_res)
a.bitwise_xor_(b_scalar)
self.assertEqual(a, expected_res_scalar)
self.assertEqual(torch.tensor([True, False, False], device=device),
torch.bitwise_xor(torch.tensor([True, True, False], device=device),
torch.tensor([False, True, False], device=device)))
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_bitwise_shift(self, device, dtype):
ops = [
(torch.bitwise_left_shift, np.left_shift),
(operator.lshift, operator.lshift),
(torch.bitwise_right_shift, np.right_shift),
(operator.rshift, operator.rshift),
]
for torch_op, numpy_op in ops:
a = torch.tensor([19, -20, -21, 22], dtype=dtype, device=device)
b = torch.tensor([2, 1, 3, 1], dtype=dtype, device=device)
a_np = a.cpu().numpy()
b_np = b.cpu().numpy()
# Tensor x Tensor
self.assertEqual(torch_op(a, b), torch.tensor(numpy_op(a_np, b_np), device=device))
# Tensor x int scalar
self.assertEqual(torch_op(a, 2), torch.tensor(numpy_op(a_np, 2), device=device))
def test_bitwise_shift_float(self, device):
ops = [
(torch.bitwise_left_shift, lambda x, y: x * 2. ** y),
(operator.lshift, lambda x, y: x * 2. ** y),
(torch.bitwise_right_shift, lambda x, y: x / 2. ** y),
(operator.rshift, lambda x, y: x / 2. ** y),
]
for torch_op, expected_op in ops:
# int tensor x float
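# (The assertion below compares a shift by 1.8 against expected_op(a, 1),
# which suggests the float shift amount is truncated to an integer for
# integer tensors; this is an inference from the test, not a documented
# guarantee.)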
a = torch.tensor([19, -20, -21, 22], dtype=torch.int64, device=device)
self.assertEqual(torch_op(a, 1.8), torch.floor(expected_op(a, 1)).to(a.dtype))
# float tensor x int scalar
a = torch.tensor([19.1, -20.2, -21.3, 22.4], dtype=torch.float32, device=device)
self.assertEqual(torch_op(a, 2), expected_op(a, 2))
# float tensor x float scalar
a = torch.tensor([19.1, -20.2, -21.3, 22.4], dtype=torch.float32, device=device)
self.assertEqual(torch_op(a, 2.2), expected_op(a, 2.2))
@onlyOnCPUAndCUDA
@dtypes(*list(product(torch.testing.get_all_dtypes(include_complex=False),
torch.testing.get_all_dtypes(include_complex=False))))
def test_heaviside(self, device, dtypes):
input_dtype = dtypes[0]
values_dtype = dtypes[1]
rng = np.random.default_rng()
input = np.array(rng.integers(-10, 10, size=10),
dtype=torch_to_numpy_dtype_dict[input_dtype if (input_dtype != torch.bfloat16) else torch.float64])
input[0] = input[3] = input[7] = 0
values = np.array(rng.integers(-10, 10, size=10),
dtype=torch_to_numpy_dtype_dict[values_dtype if (values_dtype != torch.bfloat16) else torch.float64])
np_result = torch.from_numpy(np.heaviside(input, values)).to(device=device, dtype=input_dtype)
input = torch.from_numpy(input).to(device=device, dtype=input_dtype)
values = torch.from_numpy(values).to(device=device, dtype=values_dtype)
out = torch.empty_like(input)
if input_dtype == values_dtype:
torch_result = torch.heaviside(input, values)
self.assertEqual(np_result, torch_result)
torch_result = input.heaviside(values)
self.assertEqual(np_result, torch_result)
torch.heaviside(input, values, out=out)
self.assertEqual(np_result, out)
input.heaviside_(values)
self.assertEqual(np_result, input)
else:
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
torch.heaviside(input, values)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
input.heaviside(values)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
torch.heaviside(input, values, out=out)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):
input.heaviside_(values)
@onlyCUDA
def test_heaviside_cross_device(self, device):
x = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)
y = torch.tensor(0)
result = torch.heaviside(x, y)
expect = torch.tensor([0, 1, 0, 1, 0, 1], device=device)
self.assertEqual(result, expect)
result = torch.heaviside(y, x)
expect = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)
self.assertEqual(result, expect)
x = torch.tensor([-9, 5, 0, 6, -2, 2])
y = torch.tensor(0, device=device)
with self.assertRaisesRegex(RuntimeError, 'Expected all tensors to be on the same device'):
torch.heaviside(x, y)
with self.assertRaisesRegex(RuntimeError, 'Expected all tensors to be on the same device'):
torch.heaviside(y, x)
@dtypes(*list(product(torch.testing.get_all_complex_dtypes(),
torch.testing.get_all_complex_dtypes())))
def test_heaviside_complex(self, device, dtypes):
input_dtype = dtypes[0]
values_dtype = dtypes[1]
data = (complex(0, -6), complex(-1, 3), complex(1, 1))
input = torch.tensor(data, device=device, dtype=input_dtype)
values = torch.tensor(data, device=device, dtype=values_dtype)
out = torch.empty_like(input)
real = input.real
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
torch.heaviside(input, real)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
real.heaviside(values)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
input.heaviside_(values)
with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):
torch.heaviside(real, real, out=out)
def _test_logical(self, device, dtypes, op, a_, b_, expected_res_):
expected_res = torch.tensor(expected_res_, dtype=dtypes[0], device=device)
a = torch.tensor(a_, dtype=dtypes[0], device=device)
b = torch.tensor(b_, dtype=dtypes[1], device=device)
# new tensor
self.assertEqual(expected_res.bool(), getattr(a, op)(b))
# out
c = torch.empty(0, dtype=torch.bool, device=device)
getattr(torch, op)(a, b, out=c)
self.assertEqual(expected_res.bool(), c)
# in-place
# TODO: remove when different dtypes as operands are supported
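# (In-place logical ops currently require both operands to share a dtype, so a
# dtype mismatch is expected to raise, as checked here.)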
if dtypes[0] != dtypes[1]:
with self.assertRaises(RuntimeError):
getattr(a, op + '_')(b)
return
getattr(a, op + '_')(b)
self.assertEqual(expected_res, a)
@dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
def test_logical_xor(self, device, dtypes):
self._test_logical(device, dtypes, 'logical_xor', [10, 0, 1, 0], [1, 0, 0, 10], [0, 0, 1, 1])
@dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
def test_logical_and(self, device, dtypes):
self._test_logical(device, dtypes, 'logical_and', [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 0, 0])
@dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))
def test_logical_or(self, device, dtypes):
self._test_logical(device, dtypes, 'logical_or', [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 1, 1])
def test_remainder_overflow(self, device):
# Check Integer Overflows
x = torch.tensor(23500, dtype=torch.int64, device=device)
q = 392486996410368
self.assertEqual(x % q, x)
self.assertEqual(-x % q, q - x)
self.assertEqual(x % -q, x - q)
self.assertEqual(-x % -q, -x)
def test_rpow(self, device):
m = torch.randn(10, 10, device=device)
self.assertEqual(torch.pow(2, m), 2**m)
# test with scalar
m = torch.randn(1, device=device).squeeze()
assert m.dim() == 0, "m is intentionally a scalar"
self.assertEqual(torch.pow(2, m), 2**m)
@onlyCPU
def test_ldexp(self, device):
# random values
mantissas = torch.randn(64, device=device)
exponents = torch.randint(-31, 31, (64,), device=device, dtype=torch.int32)
# basic test
np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())
pt_outcome_1 = torch.ldexp(mantissas, exponents)
pt_outcome_2 = mantissas.ldexp(exponents)
self.assertEqual(np_outcome, pt_outcome_1)
self.assertEqual(np_outcome, pt_outcome_2)
mantissas.ldexp_(exponents)
self.assertEqual(np_outcome, mantissas)
# test bounds
mantissas = torch.tensor([float('inf'), float('-inf'), float('inf'), float('nan')], device=device)
exponents = torch.randint(0, 31, (4,), device=device, dtype=torch.int32)
np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())
pt_outcome = torch.ldexp(mantissas, exponents)
self.assertEqual(np_outcome, pt_outcome)
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_lerp(self, device, dtype):
start_end_weight_shapes = [(), (5,), (5, 5)]
for shapes in product(start_end_weight_shapes, start_end_weight_shapes, start_end_weight_shapes):
start = torch.randn(shapes[0], device=device, dtype=dtype)
end = torch.randn(shapes[1], device=device, dtype=dtype)
# Tensor and scalar weights
weights = [torch.randn(shapes[2], device=device, dtype=dtype), random.random()]
if dtype.is_complex:
weights += [complex(0, 1), complex(0.4, 1.2)]
for weight in weights:
actual = torch.lerp(start, end, weight)
actual_method = start.lerp(end, weight)
self.assertEqual(actual, actual_method)
actual_out = torch.tensor(1., dtype=dtype, device=device)
torch.lerp(start, end, weight, out=actual_out)
self.assertEqual(actual, actual_out)
expected = start + weight * (end - start)
self.assertEqual(expected, actual)
def _test_logaddexp(self, device, dtype, base2):
if base2:
ref_func = np.logaddexp2
our_func = torch.logaddexp2
else:
ref_func = np.logaddexp
our_func = torch.logaddexp
def _test_helper(a, b):
ref = ref_func(a.cpu().numpy(), b.cpu().numpy())
v = our_func(a, b)
self.assertEqual(ref, v)
# simple test
a = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
b = torch.randn(64, 2, dtype=dtype, device=device) - 0.5
_test_helper(a, b)
_test_helper(a[:3], b[:3])
# large value test for numerical stability
a *= 10000
b *= 10000
_test_helper(a, b)
_test_helper(a[:3], b[:3])
a = torch.tensor([float('inf'), float('-inf'), float('inf'), float("nan")], dtype=dtype, device=device)
b = torch.tensor([float('inf'), float('-inf'), float('-inf'), float("nan")], dtype=dtype, device=device)
_test_helper(a, b)
@dtypes(torch.float32, torch.float64)
def test_logaddexp(self, device, dtype):
self._test_logaddexp(device, dtype, base2=False)
@dtypes(torch.float32, torch.float64)
def test_logaddexp2(self, device, dtype):
self._test_logaddexp(device, dtype, base2=True)
def test_add(self, device):
dtypes = [torch.float, torch.double] + torch.testing.get_all_complex_dtypes()
for dtype in dtypes:
# [res] torch.add([res,] tensor1, tensor2)
m1 = torch.randn(100, 100, dtype=dtype, device=device)
v1 = torch.randn(100, dtype=dtype, device=device)
# contiguous
res1 = torch.add(m1[4], v1)
res2 = res1.clone().zero_()
for i in range(m1.size(1)):
res2[i] = m1[4, i] + v1[i]
self.assertEqual(res1, res2)
m1 = torch.randn(100, 100, device=device)
v1 = torch.randn(100, device=device)
# non-contiguous
res1 = torch.add(m1[:, 4], v1)
res2 = res1.clone().zero_()
for i in range(m1.size(0)):
res2[i] = m1[i, 4] + v1[i]
self.assertEqual(res1, res2)
# [res] torch.add([res,] tensor, value)
m1 = torch.randn(10, 10, device=device)
# contiguous
res1 = m1.clone()
res1[3].add_(2)
res2 = m1.clone()
for i in range(m1.size(1)):
res2[3, i] = res2[3, i] + 2
self.assertEqual(res1, res2)
# non-contiguous
m1 = torch.randn(10, 10, device=device)
res1 = m1.clone()
res1[:, 3].add_(2)
res2 = m1.clone()
for i in range(m1.size(0)):
res2[i, 3] = res2[i, 3] + 2
self.assertEqual(res1, res2)
# inter-type
m1 = torch.randn(10, 10, dtype=dtype, device=device)
self.assertEqual(m1 + 3, m1 + torch.tensor(3))
self.assertEqual(3 + m1, torch.tensor(3) + m1)
# contiguous + non-contiguous
m1 = torch.randn(10, 10, dtype=dtype, device=device)
m2 = torch.randn(10, 10, dtype=dtype, device=device).t()
res = m1 + m2
self.assertTrue(res.is_contiguous())
self.assertEqual(res, m1 + m2.contiguous())
# 1d + empty
m1 = torch.tensor([1.0], dtype=dtype, device=device)
m2 = torch.tensor([], dtype=dtype, device=device)
self.assertEqual(m1 + m2, [])
# inter-type uint8
one = torch.tensor(1, dtype=torch.uint8, device=device)
self.assertEqual(torch.add(one, 1), 2)
self.assertEqual(torch.add(one, 1).dtype, torch.uint8)
# bool
m1 = torch.tensor([True, False, False, True, False, False], dtype=torch.bool, device=device)
m2 = torch.tensor([True, True, False, False, False, True], dtype=torch.bool, device=device)
expected = torch.tensor([True, True, False, True, False, True], dtype=torch.bool, device=device)
self.assertEqual(m1 + m2, expected)
# fused multiply add
a = torch.zeros(2, 3, dtype=torch.bool, device=device)
res = torch.add(a, a, alpha=0)
expected = torch.zeros(2, 3, device=device).bool()
self.assertEqual(res, expected)
# bfloat16
m1 = torch.tensor([1., 2.], dtype=torch.bfloat16)
m2 = torch.tensor([3., 4.], dtype=torch.bfloat16)
self.assertEqual(m1 + m2, torch.tensor([4., 6.], dtype=torch.bfloat16))
# different alpha types
m1 = torch.tensor([2 + 3j, 4 + 5j], dtype=torch.complex64, device=device)
m2 = torch.tensor([4 + 5j, 2 + 3j], dtype=torch.complex64, device=device)
# add complex numbers with float alpha
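# e.g. (2 + 3j) + 0.1 * (4 + 5j) = 2.4 + 3.5j, matching the first element of
# the expected tensor below (the second element follows the same arithmetic).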
res = torch.add(m1, m2, alpha=0.1)
expected = torch.tensor([2.4000 + 3.5000j, 4.2000 + 5.3000j], dtype=torch.complex64, device=device)
self.assertEqual(res, expected)
# add complex numbers with complex alpha
res = torch.add(m1, m2, alpha=complex(0.1, 0.2))
expected = torch.tensor([1.4000 + 4.3000j, 3.6000 + 5.7000j], dtype=torch.complex64, device=device)
self.assertEqual(res, expected)
# add complex numbers with integer alpha
res = torch.add(m1, m2, alpha=2)
expected = torch.tensor([10. + 13.j, 8. + 11.j], dtype=torch.complex64, device=device)
self.assertEqual(res, expected)
# mismatched alpha
m1 = torch.tensor([1], dtype=torch.int8, device=device)
m2 = torch.tensor([2], dtype=torch.int8, device=device)
self.assertRaisesRegex(RuntimeError,
r"Boolean alpha only supported for Boolean results\.",
lambda: torch.add(m1, m2, alpha=True))
self.assertRaisesRegex(RuntimeError,
r"For integral input tensors, argument alpha must not be a floating point number\.",
lambda: torch.add(m1, m2, alpha=1.0))
# mismatched alpha, float / double tensor and complex alpha
msg = r"For non-complex input tensors, argument alpha must not be a complex number\."
m1 = torch.tensor([3., 4.], device=device)
m2 = torch.tensor([4., 3.], device=device)
self.assertRaisesRegex(RuntimeError, msg,
lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2)))
m1 = torch.tensor([3., 4.], dtype=torch.double, device=device)
m2 = torch.tensor([4., 3.], dtype=torch.double, device=device)
self.assertRaisesRegex(RuntimeError, msg,
lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2)))
# complex
m1 = torch.tensor((4.0000 + 4.0000j), dtype=torch.complex64)
m2 = torch.tensor(4., dtype=torch.float64)
self.assertRaisesRegex(RuntimeError, r"result type ComplexFloat can't be cast to the desired output type Double",
lambda: torch.add(m1, m1, out=m2))
@onlyCUDA
def test_addsub_half_tensor(self, device):
x = torch.tensor([60000.0], dtype=torch.half, device=device)
for op, y, alpha in (
(torch.add, torch.tensor([-60000.0], dtype=torch.half, device=device), 2),
(torch.sub, torch.tensor([60000.0], dtype=torch.half, device=device), 2),
(torch.add, -70000.0, 1),
(torch.sub, 70000.0, 1),
):
actual = op(x, y, alpha=alpha)
self.assertTrue(not (actual.isnan() or actual.isinf()))
def test_sub_typing(self, device):
m1 = torch.tensor([True, False, False, True, False, False], dtype=torch.bool, device=device)
m2 = torch.tensor([True, True, False, False, False, True], dtype=torch.bool, device=device)
self.assertRaisesRegex(RuntimeError,
r"Subtraction, the `\-` operator, with two bool tensors is not supported. "
r"Use the `\^` or `logical_xor\(\)` operator instead.",
lambda: m1 - m2)
self.assertRaisesRegex(RuntimeError,
r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
lambda: 1 - m1)
self.assertRaisesRegex(RuntimeError,
r"Subtraction, the `\-` operator, with a bool tensor is not supported. "
r"If you are trying to invert a mask, use the `\~` or `logical_not\(\)` operator instead.",
lambda: m2 - 1)
# mismatched alpha
m1 = torch.tensor([1], dtype=torch.int8, device=device)
m2 = torch.tensor([2], dtype=torch.int8, device=device)
self.assertRaisesRegex(RuntimeError,
r"Boolean alpha only supported for Boolean results\.",
lambda: torch.sub(m1, m2, alpha=True))
self.assertRaisesRegex(RuntimeError,
r"For integral input tensors, argument alpha must not be a floating point number\.",
lambda: torch.sub(m1, m2, alpha=1.0))
def test_mul(self, device):
m1 = torch.randn(10, 10, device=device)
res1 = m1.clone()
res1[:, 3].mul_(2)
res2 = m1.clone()
for i in range(res1.size(0)):
res2[i, 3] = res2[i, 3] * 2
self.assertEqual(res1, res2)
a1 = torch.tensor([True, False, False, True], dtype=torch.bool, device=device)
a2 = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)
self.assertEqual(a1 * a2, torch.tensor([True, False, False, False], dtype=torch.bool, device=device))
if device == 'cpu':
a1 = torch.tensor([0.1, 0.1], dtype=torch.bfloat16, device=device)
a2 = torch.tensor([1.1, 0.1], dtype=torch.bfloat16, device=device)
self.assertEqual(a1 * a2, torch.tensor([0.11, 0.01], dtype=torch.bfloat16, device=device), atol=0.01, rtol=0)
self.assertEqual(a1.mul(a2), a1 * a2)
def test_bool_tensor_comparison_ops(self, device):
a = torch.tensor([True, False, True, False, True, False], dtype=torch.bool, device=device)
b = torch.tensor([True, False, True, True, True, True], dtype=torch.bool, device=device)
self.assertEqual(a == b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a != b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device))
self.assertEqual(a < b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device))
self.assertEqual(a > b, torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.bool, device=device))
self.assertEqual(a >= b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a <= b, torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.bool, device=device))
self.assertEqual(a > False, torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a == torch.tensor(True, dtype=torch.bool, device=device),
torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device))
self.assertEqual(a == torch.tensor(0, dtype=torch.bool, device=device),
torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool, device=device))
self.assertFalse(a.equal(b))
@dtypes(*torch.testing.get_all_dtypes(include_complex=False))
def test_logical(self, device, dtype):
if dtype != torch.bool:
x = torch.tensor([1, 2, 3, 4], device=device, dtype=dtype)
b = torch.tensor([2], device=device, dtype=dtype)
self.assertEqual(x.lt(2), torch.tensor([True, False, False, False]))
self.assertEqual(x.le(2), torch.tensor([True, True, False, False]))
self.assertEqual(x.ge(2), torch.tensor([False, True, True, True]))
self.assertEqual(x.gt(2), torch.tensor([False, False, True, True]))
self.assertEqual(x.eq(2), torch.tensor([False, True, False, False]))
self.assertEqual(x.ne(2), torch.tensor([True, False, True, True]))
self.assertEqual(x.lt(b), torch.tensor([True, False, False, False]))
self.assertEqual(x.le(b), torch.tensor([True, True, False, False]))
self.assertEqual(x.ge(b), torch.tensor([False, True, True, True]))
self.assertEqual(x.gt(b), torch.tensor([False, False, True, True]))
self.assertEqual(x.eq(b), torch.tensor([False, True, False, False]))
self.assertEqual(x.ne(b), torch.tensor([True, False, True, True]))
else:
x = torch.tensor([True, False, True, False], device=device)
self.assertEqual(x.lt(True), torch.tensor([False, True, False, True]))
self.assertEqual(x.le(True), torch.tensor([True, True, True, True]))
self.assertEqual(x.ge(True), torch.tensor([True, False, True, False]))
self.assertEqual(x.gt(True), torch.tensor([False, False, False, False]))
self.assertEqual(x.eq(True), torch.tensor([True, False, True, False]))
self.assertEqual(x.ne(True), torch.tensor([False, True, False, True]))
def test_atan2(self, device):
def _test_atan2_with_size(size, device):
a = torch.rand(size=size, device=device, dtype=torch.double)
b = torch.rand(size=size, device=device, dtype=torch.double)
actual = a.atan2(b)
x = a.view(-1)
y = b.view(-1)
expected = torch.tensor([math.atan2(x[i].item(), y[i].item()) for i in range(x.numel())],
device=device, dtype=torch.double)
self.assertEqual(expected, actual.view(-1), rtol=0, atol=0.02)
_test_atan2_with_size((2, 2), device)
_test_atan2_with_size((3, 3), device)
_test_atan2_with_size((5, 5), device)
def test_atan2_edgecases(self, device):
def _test_atan2(x, y, expected, device, dtype):
expected_tensor = torch.tensor([expected], dtype=dtype, device=device)
x_tensor = torch.tensor([x], dtype=dtype, device=device)
y_tensor = torch.tensor([y], dtype=dtype, device=device)
actual = torch.atan2(y_tensor, x_tensor)
self.assertEqual(expected_tensor, actual, rtol=0, atol=0.02)
for dtype in [torch.float, torch.double]:
_test_atan2(0, 0, 0, device, dtype)
_test_atan2(0, 1, math.pi / 2, device, dtype)
_test_atan2(0, -1, math.pi / -2, device, dtype)
_test_atan2(-1, 0, math.pi, device, dtype)
_test_atan2(1, 0, 0, device, dtype)
_test_atan2(-1, -1, math.pi * -3 / 4, device, dtype)
_test_atan2(1, 1, math.pi / 4, device, dtype)
_test_atan2(1, -1, math.pi / -4, device, dtype)
_test_atan2(-1, 1, math.pi * 3 / 4, device, dtype)
def test_trapz(self, device):
def test_dx(sizes, dim, dx, device):
t = torch.randn(sizes, device=device)
actual = torch.trapz(t, dx=dx, dim=dim)
expected = np.trapz(t.cpu().numpy(), dx=dx, axis=dim)
self.assertEqual(expected.shape, actual.shape)
self.assertEqual(expected, actual, exact_dtype=False)
def test_x(sizes, dim, x, device):
t = torch.randn(sizes, device=device)
actual = torch.trapz(t, x=torch.tensor(x, device=device), dim=dim)
expected = np.trapz(t.cpu().numpy(), x=x, axis=dim)
self.assertEqual(expected.shape, actual.shape)
self.assertEqual(expected, actual.cpu(), exact_dtype=False)
test_dx((2, 3, 4), 1, 1, device)
test_dx((10, 2), 0, 0.1, device)
test_dx((1, 10), 0, 2.3, device)
test_dx((0, 2), 0, 1.0, device)
test_dx((0, 2), 1, 1.0, device)
test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)
test_x((10, 2), 0, [2.0, 3.0, 4.0, 7.0, 11.0, 14.0, 22.0, 26.0, 26.1, 30.3], device)
test_x((1, 10), 0, [1.0], device)
test_x((0, 2), 0, [], device)
test_x((0, 2), 1, [1.0, 2.0], device)
with self.assertRaisesRegex(
IndexError,
'Dimension out of range'):
test_x((2, 3), 2, [], device)
test_dx((2, 3), 2, 1.0, device)
with self.assertRaisesRegex(
RuntimeError,
'There must be one `x` value for each sample point'):
test_x((2, 3), 1, [1.0, 2.0], device)
test_x((2, 3), 1, [1.0, 2.0, 3.0, 4.0], device)
@dtypes(torch.double)
def test_pow_scalar_overloads_mem_overlap(self, device, dtype):
sz = 3
doubles = torch.randn(2 * sz, dtype=dtype, device=device)
self.check_internal_mem_overlap(
lambda t: t.pow_(42), 1, dtype, device)
self.unary_check_input_output_mem_overlap(
doubles, sz, lambda input, out: torch.pow(input, 42, out=out))
self.unary_check_input_output_mem_overlap(
doubles, sz, lambda input, out: torch.pow(42, input, out=out))
@dtypes(*list(product(torch.testing.get_all_dtypes(include_bool=False),
torch.testing.get_all_dtypes(include_bool=False))))
def test_float_power(self, device, dtypes):
def to_np(value):
if isinstance(value, torch.Tensor) and value.dtype == torch.bfloat16:
return value.to(torch.float).cpu().numpy()
return value.cpu().numpy() if isinstance(value, torch.Tensor) else value
base_dtype = dtypes[0]
exp_dtype = dtypes[1]
out_dtype = torch.complex128 if base_dtype.is_complex or exp_dtype.is_complex else torch.float64
base = make_tensor((30,), device, base_dtype, low=1, high=100)
# Complex and real results do not agree between PyTorch and NumPy when computing negative and zero power of 0
# Related: https://github.com/pytorch/pytorch/issues/48000
# base[0] = base[3] = base[7] = 0
exp = make_tensor((30,), device, exp_dtype, low=-2, high=2)
exp[0] = exp[4] = exp[6] = 0
expected = torch.from_numpy(np.float_power(to_np(base), to_np(exp)))
exponents = [-2.8, -2, -1, -0.5, 0.5, 1, 2]
complex_exponents = exponents + [-2.5j, -1.0j, 1.0j, 2.5j, 1.0 + 1.0j, -1.0 - 1.5j, 3.3j]
for op in (torch.float_power, torch.Tensor.float_power, torch.Tensor.float_power_):
# Case of Tensor x Tensor
if op is torch.Tensor.float_power_ and base_dtype != out_dtype:
with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
op(base.clone(), exp)
else:
result = op(base.clone(), exp)
self.assertEqual(expected, result)
if op is torch.float_power:
out = torch.empty_like(base).to(device=device, dtype=out_dtype)
op(base, exp, out=out)
self.assertEqual(expected, out)
# Case of Tensor x Scalar
for i in complex_exponents if exp_dtype.is_complex else exponents:
out_dtype_scalar_exp = torch.complex128 if base_dtype.is_complex or type(i) == complex else torch.float64
expected_scalar_exp = torch.from_numpy(np.float_power(to_np(base), i))
if op is torch.Tensor.float_power_ and base_dtype != out_dtype_scalar_exp:
with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
op(base.clone(), i)
else:
result = op(base.clone(), i)
self.assertEqual(expected_scalar_exp, result)
if op is torch.float_power:
out = torch.empty_like(base).to(device=device, dtype=out_dtype_scalar_exp)
op(base, i, out=out)
self.assertEqual(expected_scalar_exp, out)
# Case of Scalar x Tensor
for i in complex_exponents if base_dtype.is_complex else exponents:
out_dtype_scalar_base = torch.complex128 if exp_dtype.is_complex or type(i) == complex else torch.float64
expected_scalar_base = torch.from_numpy(np.float_power(i, to_np(exp)))
result = torch.float_power(i, exp)
self.assertEqual(expected_scalar_base, result)
out = torch.empty_like(exp).to(device=device, dtype=out_dtype_scalar_base)
torch.float_power(i, exp, out=out)
self.assertEqual(expected_scalar_base, out)
def test_float_power_exceptions(self, device):
def _promo_helper(x, y):
for i in (x, y):
if type(i) == complex:
return torch.complex128
elif type(i) == torch.Tensor and i.is_complex():
return torch.complex128
return torch.double
test_cases = ((torch.tensor([-2, -1, 0, 1, 2], device=device), -.25),
(torch.tensor([-1.0j, 0j, 1.0j, 1.0 + 1.0j, -1.0 - 1.5j], device=device), 2.))
for base, exp in test_cases:
for out_dtype in (torch.long, torch.float, torch.double, torch.cdouble):
out = torch.empty(1, device=device, dtype=out_dtype)
required_dtype = _promo_helper(base, exp)
if out.dtype == required_dtype:
torch.float_power(base, exp, out=out)
else:
with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
torch.float_power(base, exp, out=out)
if base.dtype == required_dtype:
torch.Tensor.float_power_(base.clone(), exp)
else:
with self.assertRaisesRegex(RuntimeError, "operation's result requires dtype"):
torch.Tensor.float_power_(base.clone(), exp)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False, include_bfloat16=False),
torch.testing.get_all_dtypes(include_complex=False, include_bfloat16=False)))
def test_xlogy_xlog1py(self, device, dtypes):
x_dtype, y_dtype = dtypes
def out_variant_helper(torch_fn, x, y):
expected = torch_fn(x, y)
out = torch.empty_like(expected)
torch_fn(x, y, out=out)
self.assertEqual(expected, out)
def xlogy_inplace_variant_helper(x, y):
if x.dtype in torch.testing.get_all_int_dtypes() + [torch.bool]:
with self.assertRaisesRegex(RuntimeError,
"can't be cast to the desired output type"):
x.clone().xlogy_(y)
else:
expected = torch.empty_like(x)
torch.xlogy(x, y, out=expected)
inplace_out = x.clone().xlogy_(y)
self.assertEqual(expected, inplace_out)
def test_helper(torch_fn, reference_fn, inputs, scalar=None):
x, y, z = inputs
torch_fn_partial = partial(torch_fn, x)
reference_fn_partial = partial(reference_fn, x.cpu().numpy())
self.compare_with_numpy(torch_fn_partial, reference_fn_partial, x, exact_dtype=False)
self.compare_with_numpy(torch_fn_partial, reference_fn_partial, y, exact_dtype=False)
self.compare_with_numpy(torch_fn_partial, reference_fn_partial, z, exact_dtype=False)
val = scalar if scalar is not None else x
out_variant_helper(torch_fn, val, x)
out_variant_helper(torch_fn, val, y)
out_variant_helper(torch_fn, val, z)
# Tensor-Tensor Test (tensor of same and different shape)
x = make_tensor((3, 2, 4, 5), device, x_dtype, low=0.5, high=1000)
y = make_tensor((3, 2, 4, 5), device, y_dtype, low=0.5, high=1000)
z = make_tensor((4, 5), device, y_dtype, low=0.5, high=1000)
x_1p = make_tensor((3, 2, 4, 5), device, x_dtype, low=-0.5, high=1000)
y_1p = make_tensor((3, 2, 4, 5), device, y_dtype, low=-0.5, high=1000)
z_1p = make_tensor((4, 5), device, y_dtype, low=-0.5, high=1000)
xlogy_fns = torch.xlogy, scipy.special.xlogy
xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py
test_helper(*xlogy_fns, (x, y, z))
xlogy_inplace_variant_helper(x, x)
xlogy_inplace_variant_helper(x, y)
xlogy_inplace_variant_helper(x, z)
test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p))
# Scalar-Tensor Test
test_helper(*xlogy_fns, (x, y, z), 3.14)
test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p), 3.14)
# Special Values Tensor-Tensor
t = torch.tensor([-1., 0., 1., 2., float('inf'), -float('inf'), float('nan')], device=device)
zeros = torch.zeros(7, dtype=y_dtype, device=device)
def test_zeros_special_helper(torch_fn, reference_fn, scalar=False):
zeros_t = 0 if scalar else zeros
zeros_np = 0 if scalar else zeros.cpu().numpy()
torch_fn_partial = partial(torch_fn, zeros_t)
reference_fn_partial = partial(reference_fn, zeros_np)
self.compare_with_numpy(torch_fn_partial, reference_fn_partial, t, exact_dtype=False)
out_variant_helper(torch_fn, zeros_t, t)
test_zeros_special_helper(*xlogy_fns)
xlogy_inplace_variant_helper(zeros, t)
test_zeros_special_helper(*xlog1py_fns)
# Special Values Scalar-Tensor
test_zeros_special_helper(*xlogy_fns, scalar=True)
test_zeros_special_helper(*xlog1py_fns, scalar=True)
def test_xlogy_xlog1py_scalar_type_promotion(self, device):
# Test that python numbers don't participate in type promotion at the same
# priority level as 0-dim tensors
t = torch.randn((), dtype=torch.float32, device=device)
self.assertEqual(t.dtype, torch.xlogy(t, 5).dtype)
self.assertEqual(t.dtype, torch.xlogy(t, 5.).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(t, 5).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(t, 5.).dtype)
self.assertEqual(t.dtype, torch.xlogy(5, t).dtype)
self.assertEqual(t.dtype, torch.xlogy(5., t).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(5, t).dtype)
self.assertEqual(t.dtype, torch.special.xlog1py(5., t).dtype)
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_xlogy_xlog1py_bfloat16(self, device):
def _compare_helper(x, y, torch_fn, reference_fn):
x_np = x if isinstance(x, float) else x.cpu().to(torch.float).numpy()
y_np = y if isinstance(y, float) else y.cpu().to(torch.float).numpy()
expected = torch.from_numpy(reference_fn(x_np, y_np))
actual = torch_fn(x, y)
self.assertEqual(expected, actual, exact_dtype=False)
x_dtype, y_dtype = torch.bfloat16, torch.bfloat16
# Tensor-Tensor Test (tensor of same and different shape)
x = make_tensor((3, 2, 4, 5), device, x_dtype, low=0.5, high=1000)
y = make_tensor((3, 2, 4, 5), device, y_dtype, low=0.5, high=1000)
z = make_tensor((4, 5), device, y_dtype, low=0.5, high=1000)
x_1p = make_tensor((3, 2, 4, 5), device, x_dtype, low=-0.8, high=1000)
y_1p = make_tensor((3, 2, 4, 5), device, y_dtype, low=-0.8, high=1000)
z_1p = make_tensor((4, 5), device, y_dtype, low=-0.8, high=1000)
xlogy_fns = torch.xlogy, scipy.special.xlogy
xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py
_compare_helper(x, x, *xlogy_fns)
_compare_helper(x, y, *xlogy_fns)
_compare_helper(x, z, *xlogy_fns)
_compare_helper(x, 3.14, *xlogy_fns)
_compare_helper(y, 3.14, *xlogy_fns)
_compare_helper(z, 3.14, *xlogy_fns)
_compare_helper(x_1p, x_1p, *xlog1py_fns)
_compare_helper(x_1p, y_1p, *xlog1py_fns)
_compare_helper(x_1p, z_1p, *xlog1py_fns)
_compare_helper(x_1p, 3.14, *xlog1py_fns)
_compare_helper(y_1p, 3.14, *xlog1py_fns)
_compare_helper(z_1p, 3.14, *xlog1py_fns)
# Special Values Tensor-Tensor
t = torch.tensor([-1., 0., 1., 2., float('inf'), -float('inf'), float('nan')], device=device)
        zeros = torch.zeros(7, dtype=y_dtype, device=device)
_compare_helper(t, zeros, *xlogy_fns)
_compare_helper(t, 0., *xlogy_fns)
_compare_helper(t, zeros, *xlog1py_fns)
_compare_helper(t, 0., *xlog1py_fns)
@dtypes(*product(torch.testing.get_all_dtypes(include_complex=False,
include_half=False, include_bfloat16=False),
torch.testing.get_all_dtypes(include_complex=False,
include_half=False, include_bfloat16=False)))
@skipIf(not TEST_SCIPY, "Scipy required for the test.")
def test_zeta(self, device, dtypes):
x_dtype, q_dtype = dtypes
def test_helper(x, q):
x_np = x if isinstance(x, float) else x.cpu().numpy()
q_np = q if isinstance(q, float) else q.cpu().numpy()
expected = torch.from_numpy(scipy.special.zeta(x_np, q_np))
actual = torch.special.zeta(x, q)
rtol, atol = None, None
if self.device_type == 'cpu':
rtol, atol = 1e-6, 1e-6
self.assertEqual(expected, actual, rtol=rtol, atol=atol, exact_dtype=False)
# x tensor - q tensor same size
x = make_tensor((2, 3, 4), device, x_dtype)
q = make_tensor((2, 3, 4), device, q_dtype)
test_helper(x, q)
# x tensor - q tensor broadcast lhs
x = make_tensor((2, 1, 4), device, x_dtype)
q = make_tensor((2, 3, 4), device, q_dtype)
test_helper(x, q)
# x tensor - q tensor broadcast rhs
x = make_tensor((2, 3, 4), device, x_dtype)
q = make_tensor((2, 1, 4), device, q_dtype)
test_helper(x, q)
# x tensor - q tensor broadcast all
x = make_tensor((2, 3, 1), device, x_dtype)
q = make_tensor((2, 1, 4), device, q_dtype)
test_helper(x, q)
# x scalar - q tensor
for x in np.linspace(-5, 5, num=10).tolist():
if not q_dtype.is_floating_point:
q_dtype = torch.get_default_dtype()
q = make_tensor((2, 3, 4), device, q_dtype)
test_helper(x, q)
# x tensor - q scalar
for q in np.linspace(-5, 5, num=10).tolist():
if not x_dtype.is_floating_point:
x_dtype = torch.get_default_dtype()
x = make_tensor((2, 3, 4), device, x_dtype)
test_helper(x, q)
tensor_binary_ops = [
'__lt__', '__le__',
'__gt__', '__ge__',
'__eq__', '__ne__',
'__add__', '__radd__', '__iadd__',
'__sub__', '__rsub__', '__isub__',
'__mul__', '__rmul__', '__imul__',
'__matmul__', '__rmatmul__',
'__truediv__', '__rtruediv__', '__itruediv__',
'__floordiv__', '__rfloordiv__', '__ifloordiv__',
'__mod__', '__rmod__', '__imod__',
'__pow__', '__rpow__', '__ipow__',
'__lshift__', '__rlshift__', '__ilshift__',
'__rshift__', '__rrshift__', '__irshift__',
'__and__', '__iand__',
'__xor__', '__ixor__',
'__or__', '__ior__',
# Unsupported operators
# '__imatmul__',
# '__divmod__', '__rdivmod__', '__idivmod__',
# '__rand__', '__ror__', '__rxor__',
]
# Test that binary math operations return NotImplemented for unknown types.
def generate_not_implemented_tests(cls):
class UnknownType:
pass
# TODO: refactor to inline these
_types = [
torch.half, torch.float, torch.double,
torch.int8, torch.short, torch.int, torch.long,
torch.uint8
]
# TODO: refactor to use make_tensor
def _small_2d(dtype, device, has_zeros=True, fill_ones=False, oneish=False):
t = _make_tensor((5, 5), dtype, device, fill_ones=fill_ones)
if oneish:
return t.clamp(min=_number(.99, 1, dtype), max=1.01)
if not has_zeros:
return t.clamp(min=(_number(_div_min, 1, dtype)))
return t
def create_test_func(op):
@dtypes(*_types)
def test(self, device, dtype):
# Generate the inputs
tensor = _small_2d(dtype, device)
# Runs the tensor op on the device
result = getattr(tensor, op)(UnknownType())
self.assertEqual(result, NotImplemented)
return test
for op in tensor_binary_ops:
test_name = "test_{}_not_implemented".format(op)
assert not hasattr(cls, test_name), "{0} already in {1}".format(
test_name, cls.__name__)
setattr(cls, test_name, create_test_func(op))
generate_not_implemented_tests(TestBinaryUfuncs)
instantiate_device_type_tests(TestBinaryUfuncs, globals())
if __name__ == '__main__':
run_tests()
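# Illustrative sketch, not part of the test suite above: torch.float_power always
# computes in double precision, promoting real inputs to torch.float64 and complex
# inputs to torch.complex128. This is the promotion rule that test_float_power and
# test_float_power_exceptions exercise. The helper below is never called here.
def _float_power_promotion_sketch():
    base = torch.arange(1, 5, dtype=torch.int32)      # integer inputs are promoted
    real_result = torch.float_power(base, 2)          # -> torch.float64
    complex_result = torch.float_power(base, 1 + 1j)  # complex exponent -> torch.complex128
    return real_result.dtype, complex_result.dtype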
|
from sympy import Rational, Symbol, latex, UnevaluatedExpr
import sympy as sp
import numpy as np
u = lambda x : UnevaluatedExpr(x)
# Helper functions
def explain_add(a, b):
assert(np.shape(a) == np.shape(b))
rows, columns = np.shape(a)
return sp.Matrix([[Symbol(f"({latex(u(a[i,j]))} + {latex(u(b[i,j]))})") for j in range(columns)] for i in range(rows)])
def symbolic_matrix(character, rows, columns):
# row or column vector
if rows == 1:
return sp.Matrix([[Symbol(f"{{{character}}}_{{{j+1}}}") for j in range(columns)] for i in range(rows)])
if columns == 1:
return sp.Matrix([[Symbol(f"{{{character}}}_{{{i+1}}}") for j in range(columns)] for i in range(rows)])
return sp.Matrix([[Symbol(f"{{{character}}}_{{{i+1}, {j+1}}}") for j in range(columns)] for i in range(rows)])
def explain_multiply(a, b):
# #rows in b == #columns in a
assert(np.shape(a)[1] == np.shape(b)[0])
rows = np.shape(a)[0]
columns = np.shape(b)[1]
result = np.empty(shape=(rows, columns), dtype=object)
for i in range(rows):
row = a[i,:]
for j in range(columns):
column = b[:,j]
zipped = zip(row, column)
            mapped = list(map(lambda t: rf"{latex(u(t[0]))} \cdot {latex(u(t[1]))}", zipped))
            result[i, j] = Symbol(" + ".join(mapped))
return sp.Matrix(result)
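# Minimal usage sketch (added illustration, assuming the helpers above are the whole
# public surface of this module): build two small symbolic matrices and render the
# element-by-element expansion of their sum and product as LaTeX.
if __name__ == "__main__":
    A = symbolic_matrix("a", 2, 3)
    B = symbolic_matrix("b", 3, 2)
    print(latex(explain_add(A, A)))       # each entry shows the term-by-term sum
    print(latex(explain_multiply(A, B)))  # each entry shows the dot-product expansion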
|
import logging
from numpy import degrees, pi, radians
from beyond.frames import get_frame, create_station
from beyond.errors import UnknownFrameError
from .wspace import ws
from .utils import dms2deg, deg2dms
log = logging.getLogger(__name__)
class StationDb:
def __new__(cls):
if not hasattr(cls, "_instance"):
# Singleton
cls._instance = super().__new__(cls)
return cls._instance
@classmethod
def list(cls):
self = cls()
if not hasattr(self, "_stations"):
self._stations = {}
for abbr, charact in ws.config["stations"].items():
charact["parent_frame"] = get_frame(charact["parent_frame"])
full_name = charact.pop("name")
mask = charact.get("mask")
if mask:
# reverse direction of the mask to put it in counterclockwise
# to comply with the mathematical definition
charact["mask"] = (
(2 * pi - radians(mask["azims"][::-1])),
radians(mask["elevs"][::-1]),
)
# Deletion of all unknown characteristics from the charact dict
# and conversion to object attributes (they may be used by addons)
extra_charact = {}
for key in list(charact.keys()):
if key not in ("parent_frame", "latlonalt", "mask"):
extra_charact[key] = charact.pop(key)
self._stations[abbr] = create_station(abbr, **charact)
self._stations[abbr].abbr = abbr
self._stations[abbr].full_name = full_name
for key, value in extra_charact.items():
setattr(self._stations[abbr], key, value)
return self._stations
@classmethod
def get(cls, name):
self = cls()
try:
return get_frame(name)
except UnknownFrameError:
if name not in self.list().keys():
raise
return self.list()[name]
@classmethod
def save(cls, station):
self = cls()
ws.config["stations"].update(station)
ws.config.save()
if hasattr(self, "_stations"):
del self._stations
def wshook(cmd, *args, **kwargs):
if cmd in ("init", "full-init"):
name = "TLS"
ws.config.setdefault("stations", {})
try:
StationDb.get(name)
except UnknownFrameError:
StationDb.save(
{
name: {
"latlonalt": [43.604482, 1.443962, 172.0],
"name": "Toulouse",
"parent_frame": "WGS84",
}
}
)
log.info("Station {} created".format(name))
else:
log.warning("Station {} already exists".format(name))
def space_station(*argv):
"""Stations management
Usage:
space-station list [--map] [<abbr>]
space-station create <abbr> <name> <lat> <lon> <alt>
Options
list List available stations
create Interactively create a station
<abbr> Abbreviation
<name> Name of the station
<lat> Latitude in degrees
<lon> Longitude in degrees
<alt> Altitude in meters
-m, --map Display the station on a map
Latitude and longitude both accept degrees as float or as
    degrees, minutes and seconds of arc (e.g. 43°25'12")
"""
from pathlib import Path
import matplotlib.pyplot as plt
from .utils import docopt
from .map.background import set_background
args = docopt(space_station.__doc__)
station = StationDb()
if args["create"]:
abbr = args["<abbr>"]
name = args["<name>"]
latitude = args["<lat>"]
longitude = args["<lon>"]
altitude = args["<alt>"]
if "°" in latitude:
latitude = dms2deg(latitude)
else:
latitude = float(latitude)
if "°" in longitude:
longitude = dms2deg(longitude)
else:
longitude = float(longitude)
altitude = float(altitude)
log.info("Creation of station '{}' ({})".format(name, abbr))
log.debug(
"{} {}, altitude : {} m".format(
deg2dms(latitude, "lat"), deg2dms(longitude, "lon"), altitude
)
)
StationDb.save(
{
abbr: {
"name": name,
"latlonalt": (latitude, longitude, altitude),
"parent_frame": "WGS84",
}
}
)
else:
stations = []
for station in sorted(station.list().values(), key=lambda x: x.abbr):
if args["<abbr>"] and station.abbr != args["<abbr>"]:
continue
print(station.name)
print("-" * len(station.name))
lat, lon, alt = station.latlonalt
lat, lon = degrees([lat, lon])
print("name: {}".format(station.full_name))
print(
"altitude: {} m\nposition: {}, {}".format(
alt, deg2dms(lat, "lat"), deg2dms(lon, "lon")
)
)
print()
stations.append((station.name, lat, lon))
if args["--map"]:
plt.figure(figsize=(15.2, 8.2))
set_background()
plt.subplots_adjust(left=0.02, right=0.98, top=0.98, bottom=0.02)
plt.show()
|
# -*- coding: utf-8 -*-
import requests
from urllib.parse import urljoin
from os import getenv
import types
class Fieldbook(object):
"""
Client for Fieldbook API: https://github.com/fieldbook/api-docs
Initialize with a fieldbook_id and optionally the api key (name) and secret.
"""
BASE_URL = "https://api.fieldbook.com"
API_VERSION = "v1"
def __init__(self, book_id, key=None, secret=None):
super(Fieldbook, self).__init__()
self._key = key if key else getenv('FIELDBOOK_API_KEY', None)
self._secret = secret if secret else getenv('FIELDBOOK_API_SECRET', None)
self.book_id = book_id
self.session = requests.Session()
if self._key and self._secret:
self.set_auth(self._key, self._secret)
def set_auth(self, key, secret):
self._key = key
self._secret = secret
self.session.auth = (self._key, self._secret)
def _make_sheet_endpoints(self, endpoint_names):
def make_endpoint(name):
def sheet_endpoint(self, **kwargs):
return self._get(name, **kwargs)
return sheet_endpoint
for name in endpoint_names:
endpoint = make_endpoint(name)
endpoint.__doc__ = "Query '{}' sheet.".format(name)
setattr(self, name, types.MethodType(endpoint, self))
def _make_url(self, sheet_name=None):
return urljoin(Fieldbook.BASE_URL, "/".join((Fieldbook.API_VERSION, self.book_id, sheet_name or '')))
def _get(self, sheet_name=None, **kwargs):
if not self.session.auth and self._key and self._secret:
self.set_auth(self._key, self._secret)
url = self._make_url(sheet_name=sheet_name)
if 'row_id' in kwargs:
row_id = str(kwargs.pop('row_id'))
url = '{}/{}'.format(url, row_id)
resp = self.session.get(url, params=kwargs)
if not resp.ok:
            resp.raise_for_status()
return resp.json()
def sheets(self, make_endpoints=False):
"""Returns a list of sheets associated with a book"""
sheets = self._get()
if make_endpoints:
self._make_sheet_endpoints(sheets)
return sheets
def list(self, sheet_name, **kwargs):
"""Query a named sheet"""
return self._get(sheet_name=sheet_name, **kwargs)
def get(self, sheet_name, row_id, **kwargs):
"""Retrieve a row from a sheet by its id"""
kwargs['row_id'] = row_id
return self._get(sheet_name=sheet_name, **kwargs)
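# Hypothetical usage sketch (the book id and sheet name below are placeholders, not
# real values): the key/secret fall back to the FIELDBOOK_API_KEY / FIELDBOOK_API_SECRET
# environment variables, and every helper ultimately issues a GET through _get().
if __name__ == "__main__":
    book = Fieldbook("56789abcdef0123456789abc")  # placeholder book id
    print(book.sheets())                          # names of the sheets in the book
    print(book.list("tasks"))                     # all rows of a hypothetical "tasks" sheet
    print(book.get("tasks", row_id=1))            # a single row looked up by id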
|
"""
BROS
Copyright 2022-present NAVER Corp.
Apache License v2.0
Do 2nd preprocess on top of the result of the 'preprocess.sh' file.
Reference: https://github.com/microsoft/unilm/blob/master/layoutlm/deprecated/examples/seq_labeling/run_seq_labeling.py
"""
import json
import os
from collections import Counter
from tqdm import tqdm
from transformers import BertTokenizer
MAX_SEQ_LENGTH = 512
MODEL_TYPE = "bert"
VOCA = "bert-base-uncased"
INPUT_PATH = "./data"
OUTPUT_PATH = "../../datasets/funsd"
os.makedirs(OUTPUT_PATH, exist_ok=True)
os.makedirs(os.path.join(OUTPUT_PATH, "preprocessed"), exist_ok=True)
def main():
for dataset_split in ["train", "val"]:
print(f"dataset_split: {dataset_split}")
do_2nd_preprocess(dataset_split)
os.system(f"cp -r {os.path.join(INPUT_PATH, 'training_data')} {OUTPUT_PATH}")
os.system(f"cp -r {os.path.join(INPUT_PATH, 'testing_data')} {OUTPUT_PATH}")
os.system(f"cp {os.path.join(INPUT_PATH, 'labels.txt')} {OUTPUT_PATH}")
def do_2nd_preprocess(dataset_split):
label_fpath = os.path.join(INPUT_PATH, "labels.txt")
labels = get_labels(label_fpath)
tokenizer = BertTokenizer.from_pretrained(VOCA, do_lower_case=True)
cls_token_id = tokenizer.convert_tokens_to_ids("[CLS]")
sep_token_id = tokenizer.convert_tokens_to_ids("[SEP]")
pad_token_id = tokenizer.convert_tokens_to_ids("[PAD]")
ignore_index = -100
if dataset_split == "train":
mode = "train"
elif dataset_split == "val":
mode = "test"
else:
raise ValueError(f"Invalid dataset_split={dataset_split}")
examples = read_examples_from_file(INPUT_PATH, mode)
features = convert_examples_to_features(
examples,
labels,
MAX_SEQ_LENGTH,
tokenizer,
cls_token_at_end=bool(MODEL_TYPE in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if MODEL_TYPE in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(MODEL_TYPE in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(MODEL_TYPE in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if MODEL_TYPE in ["xlnet"] else 0,
pad_token_label_id=ignore_index,
)
# Save image ocr files
image_cnter = Counter()
preprocessed_fnames = []
for example, feature in tqdm(zip(examples, features), total=len(examples)):
# Example: guid, words, labels, boxes, actual_bboxes, file_name, page_size
# Feature: input_ids, input_mask, segment_ids, label_ids,
# boxes, actual_bboxes, file_name, page_size
this_file_name = "{}_{}.json".format(
example.file_name[: example.file_name.rfind(".")],
image_cnter[example.file_name],
)
image_cnter[example.file_name] += 1
data_obj = {}
# meta
data_obj["meta"] = {}
# data_obj["meta"]["image_size"]
# = example.page_size[::-1] + [3] # [height, width, rgb?]
height, width = example.page_size[::-1]
data_obj["meta"]["imageSize"] = {"width": width, "height": height}
data_obj["meta"]["voca"] = VOCA
if mode == "train":
data_obj["meta"]["image_path"] = os.path.join(
"training_data", "images", example.file_name
)
elif mode == "test":
data_obj["meta"]["image_path"] = os.path.join(
"testing_data", "images", example.file_name
)
else:
raise ValueError(f"Unknown mode={mode}")
# words
# text, tokens, boundingBox
data_obj["words"] = []
this_input_ids = []
for word, bb in zip(example.words, example.actual_bboxes):
word_tokens = []
for splitted_word in word.split():
word_tokens.append(
tokenizer.convert_tokens_to_ids(tokenizer.tokenize(splitted_word))
)
tokens = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(word))
word_obj = {
"text": word,
"tokens": tokens,
"boundingBox": [
[bb[0], bb[1]],
[bb[2], bb[1]],
[bb[2], bb[3]],
[bb[0], bb[3]],
],
}
data_obj["words"].append(word_obj)
this_input_ids.extend(tokens)
if VOCA == "bert-base-uncased":
feature_input_ids = feature.input_ids
assert feature_input_ids[0] == cls_token_id
feature_input_ids = feature_input_ids[
1 : feature_input_ids.index(sep_token_id)
]
assert feature_input_ids == this_input_ids
else:
raise NotImplementedError
# masks, labels
data_obj["parse"] = {}
if VOCA == "bert-base-uncased":
data_obj["parse"]["seq_len"] = sum(feature.input_mask)
data_obj["parse"]["input_ids"] = feature.input_ids
data_obj["parse"]["input_mask"] = feature.input_mask
data_obj["parse"]["label_ids"] = feature.label_ids
else:
raise NotImplementedError
# Save file name to list
preprocessed_fnames.append(os.path.join("preprocessed", this_file_name))
# Save to file
data_obj_file = os.path.join(OUTPUT_PATH, "preprocessed", this_file_name)
with open(data_obj_file, "w", encoding="utf-8") as fp:
json.dump(data_obj, fp, ensure_ascii=False)
# Save file name list file
preprocessed_filelist_file = os.path.join(
OUTPUT_PATH, f"preprocessed_files_{dataset_split}.txt"
)
with open(preprocessed_filelist_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(preprocessed_fnames))
def get_labels(path):
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f, open(
box_file_path, encoding="utf-8"
) as fb, open(image_file_path, encoding="utf-8") as fi:
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
for line, bline, iline in zip(f, fb, fi):
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(
InputExample(
guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
guid_index += 1
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
else:
splits = line.split("\t")
bsplits = bline.split("\t")
isplits = iline.split("\t")
assert len(splits) == 2
assert len(bsplits) == 2
assert len(isplits) == 4
assert splits[0] == bsplits[0]
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
box = bsplits[-1].replace("\n", "")
box = [int(b) for b in box.split()]
boxes.append(box)
actual_bbox = [int(b) for b in isplits[1].split()]
actual_bboxes.append(actual_bbox)
page_size = [int(i) for i in isplits[2].split()]
file_name = isplits[3].strip()
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(
InputExample(
                    guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return examples
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
input_ids,
input_mask,
segment_ids,
label_ids,
boxes,
actual_bboxes,
file_name,
page_size,
):
        assert all(
            0 <= coord <= 1000 for box in boxes for coord in box
        ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(
            boxes
        )
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
cls_token_box=[0, 0, 0, 0],
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_segment_id=0,
pad_token_label_id=-1,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
"""Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
file_name = example.file_name
page_size = example.page_size
width, height = page_size
# if ex_index % 10000 == 0:
# print("Writing example {} of {}".format(ex_index, len(examples)))
tokens = []
token_boxes = []
actual_bboxes = []
label_ids = []
for word, label, box, actual_bbox in zip(
example.words, example.labels, example.boxes, example.actual_bboxes
):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
actual_bboxes.extend([actual_bbox] * len(word_tokens))
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend(
[label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)
)
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
token_boxes += [cls_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
token_boxes = [cls_token_box] + token_boxes
actual_bboxes = [[0, 0, width, height]] + actual_bboxes
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = (
[0 if mask_padding_with_zero else 1] * padding_length
) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
token_boxes = ([pad_token_box] * padding_length) + token_boxes
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
token_boxes += [pad_token_box] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(token_boxes) == max_seq_length
# if ex_index < 5:
# print("*** Example ***")
# print("guid: {}".format(example.guid))
# print("tokens: {}".format(" ".join([str(x) for x in tokens])))
# print("input_ids: {}".format(" ".join([str(x) for x in input_ids])))
# print("input_mask: {}".format(" ".join([str(x) for x in input_mask])))
# print("segment_ids: {}".format(" ".join([str(x) for x in segment_ids])))
# print("label_ids: {}".format(" ".join([str(x) for x in label_ids])))
# print("boxes: {}".format(" ".join([str(x) for x in token_boxes])))
# print("actual_bboxes: {}".format(" ".join([str(x) for x in actual_bboxes])))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids,
boxes=token_boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return features
if __name__ == "__main__":
main()
|
import pytest
from hypothesis import given, settings
from hypothesis import strategies as st
from vyper import ast as vy_ast
@pytest.mark.fuzzing
@settings(max_examples=50, deadline=1000)
@given(
idx=st.integers(min_value=0, max_value=9),
array=st.lists(st.integers(), min_size=10, max_size=10),
)
def test_subscript(get_contract, array, idx):
source = """
@public
def foo(array: int128[10], idx: uint256) -> int128:
return array[idx]
"""
contract = get_contract(source)
vyper_ast = vy_ast.parse_to_ast(f"{array}[{idx}]")
old_node = vyper_ast.body[0].value
new_node = old_node.evaluate()
assert contract.foo(array, idx) == new_node.value
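# Added sketch mirroring the property above (assumes the same vy_ast folding
# behaviour the test relies on): evaluating a literal subscript outside hypothesis.
def _manual_subscript_fold_example():
    vyper_ast = vy_ast.parse_to_ast("[10, 20, 30][1]")
    old_node = vyper_ast.body[0].value
    return old_node.evaluate().value  # expected to fold to 20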
|
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2014-2018 <see AUTHORS.txt>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This package contains the plugins, delivered with the EMSM.
The instance folder contains also a ``plugins`` directory, where the user
can place plugins he'd like to run.
"""
|
#!/usr/bin/env python
import yaml, json
with open("testlist.yml", "r") as f:
    y = yaml.safe_load(f)
print("Here's the pretty YAML:")
print(yaml.dump(y))
with open("testlist.json", "r") as f:
j = json.load(f)
print("Here's the pretty JSON:")
print(json.dumps(j, indent=4))
|
from babel import localedata
from grow.pods import errors
from grow.pods import messages
import pickle
import os
import babel
import re
class Locales(object):
def __init__(self, pod):
self.pod = pod
def list_groups(self):
if 'locales' not in self.pod.yaml:
return []
return self.pod.yaml['locales'].keys()
def get_regions(self, group_name='default'):
if 'regions' not in self.pod.yaml:
return []
try:
return self.pod.yaml['locales'][group_name].get('regions', [])
except errors.PodConfigurationError:
return []
def get_languages(self, group_name='default'):
if 'locales' not in self.pod.yaml:
return []
try:
return self.pod.yaml['locales'][group_name].get('languages', [])
except errors.PodConfigurationError:
return []
def to_message(self):
message = messages.LocalesMessage()
message.groups = []
for group_name in self.list_groups():
group_message = messages.LocaleGroupMessage()
group_message.group_name = group_name
group_message.regions = self.get_regions(group_name)
group_message.languages = self.get_languages(group_name)
message.groups.append(group_message)
return message
class Locale(babel.Locale):
    RTL_REGEX = re.compile(r'^(he|ar|fa|ur)(\W|$)')
_alias = None
def __init__(self, language, *args, **kwargs):
# Normalize from "de_de" to "de_DE" for case-sensitive filesystems.
parts = language.rsplit('_', 1)
if len(parts) > 1:
language = '{}_{}'.format(parts[0], parts[1].upper())
super(Locale, self).__init__(language, *args, **kwargs)
@classmethod
def parse(cls, *args, **kwargs):
locale = super(Locale, cls).parse(*args, **kwargs)
# Weak attempt to permit fuzzy locales (locales for which we still have
# language and country information, but not a full localedata file for),
# but disallow completely invalid locales. See note at end of file.
if locale and locale.get_display_name() is None:
raise ValueError('{} is not a valid locale identifier'.format(args[0]))
return locale
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
if isinstance(other, basestring):
return str(self).lower() == other.lower()
return super(Locale, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<Locale: "{}">'.format(str(self))
@classmethod
def parse_codes(cls, codes):
return [cls.parse(code) for code in codes]
@property
def is_rtl(self):
return Locale.RTL_REGEX.match(self.language)
@property
def direction(self):
return 'rtl' if self.is_rtl else 'ltr'
@classmethod
def from_alias(cls, pod, alias):
podspec = pod.get_podspec()
config = podspec.get_config()
if 'localization' in config and 'aliases' in config['localization']:
aliases = config['localization']['aliases']
for custom_locale, babel_locale in aliases.iteritems():
if custom_locale == alias:
return cls.parse(babel_locale)
return cls.parse(alias)
def set_alias(self, pod):
podspec = pod.get_podspec()
self._alias = podspec.get_locale_alias(str(self).lower())
@property
def alias(self):
return self._alias
@alias.setter
def alias(self, alias):
self._alias = alias
# NOTE: Babel does not support "fuzzy" locales. A locale is considered "fuzzy"
# when a corresponding "localedata" file that matches a given locale's full
# identifier (e.g. "en_US") does not exist. Here's one example: "en_BD". CLDR
# does not have a localedata file matching "en_BD" (English in Bangladesh), but
# it does have individual files for "en" and also "bn_BD". As it turns
# out, localedata files that correspond to a locale's full identifier (e.g.
# "bn_BD.dat") are actually pretty light on the content (largely containing
# things like start-of-week information) and most of the "meat" of the data is
# contained in the main localedata file, e.g. "en.dat".
#
# Users may need to generate pages corresponding to locales that we don't
# have full localedata for, and until Babel supports fuzzy locales, we'll
# monkeypatch two Babel functions to provide partial support for fuzzy locales.
#
# With this monkeypatch, locales will be valid even if Babel doesn't have a
# localedata file matching a locale's full identifier, but locales will still
# fail with a ValueError if the user specifies a territory that does not exist.
# With this patch, a user can, however, specify an invalid language. Obviously,
# this patch should be removed when/if Babel adds support for fuzzy locales.
# Optionally, we may want to provide users with more control over whether a
# locale is valid or invalid, but we can revisit that later.
# See: https://github.com/grow/grow/issues/93
def fuzzy_load(name, merge_inherited=True):
localedata._cache_lock.acquire()
try:
data = localedata._cache.get(name)
if not data:
# Load inherited data
if name == 'root' or not merge_inherited:
data = {}
else:
parts = name.split('_')
if len(parts) == 1:
parent = 'root'
else:
parent = '_'.join(parts[:-1])
data = fuzzy_load(parent).copy()
filename = os.path.join(localedata._dirname, '%s.dat' % name)
try:
fileobj = open(filename, 'rb')
try:
if name != 'root' and merge_inherited:
localedata.merge(data, pickle.load(fileobj))
else:
data = pickle.load(fileobj)
localedata._cache[name] = data
finally:
fileobj.close()
except IOError:
pass
return data
finally:
localedata._cache_lock.release()
localedata.exists = lambda name: True
localedata.load = fuzzy_load
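# Illustrative sketch of the intent behind the monkeypatch above (an added example,
# not part of the original module): with the patched loader, a "fuzzy" identifier
# such as "en_BD" parses by inheriting from the parent "en" localedata, while an
# identifier with a bogus territory still raises ValueError in Locale.parse.
def _fuzzy_locale_sketch():
    fuzzy = Locale.parse('en_BD')    # no en_BD.dat; data is merged up from en.dat
    return fuzzy.get_display_name()  # resolves using the inherited locale data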
|
"""
Trains a VGG16/VGG19 model on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4_new2/' + job_name + '*'
total_epochs = 6
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_final4_new2/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
from assembler import ASM
from roomEditor import RoomEditor
import entityData
def addMultiworldShop(rom):
# Make a copy of the shop into GrandpaUlrira house
shop_room = RoomEditor(rom, 0x2A1)
re = RoomEditor(rom, 0x2A9)
re.objects = [obj for obj in shop_room.objects if obj.x is not None and obj.type_id != 0xCE] + re.getWarps()
re.entities = [(1, 6, 0x77), (2, 6, 0x77)]
re.animation_id = shop_room.animation_id
re.floor_object = shop_room.floor_object
re.store(rom)
# Fix the tileset
rom.banks[0x20][0x2EB3 + 0x2A9 - 0x100] = rom.banks[0x20][0x2EB3 + 0x2A1 - 0x100]
# Load the shopkeeper sprites instead of Grandpa sprites
entityData.SPRITE_DATA[0x77] = entityData.SPRITE_DATA[0x4D]
labels = {}
rom.patch(0x06, 0x2860, "00" * 0x215, ASM("""
shopItemsHandler:
; Render the shop items
ld h, $00
loop:
; First load links position to render the item at
ldh a, [$98] ; LinkX
ldh [$EE], a ; X
ldh a, [$99] ; LinkY
sub $0E
ldh [$EC], a ; Y
; Check if this is the item we have picked up
ld a, [$C509] ; picked up item in shop
dec a
cp h
jr z, .renderCarry
ld a, h
swap a
add a, $20
ldh [$EE], a ; X
ld a, $30
ldh [$EC], a ; Y
.renderCarry:
ld a, h
push hl
ldh [$F1], a ; variant
cp $03
jr nc, .singleSprite
ld de, ItemsDualSpriteData
call $3BC0 ; render sprite pair
jr .renderDone
.singleSprite:
ld de, ItemsSingleSpriteData
call $3C77 ; render sprite
.renderDone:
pop hl
.skipItem:
inc h
ld a, $07
cp h
jr nz, loop
; check if we want to pickup or drop an item
ldh a, [$CC]
and $30 ; A or B button
call nz, checkForPickup
; check if we have an item
ld a, [$C509] ; carry item
and a
ret z
; Set that link has picked something up
ld a, $01
ld [$C15C], a
call $0CAF ; reset spin attack...
; Check if we are trying to exit the shop and so drop our item.
ldh a, [$99]
cp $78
ret c
xor a
ld [$C509], a
ret
checkForPickup:
ldh a, [$9E] ; direction
cp $02
ret nz
ldh a, [$99] ; LinkY
cp $48
ret nc
ld a, $13
ldh [$F2], a ; play SFX
ld a, [$C509] ; picked up shop item
and a
jr nz, .drop
ldh a, [$98] ; LinkX
sub $08
swap a
and $07
ld [$C509], a ; picked up shop item
ret
.drop:
xor a
ld [$C509], a
ret
ItemsDualSpriteData:
db $60, $08, $60, $28 ; zol
db $68, $09 ; chicken (left)
ItemsSingleSpriteData: ; (first 3 entries are still dual sprites)
db $6A, $09 ; chicken (right)
db $14, $02, $14, $22 ; piece of power
;Real single sprite data starts here
db $00, $0F ; bomb
db $38, $0A ; rupees
db $20, $0C ; medicine
db $28, $0C ; heart
;------------------------------------trying to buy something starts here
talkHandler:
ld a, [$C509] ; carry item
add a, a
ret z ; check if we have something to buy
sub $02
ld hl, itemNames
ld e, a
ld d, b ; b=0
add hl, de
ld e, [hl]
inc hl
ld d, [hl]
ld hl, wCustomMessage
call appendString
dec hl
call padString
ld de, postMessage
call appendString
dec hl
ld a, $fe
ld [hl], a
ld de, $FFEF
add hl, de
ldh a, [$EE]
swap a
and $0F
add a, $30
ld [hl], a
ld a, $C9
call $2385 ; open dialog
call $3B12 ; increase entity state
ret
appendString:
ld a, [de]
inc de
and a
ret z
ldi [hl], a
jr appendString
padString:
ld a, l
and $0F
ret z
ld a, $20
ldi [hl], a
jr padString
itemNames:
dw itemZol
dw itemChicken
dw itemPieceOfPower
dw itemBombs
dw itemRupees
dw itemMedicine
dw itemHealth
postMessage:
db "For player X? Yes No ", $00
itemZol:
db m"Slime storm|100 {RUPEES}", $00
itemChicken:
db m"Coccu party|50 {RUPEES}", $00
itemPieceOfPower:
db m"Piece of Power|50 {RUPEES}", $00
itemBombs:
db m"20 Bombs|50 {RUPEES}", $00
itemRupees:
db m"100 {RUPEES}|200 {RUPEES}", $00
itemMedicine:
db m"Medicine|100 {RUPEES}", $00
itemHealth:
db m"Health refill|10 {RUPEES}", $00
TalkResultHandler:
ld hl, ItemPriceTableBCD
ld a, [$C509]
dec a
add a, a
ld c, a ; b=0
add hl, bc
ldi a, [hl]
ld d, [hl]
ld e, a
ld a, [$DB5D]
cp d
ret c
jr nz, .highEnough
ld a, [$DB5E]
cp e
ret c
.highEnough:
; Got enough money, take it.
ld hl, ItemPriceTableDEC
ld a, [$C509]
dec a
ld c, a ; b=0
add hl, bc
ld a, [hl]
ld [$DB92], a
; No longer picked up item
xor a
ld [$C509], a
ret
ItemPriceTableBCD:
dw $0100, $0050, $0050, $0050, $0200, $0100, $0010
ItemPriceTableDEC:
db $64, $32, $32, $32, $C8, $64, $0A
""", 0x6860, labels), fill_nop=True)
# Patch GrandpaUlrira to work as a multiworld shop
rom.patch(0x06, 0x1C0E, 0x1C89, ASM("""
ld a, $01
ld [$C50A], a ; this stops link from using items
;Draw shopkeeper
ld de, OwnerSpriteData
call $3BC0 ; render sprite pair
ldh a, [$E7] ; frame counter
swap a
and $01
call $3B0C ; set sprite variant
ldh a, [$F0]
and a
jr nz, checkTalkingResult
call $641A ; prevent link from moving into the sprite
call $645D ; check if talking to NPC
call c, ${TALKHANDLER:04x} ; talk handling
ldh a, [$EE] ; X
cp $18
ret nz
; Jump to other code which is placed on the old owl code. As we do not have enough space here.
jp ${SHOPITEMSHANDLER:04x}
checkTalkingResult:
ld a, [$C19F]
and a
        ret nz ; still talking
call $3B12 ; increase entity state
ld [hl], $00
ld a, [$C177] ; dialog selection
and a
ret nz
jp ${TALKRESULTHANDLER:04x}
OwnerSpriteData:
;db $60, $03, $62, $03, $62, $23, $60, $23 ; down
db $64, $03, $66, $03, $66, $23, $64, $23 ; up
;db $68, $03, $6A, $03, $6C, $03, $6E, $03 ; left
;db $6A, $23, $68, $23, $6E, $23, $6C, $23 ; right
""".format(**labels), 0x5C0E), fill_nop=True)
|
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the state manager module."""
from __future__ import unicode_literals
import copy
import os
import tempfile
import unittest
from unittest import mock
from turbinia import config
from turbinia.workers import TurbiniaTask
from turbinia.workers import TurbiniaTaskResult
from turbinia import state_manager
class TestPSQStateManager(unittest.TestCase):
"""Test PSQStateManager class."""
def _get_state_manager(self):
"""Gets a Datastore State Manager object for test."""
config.STATE_MANAGER = 'Datastore'
return state_manager.get_state_manager()
@mock.patch('turbinia.state_manager.datastore.Client')
def setUp(self, _):
self.remove_files = []
self.remove_dirs = []
self.state_manager = None
config.LoadConfig()
self.state_manager_save = config.STATE_MANAGER
self.test_data = {
'name': 'TestTask',
'request_id': 'TestRequestId',
'group_id': 'TestGroupId',
'status': 'TestStatus',
'saved_paths': ['testpath1', 'testpath2']
}
# Set up TurbiniaTask
self.base_output_dir = tempfile.mkdtemp()
self.task = TurbiniaTask(
base_output_dir=self.base_output_dir, name=self.test_data['name'],
request_id=self.test_data['request_id'],
group_id=self.test_data['group_id'])
self.task.output_manager = mock.MagicMock()
self.task.output_manager.get_local_output_dirs.return_value = (
'/fake/tmp/dir', self.base_output_dir)
# Set up TurbiniaTaskResult
self.result = TurbiniaTaskResult(base_output_dir=self.base_output_dir)
self.result.setup(self.task)
self.result.status = self.test_data['status']
self.result.saved_paths = self.test_data['saved_paths']
self.task.result = self.result
def tearDown(self):
config.STATE_MANAGER = self.state_manager_save
[os.remove(f) for f in self.remove_files if os.path.exists(f)]
[os.rmdir(d) for d in self.remove_dirs if os.path.exists(d)]
os.rmdir(self.base_output_dir)
@mock.patch('turbinia.state_manager.datastore.Client')
def testStateManagerGetTaskDict(self, _):
"""Test State Manager get_task_dict()."""
self.state_manager = self._get_state_manager()
task_dict = self.state_manager.get_task_dict(self.task)
# Make the returned task_dict contains all of our test data
self.assertEqual(task_dict['name'], self.test_data['name'])
self.assertEqual(task_dict['request_id'], self.test_data['request_id'])
self.assertEqual(task_dict['status'], self.test_data['status'])
self.assertEqual(len(task_dict['saved_paths']), 2)
self.assertEqual(task_dict['group_id'], self.test_data['group_id'])
self.assertTrue('instance' in task_dict)
self.assertIn(self.test_data['saved_paths'][0], task_dict['saved_paths'])
@mock.patch('turbinia.state_manager.datastore.Client')
def testStateManagerValidateDataValidDict(self, _):
"""Test State Manager _validate_data() base case."""
self.state_manager = self._get_state_manager()
# pylint: disable=protected-access
test_data = self.state_manager._validate_data(self.test_data)
self.assertDictEqual(test_data, self.test_data)
@mock.patch('turbinia.state_manager.datastore.Client')
def testStateManagerValidateDataInvalidDict(self, _):
"""Test State Manager _validate_data() base case."""
self.state_manager = self._get_state_manager()
invalid_dict = copy.deepcopy(self.test_data)
invalid_dict['status'] = 'A' * state_manager.MAX_DATASTORE_STRLEN + 'BORKEN'
# pylint: disable=protected-access
test_data = self.state_manager._validate_data(invalid_dict)
self.assertListEqual(list(test_data.keys()), list(self.test_data.keys()))
self.assertNotEqual(test_data['status'], self.test_data['status'])
self.assertLessEqual(
len(test_data['status']), state_manager.MAX_DATASTORE_STRLEN)
|
def _gcs_upload_impl(ctx):
targets = []
for target in ctx.files.data:
targets.append(target.short_path)
ctx.file_action(
output = ctx.outputs.targets,
content = "\n".join(targets),
)
ctx.file_action(
content = "%s --manifest %s --root $PWD -- $@" % (
ctx.attr.uploader.files_to_run.executable.short_path,
ctx.outputs.targets.short_path,
),
output = ctx.outputs.executable,
executable = True,
)
return struct(
runfiles = ctx.runfiles(
files = ctx.files.data + ctx.files.uploader +
[ctx.version_file, ctx.outputs.targets]
)
)
gcs_upload = rule(
attrs = {
"data": attr.label_list(
mandatory = True,
allow_files = True,
),
"uploader": attr.label(
default = Label("//defs:gcs_uploader"),
allow_files = True,
),
},
executable = True,
outputs = {
"targets": "%{name}-targets.txt",
},
implementation = _gcs_upload_impl,
)
|
# from code.transformer_vid.utils import convert_weights
# import rotary_embedding_torch
from torch.nn.modules.activation import GELU, ReLU
# from data.OneCombo3.trainer import TrainerConfig
import math
import numpy as np
import itertools
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torchvision.models.video import r3d_18
# from ResNet3D import r3d_18
from scipy.optimize import linear_sum_assignment
# from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding
from einops.layers.torch import Rearrange
logger = logging.getLogger(__name__)
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): # nn.Conv3d,
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
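# Small added demo of convert_weights (not called anywhere): after conversion the
# parameters of supported layers are stored in fp16.
def _convert_weights_demo():
    layer = nn.Linear(4, 4)
    convert_weights(layer)
    return layer.weight.dtype  # torch.float16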
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.2
resid_pdrop = 0.2
attn_pdrop = 0.2
pos_pdrop = 0.2
temp_pdrop = 0.2
pos_emb = True
temp_emb = True
start_prune = 30
epoch = 0
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k, v in kwargs.items():
setattr(self, k, v)
class neuralGPTConfig:
""" base GPT config, params common to all GPT versions """
n = 0.4
im_drop = 0.2
id_drop = n
embd_pdrop = n
resid_pdrop = n
attn_pdrop = n
pos_pdrop = n
temp_pdrop = n
pos_emb = True
temp_emb = True
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k, v in kwargs.items():
setattr(self, k, v)
class GPT1Config(GPTConfig):
""" GPT-1 like network roughly 125M params """
n_layer = 12
n_head = 12
n_embd = 768
class VideoFeaturesExtractor(nn.Module):
"""
R3D: (3 x T x H x W)
H, W = 112
"""
def __init__(self):
super().__init__()
self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2]))
convert_weights(self.backbone)
# # freeze backbone
# for k, v in self.backbone.named_parameters():
# v.requires_grad = False
def forward(self, x):
# B = Batch, T, C, Fm, H, W
features = self.backbone(x) # (B, C, T, H, W)
B, C, T, H, W = features.shape
features = features.permute(0, 2, 3, 4, 1)
features = features.view(B, -1, C)
return features
class VideoEncoder(nn.Module):
def __init__(self):
super().__init__()
self.to_patch_embedding = nn.Sequential(
Rearrange('b c t (h p1) (w p2) -> b (t h w) (p1 p2 c)', p1=16, p2=16)
)
def forward(self, x):
return self.to_patch_embedding(x)
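# Shape sketch for the patch embedding above (hypothetical sizes, not executed):
# a clip of shape (B, C=3, T=20, H=64, W=64) is cut into 16x16 patches, giving
# (B, 20 * 4 * 4, 16 * 16 * 3) = (B, 320, 768) patch tokens.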
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
self.config = config
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
self.register_buffer("mask", self.build_mask(config.block_size))
self.n_head = config.n_head
self.att = None
self.T = config.block_size
# self.rotary_embedding = RotarySpatioTemporalEmbedding(config)
def build_mask(self, block_size):
mask = torch.tril(torch.ones((block_size, block_size)),
).view(1, 1, block_size, block_size)
return mask
def generate_sparse_mask(self, att, p, config):
"""
Generate a sparse mask according to p.
"""
assert p >= 0 and p <= 1, "p should be in [0, 1]"
T = config.block_size
mask = torch.rand((1, T)) < p
mask = mask.repeat(T, 1)
mask[0, 0] = False # don't mask 1st step
        # check if any step is fully masked and unmask it
idx_all_true = (True == torch.all(mask, dim=0)).nonzero()
for step in idx_all_true:
sampler = torch.distributions.Uniform(low=0, high=step.item()+1)
idx_false = sampler.sample((1,1)).long()
mask[step, idx_false] = False
# mask = mask.repeat(T, 1)
mask = mask.view(1, 1, T, T).cuda() if att.is_cuda else mask.view(1, 1, T, T)
att = att.masked_fill(mask, float('-inf'))
return att
def forward(self, x, pad=None, dtx=None):
# B = Batch, T = Sequence, C = n_embed
B, T, C = x.size()
# calculate query, key, values for all head in batch and move head forward to the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# # apply rotary embeddings
# if dtx is not None:
# q, k = self.rotary_embedding(q, k, dtx)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
if self.training:
att = self.generate_sparse_mask(att, 0.25, self.config)
if pad is not None:
for idx, i in enumerate(pad):
att[idx, :, :, self.T - i:] = float('-inf') # only able to see first padding token
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
self.att = att
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
class PositionalEmbedding(nn.Module):
""" Implement the PE function. """
def __init__(self, n_embd, p_drop, max_len=1500):
super().__init__()
self.dropout = nn.Dropout(p=p_drop)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, n_embd)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_embd, 2) *
-(math.log(10000.0) / n_embd))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
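# Note: unlike the classic "Attention Is All You Need" usage, this module returns
# (a dropped-out copy of) the positional encodings themselves rather than x + pe;
# callers are expected to add the result to their token embeddings. Rough shape
# sketch (illustrative, not executed):
#     pe = PositionalEmbedding(n_embd=768, p_drop=0.2)
#     pos = pe(torch.zeros(4, 100, 768))  # -> (1, 100, 768), broadcastable over batch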
# class RotarySpatioTemporalEmbedding(nn.Module):
# """ Rotary temporal embeddings - block_size = id_blk_sz """
# def __init__(self, config):
# super().__init__()
# self.frame_block_size = config.frame_block_size
# self.id_block_size = config.id_block_size
# self.emb = RotaryEmbedding(dim=32)
# def forward(self, q, k, t):
# b = t.shape[0]
# tf = self.frame_block_size
# queries = []
# keys = []
# for B in range(b):
# im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2))
# im_pos_emb = torch.arange(self.frame_block_size)
# im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0)
# id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size)
# freqs = self.emb(torch.cat(im_emb, id_temp_emb))
# queries.append(apply_rotary_emb(freqs, q[B][None, ...]))
# keys.append(apply_rotary_emb(freqs, k[B][None, ...]))
# q, k = torch.cat(queries), torch.cat(keys)
# return q, k
class TemporalEmbedding(nn.Module):
""" encoding temporal information using fourrier signals """
def __init__(self, n_embd, p_drop, max_len=1500):
super().__init__()
self.dropout = nn.Dropout(p=p_drop)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, n_embd)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_embd, 2) *
-(math.log(10000.0) / n_embd))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
class LearntTemporalEmbedding(nn.Module):
"""
Project B x T x 1 time sequence to
B x T x C
"""
def __init__(self, block_sz, n_embd, p_drop=0.2):
super().__init__()
self.temp_emb = nn.Sequential(
nn.Linear(1, n_embd // 2),
nn.GELU(),
nn.Linear(n_embd // 2, n_embd),
nn.Dropout(p_drop)
)
def forward(self, x):
return self.temp_emb(x.unsqueeze(-1))
class Decoder(nn.Module):
def __init__(self, config):
super().__init__()
# decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head,
# activation='gelu', dropout=0.2, batch_first=True)
# self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer)
self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head,
num_encoder_layers=3, num_decoder_layers=config.n_layer,
activation="gelu", dropout=0.4, batch_first=True)
self.register_buffer("tgt_mask", self.generate_square_subsequent_mask(config.id_block_size))
# self.register_buffer("tgt_pad_mask", self.generate_padding_mask(config.ids_block_size))
self.T = config.id_block_size
def generate_square_subsequent_mask(self, sz: int, pad=None):
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
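    # For sz = 3 the mask built above is (row = target step, col = source step):
    #     [[0., -inf, -inf],
    #      [0.,   0., -inf],
    #      [0.,   0.,   0.]]
    # so each position can only attend to itself and earlier positions.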
def generate_padding_mask(self, sz: int, pad=None):
r"""Build a (B x T) mask that resides on the GPU and can be
manipulated by build_padding_mask according to padded sequence
"""
mask = torch.zeros(1, sz, dtype=torch.bool)
return mask
def generate_sparse_mask(self, sz: int, pad=None):
r""" Build a square mask that employs
teacher forcing according to P
"""
rand_mat = torch.rand(1, sz)
k = round(0.75 * sz)
k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:]
bool_tensor = rand_mat <= k_th_quant
mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask
def build_padding_mask(self, tgt, pad):
# mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1)
mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool)
for B, P in enumerate(pad):
mask[B, self.T - P:] = True
return mask # .to(torch.cuda.current_device())
def forward(self, tgt, memory, pad):
# padding_mask = self.build_padding_mask(tgt, pad)
# tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask
return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask,
tgt_key_padding_mask=None)
class ProjectNorm(nn.Module):
def __init__(self, feat_size, target_size):
super().__init__()
self.ln = nn.LayerNorm(feat_size)
self.mlp = nn.Sequential(
nn.Linear(feat_size, math.floor(2 * feat_size), bias=False),
nn.GELU(),
nn.Linear(math.floor(2 * feat_size), target_size, bias=False),
)
def forward(self, x):
return self.mlp(self.ln(x))
class TimeProjection(nn.Module):
def __init__(self, seq_size, id_seq_size, feat_size, target_size):
super().__init__()
self.mlp_seq = nn.Sequential(
nn.Linear(seq_size, id_seq_size),
nn.ReLU(),
nn.Dropout(p=0.3),
nn.Linear(id_seq_size, id_seq_size)
)
self.mlp_t = nn.Sequential(
nn.Linear(feat_size, feat_size // 2),
nn.ReLU(),
nn.Dropout(p=0.3),
nn.Linear(feat_size // 2, target_size)
)
def forward(self, x):
x = x.permute(0, 2, 1) # B, T, C -> B, C, T
x = self.mlp_seq(x) # B, C, T / 2
x = x.permute(0, 2, 1) # B, T / 2, C
return self.mlp_t(x) # B, T / 2, 1
class PSTHProjection(nn.Module):
"""Takes Last Output of Block -> (B, C)
Builds PSTH table
"""
def __init__(self, config):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd, bias=False),
nn.Dropout(p=0.2),
nn.GELU(),
nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
)
def forward(self, x):
return self.mlp(x)
# class PSTHProjection(nn.Module):
# def __init__(self, config):
# super().__init__()
# self.mlp_seq = nn.Sequential(
# nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.id_block_size // 2, 1, bias=False)
# )
# self.mlp_t = nn.Sequential(
# nn.Linear(config.n_embd, config.n_embd * 4, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
# )
# def forward(self, x):
# x = x.transpose(-1, -2) # B, T, C -> B, C, T
# x = self.mlp_seq(x) # B, C, 1
# x = x.transpose(-2, -1) # B, 1, Vocab_id
# return self.mlp_t(x)
class TimeRNN(nn.Module):
def __init__(self, feat_size, target_size):
super().__init__()
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(),
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x, pad=None, dtx=None):
x = x + self.attn(self.ln1(x), pad)
x = x + self.mlp(self.ln2(x))
return x
class BlockSequential(nn.Sequential):
def forward(self, x, pad=None, dtx=None):
for module in self._modules.values():
x = module(x, pad, dtx)
return x
class DiceLossPSTH(nn.Module):
def __init__(self, size_average=True, smooth=1):
super().__init__()
def cross_entropy(self, input, target):
return torch.mean(-torch.sum(target * torch.log(input), 1))
def forward(self, logits, targets, smooth=1, class_weights=None):
total_logits = F.layer_norm(torch.sum(logits, dim=-2), [logits.size()[-1]])
# probs = F.log_softmax(logits, dim=-1)
probs = F.softmax(total_logits, dim=-1)
# logits = F.gelu(logits)
# probs = logits / (logits.max(dim=-1).values.unsqueeze(-1))
# flatten label and prediction tensors
outputs = probs.contiguous().view(-1)
targets = targets.contiguous().view(-1)
labels = torch.zeros_like(outputs)
labels[targets] = 1 / len(targets)
# intersection = (outputs * labels).sum()
# dice = (2. * intersection + smooth) / (outputs.sum() + labels.sum() + smooth)
return self.cross_entropy(outputs[None, ...], labels[None, ...])
class SetLoss(nn.Module):
def __init__(self):
super().__init__()
def cross_entropy(self, input, target):
return torch.mean(-torch.sum(target * torch.log(input), 1))
def forward(self, logits, targets):
targets = targets.contiguous().view(-1)
loss = 0
for n_step, n_logits in enumerate(logits):
n_logits = F.softmax(n_logits, dim=-1)
n_target = targets[n_step:]
n_target_dist = torch.zeros_like(n_logits)
if len(n_target) != 0:
n_target_dist[n_target] = 1 / len(n_target)
loss += self.cross_entropy(n_logits[None,...], n_target_dist[None, ...])
return loss / len(logits)
class TruncatedLoss(nn.Module):
def __init__(self, q=0.8, k=0.2, trainset_size=50000):
super(TruncatedLoss, self).__init__()
self.q = q
self.k = k
self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False)
def forward(self, logits, targets, indexes):
p = F.softmax(logits, dim=-1)
Yg = torch.gather(p, 2, targets.unsqueeze(2))
loss = ((1-(Yg**self.q))/self.q)*self.weight[indexes] - ((1-(self.k**self.q))/self.q)*self.weight[indexes]
loss = torch.mean(loss)
return loss
def update_weight(self, logits, targets, indexes):
p = F.softmax(logits, dim=-1)
Yg = torch.gather(p, 2, targets.unsqueeze(2))
Lq = ((1-(Yg**self.q))/self.q)
Lqk = np.repeat(((1-(self.k**self.q))/self.q), targets.size(0))
Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor)
Lqk = torch.unsqueeze(Lqk, 1)
condition = torch.gt(Lqk, Lq)
self.weight[indexes] = condition.type(torch.cuda.FloatTensor)
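# Behaviour note for TruncatedLoss above: (1 - p^q) / q tends to the usual
# cross-entropy -log(p) as q -> 0 and equals 1 - p (an MAE-like loss) at q = 1;
# update_weight() zeroes a sample's weight whenever its predicted target
# probability is at or below the truncation threshold k.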
# class PSTHLOSS(nn.Module):
# def __init__(self):
# super().__init__()
# def forward(self, logits, targets):
# total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension
# probs = F.softmax(total_logits, dim=-1)
# outptu
class HungarianMatcher(nn.Module):
def __init__(self):
super().__init__()
@torch.no_grad()
def forward(self, logits, targets):
T, C = logits.size()
probs = F.softmax(logits, dim=-1)
cost_id = (1 - probs[:, targets]).cpu().view(T, -1).unsqueeze(0)
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_id.split(len(targets), -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class KLDivLoss(nn.Module):
def __init__(self):
super().__init__()
self.log_softmax = nn.LogSoftmax(dim=-1)
self.KLdiv = nn.KLDivLoss()
def forward(self, logits, targets):
log_probs = self.log_softmax(logits)
return self.KLdiv(log_probs.long(), targets)
class PoissonCrossEntropyLoss(nn.Module):
def __init__(self):
super().__init__()
self.log_softmax = nn.LogSoftmax(dim=-1)
# self.softmax = nn.Softmax(dim=-1)
self.nll_poisson = nn.PoissonNLLLoss()
# self.nll_poisson = nn.NLLLoss()
def forward(self, logits, targets):
log_probs = self.log_softmax(logits)
return self.nll_poisson(log_probs, targets)
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
self.device = 'cpu'
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.config = config
# input embedding stem
self.n_embd = config.n_embd
self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd)
self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2)
# self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd))
self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd))
# self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2)
# self.temp_emb = RotaryTemporalEmbedding(config.id_block_size)
self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd)
self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd)
self.id_drop = nn.Dropout(config.id_drop)
self.im_drop = nn.Dropout(config.im_drop)
self.drop = nn.Dropout(config.embd_pdrop)
# -- Visual Backbone -- #
# self.visual_backbone = VideoFeaturesExtractor()
self.video_encoder = VideoEncoder()
frame_temp_emb = torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0)
self.register_buffer("frame_temp_emb_seq", frame_temp_emb)
# -- Contrastive Loss -- ##
# self.proj_id = ProjectNorm(config.n_embd, config.n_embd)
# self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd) # im_shape
## -- IM_Decoder -- ##
# self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)])
# self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)])
# self.ln_f_id = nn.LayerNorm(config.n_embd)
# self.ln_f_im = nn.LayerNorm(config.n_embd)
## -- Decoder -- ##
# self.ln_f = nn.LayerNorm(config.n_embd)
## GPT
# self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)])
# self.ln_f = nn.LayerNorm(config.n_embd)
## enc_dec
self.state_decoder = Decoder(config)
self.ln_f_state_dec = nn.LayerNorm(config.n_embd)
self.stimulus_decoder = Decoder(config)
self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
## -- Time -- ##
# self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt)
# self.proj_time = ProjectNorm(config.n_embd, config.n_dt)
# self.proj_time = ProjectNorm(config.n_embd, 1)
## -- PSTH -- ##
# self.proj_psth = PSTHProjection(config)
# Loss
# self.dice_loss = DiceLossPSTH()
# self.poisson_loss = PoissonCrossEntropyLoss()
# self.hungarian_matcher = HungarianMatcher()
# self.kldiv_loss = KLDivLoss()
# self.truncated_loss = TruncatedLoss(trainset_size=config.data_size)
# self.set_loss = SetLoss()
# self.a = torch.tensor(0.5, requires_grad=True)
self.block_size = config.block_size
self.apply(self._init_weights)
if config.class_weights is not None:
self.register_buffer("class_weights", config.class_weights)
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""
        Separates parameters into those that will experience weight decay and those that will not.
"""
if train_config.decay_weights:
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
else: no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
black_list_mods = ['pos_emb', 'temp_emb']
for mods in black_list_mods:
for name, param in self.named_parameters():
if mods in name:
no_decay.add(name) # also pos_emb
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
no_decay -= decay & no_decay
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
else:
parameters = self.parameters()
optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate)
return optimizer
def process_features(self, x):
# batch, block_size, feature
p_idx = x['id_prev']
idx = x['id']
dtx = x['dt']
dtx_prev = x['dt_prev']
frames = self.video_encoder(x['frames'])
pad = x['pad']
b, t = idx.size()
# b_p, t_p = p_idx.size()
bf, tf = frames.size()[0:2]
# forward the GPT model
'''
        Positional and temporal embeddings are implemented in multiple ways: learnt,
        Fourier decomposition, and, in the case of time, just passed as is.
'''
# # Embeddings
prev_id_position_embeddings = 0 # self.pos_emb(p_idx)
prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float())
id_position_embeddings = 0 # self.pos_emb(idx)
im_position_embeddings = self.pos_emb_frames
temporal_embeddings = self.temp_emb(dtx.float())
# Extract ID features
prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings)
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings
token_embeddings = self.id_drop(token_embeddings)
# Extract image features and add time embeddings
im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq)
im_embeddings = frames # self.tok_emb(frames)
im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings
im_embeddings = self.im_drop(im_embeddings) # separate pos emb?
# Tidy up
features = dict()
features['id_prev'] = prev_token_embeddings
features['id'] = token_embeddings
features['frames'] = im_embeddings
return features, pad
def perceiver(self, features, pad):
x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad)
x = self.ln_f_state_dec(x)
x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
x = self.ln_f_stimulus_dec(x)
logits = self.head(x)
return logits, x
def enc_dec(self, features, pad):
x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
x = self.ln_f_stimulus_dec(x)
logits = self.head(x)
return logits, x
def GPTdecoder(self, features, pad, dtx=None):
# image + neural features
x = torch.cat((features['frames'], features['id']), dim=1)
# Decoder
x = self.blocks(x, pad, dtx) # (B, T, C)
x = self.ln_f(x)
logits = self.head(x)
# print(logits.shape) # (B, T, Vocab)
# logits_psth = x[:, -1] # (B, C)
return logits, x
def forward(self, x, targets=None):
idx = x['id']
dtx = x['dt']
frames = x['frames']
pad = x['pad']
b, t = idx.size()
# b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1]
bf, tf = frames.size()[0:2]
tf = self.config.frame_block_size
# assert t + tf == self.config.block_size, f"{tf} {t}"
# assert t <= self.block_size, "Cannot forward, model block size is exhausted"
features, pad = self.process_features(x)
logits, x = self.perceiver(features, pad)
# logits, x = self.enc_dec(features, pad)
# logits, x = self.GPTdecoder(features, pad)
# time = self.proj_time(x) # (B, T_id, 1)
# print(x[:, 0].shape)
# psth = self.proj_psth(x) # (B, Vocab_id)
# if targets, calculate loss
# calculate loss on logits up to padding token for each batch
loss = None
loss_frames = 0
loss_id = []
loss_time = []
loss_dice = []
loss_psth = []
loss_hungarian = []
if targets is not None:
# loss_psth = self.dice_loss(psth, targets['modes'][:, tf:])
for B, P in enumerate(pad):
tf = 0
# im_logits = logits[B, :tf]
# im_targets = targets['frames'][B, :tf]
# loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1))
id_logits = logits[B, tf:tf + t - P]
id_targets = targets['id'][B, :t - P]
loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1))
# if self.config.epoch >= 15:
# self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
# loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
# time_preds = time[B, :t - P]
# time_targets = targets['dt'][B, :t - P]
# loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1))
# loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets)
# loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size))
# if len(id_targets) > 0:
# indices = self.hungarian_matcher(id_logits, id_targets)
# probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]]
# loss_hungarian_ = F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device)
# loss_hungarian.append(loss_hungarian_)
# # psth = self.proj_psth(x[B, -1]) # from the EOS position
# loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets)))
# loss_psth_ = self.dice_loss(id_logits, id_targets)
# loss_psth.append(torch.nan_to_num(loss_psth_))
# loss_time.append(torch.nan_to_num(loss_time_))
loss_id.append(torch.nan_to_num(loss_id_))
loss = dict()
# loss['frames'] = loss_frames / (b / 3)
loss['id'] = sum(loss_id) / (b) # sum(loss_id) / (b * 2) # / len(loss_id)
# loss['time'] = sum(loss_time) / (b * 2)
# loss['dice'] = sum(loss_dice) / len(loss_dice)
# loss['dt'] = loss_time / (b * 50)
# loss['hungarian'] = sum(loss_hungarian) / (b * 2)
# loss['psth'] = sum(loss_psth) / (b * 2)
for key in list(loss):
if isinstance(loss[key], float):
del loss[key]
preds = dict()
preds['logits'] = logits # [:, tf:] # only id logits
# preds['dt'] = time
return preds, features, loss
|
from . import PrioritizationTechnique
from collections import Counter
class UniqueSearch(PrioritizationTechnique):
def __init__(self, binary, target_os, target_arch, similarity_func=None):
super(UniqueSearch, self).__init__(binary=binary, target_os=target_os, target_arch=target_arch)
self.uniqueness = dict()
self.similarity = dict()
self.similarity_func = similarity_func or self.l2_similarity
def update(self, seeds):
super(UniqueSearch, self).update(seeds=seeds)
if all([s in self.uniqueness for s in seeds]): return
# clean up
self.uniqueness = {k:(0,0) for k in seeds}
self.similarity = {(a,b):v for (a,b),v in self.similarity.items() if a in seeds and b in seeds}
def update_average(seed, new):
prev, size = self.uniqueness[seed]
new_average = float(prev * size + new) / (size + 1)
self.uniqueness[seed] = new_average, size + 1
for a in seeds:
for b in seeds:
similarity = self.similarity.get((a, b), None) or self.similarity_func(a, b)
self.similarity[(a, b)] = self.similarity[(b, a)] = similarity
update_average(a, similarity)
update_average(b, similarity)
self.uniqueness = {k:v for k,(v,_) in self.uniqueness.items()}
def pop_best(self, not_drilled):
best = max({k:v for k,v in self.uniqueness.items() if k in not_drilled}, key=self.uniqueness.get)
self.uniqueness.pop(best)
return best
def l2_similarity(self, seed_a, seed_b):
"""
        Similarity based on the (L2) distance between the counts of the state addresses in the history of the path.
:param seed_a: The first seed to compare
:param seed_b: The second seed to compare
"""
if seed_a == seed_b: return 1.0
try:
count_a = Counter(self.trace(seed_a))
count_b = Counter(self.trace(seed_b))
normal_distance = sum((count_a.get(addr, 0) - count_b.get(addr, 0)) ** 2
for addr in set(list(count_a.keys()) + list(count_b.keys()))) ** 0.5
return 1.0 / (1 + normal_distance)
        except Exception: return 0.0
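# Minimal standalone sketch of the similarity metric above, operating directly on
# two hypothetical address traces (plain lists of basic-block addresses) instead
# of seeds, so it does not depend on self.trace().
def _l2_similarity_from_traces(trace_a, trace_b):
    count_a, count_b = Counter(trace_a), Counter(trace_b)
    distance = sum((count_a.get(addr, 0) - count_b.get(addr, 0)) ** 2
                   for addr in set(count_a) | set(count_b)) ** 0.5
    return 1.0 / (1 + distance)
# e.g. _l2_similarity_from_traces([0x1000, 0x1004, 0x1000], [0x1000, 0x1008]) ~= 0.37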
|
# Synthesis of multifractal random walk and derived processes.
#
# Roberto Fabio Leonarduzzi
# January, 2019
import numpy as np
from .fbm import fgn
from .pzutils import gaussian_cme, gaussian_chol
from numpy.fft import fft, ifft
# import math
# import matplotlib.pyplot as plt
def mrw(shape, H, lam, L, sigma=1, method='cme', z0=(None, None)):
'''
    Create a realization of a multifractal random walk (MRW) using circulant
    matrix embedding.
Parameters
----------
shape : int | tuple(int)
If scalar, it is the number of samples. If tuple it is (N, R),
the number of samples and realizations, respectively.
H : float
Hurst exponent
lam : float
Lambda, intermittency parameter
L : float
Integral scale
sigma : float
Variance of process
Returns
-------
mrw : ndarray
Synthesized mrw realizations. If `shape` is scalar,
        mrw is of shape (N,). Otherwise, it is of shape (N, R).
References
----------
.. [1] Bacry, Delour, Muzy, "Multifractal Random Walk", Physical Review E,
2001
'''
try:
N, R = shape
do_squeeze = False
except TypeError: # shape is scalar
N, R = shape, 1
do_squeeze = True
    # Is 0.5 or 0 the lower bound? Check the literature.
if not 0 <= H <= 1:
raise ValueError('H must satisfy 0 <= H <= 1')
if L > N:
raise ValueError('Integral scale L is larger than data length N')
# 1) Gaussian process w
w = gaussian_w(N, R, L, lam, 1, method, z0[1])
# Adjust mean to ensure convergence of variance
r = 1/2 # see Bacry, Delour & Muzy, Phys Rev E, 2001, page 4
w = w - np.mean(w, axis=0) - r * lam**2 * np.log(L)
# 2) fGn e
e = fgn((N, R), H, sigma, method=method, z0=z0[0])
# 3) mrw
mrw = np.cumsum(e * np.exp(w), axis=0)
return mrw.squeeze() if do_squeeze else mrw
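# Example (sketch): synthesize 8 realizations of 2**14 samples with H = 0.72,
# intermittency lambda = 0.08 and integral scale L = 2**12 (illustrative values):
#     x = mrw((2**14, 8), H=0.72, lam=0.08, L=2**12)
#     increments = np.diff(x, axis=0)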
def mrw_cumul(shape, c1, c2, L, **kwargs):
'''
Wrapper for mrw generation from cumulants.
Parameters
----------
shape : int | tuple(int)
If scalar, it is the number of samples. If tuple it is (N, R),
the number of samples and realizations, respectively.
c1 : float
First order cumulant
c2 : float
Second order cumulant
L : float
Integral scale
kwargs : dict
Optional parameters passed to :obj:`mrw`
Returns
-------
mrw : ndarray
Synthesized mrw realizations. If `shape` is scalar,
        mrw is of shape (N,). Otherwise, it is of shape (N, R).
References
----------
.. [1] Bacry, Delour, Muzy, "Multifractal Random Walk", Physical Review E,
2001
'''
H = c1 + c2
lam = np.sqrt(-c2)
return mrw(shape, H, lam, L, **kwargs)
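# Worked example of the mapping above: c1 = 0.72 and c2 = -0.0064 give
# H = c1 + c2 = 0.7136 and lam = sqrt(-c2) = 0.08.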
def skewed_mrw(shape, H, lam, L, K0=1, alpha=1, sigma=1, dt=1, beta=1,
do_mirror=False):
'''
Create skewed mrw as in Pochart & Bouchaud
    Defaults to :math:`\\Delta_t=1` and :math:`\\beta=1`.
'''
try:
N, R = shape
do_squeeze = False
except TypeError: # shape is scalar
N, R = shape, 1
do_squeeze = True
    # Is 0.5 or 0 the lower bound? Check the literature.
if not 0 <= H <= 1:
raise ValueError('H must satisfy 0 <= H <= 1')
if L / dt > N:
raise ValueError('Integral scale L/dt is larger than data length N')
# 1) Gaussian process w
w = gaussian_w(N, R, L, lam, dt)
# Adjust mean to ensure convergence of variance
r = 1 # see Bacry, Delour & Muzy, Phys Rev E, 2001, page 4
w = w - np.mean(w, axis=0) - r * lam**2 * np.log(L / dt)
# 2) fGn e
e = fgn((2*N + 1, R), H, sigma, dt)
# 3) Correlate components
past = skewness_convolution(e, K0, alpha, beta, dt)
wtilde = w - past
# 4) skewed mrw
smrw = np.cumsum(e[N:] * np.exp(wtilde), axis=0)
if do_squeeze:
smrw = smrw.squeeze()
if do_mirror:
past_mirror = skewness_convolution(-e, K0, alpha, beta, dt)
wtilde_mirror = w - past_mirror
smrw_mirror = np.cumsum(-e[N:] * np.exp(wtilde_mirror), axis=0)
if do_squeeze:
smrw_mirror = smrw_mirror.squeeze()
return smrw, smrw_mirror
else:
return smrw
def gaussian_w(N, R, L, lam, dt=1, method='cme', z0=None):
'''
    Auxiliary function to create the Gaussian process w
'''
kmax = int(L / dt)
k = np.arange(kmax)
rho = np.ones((N))
rho[:kmax] = L / (k + 1) / dt
cov = (lam ** 2) * np.log(rho)
if method == 'cme':
w = gaussian_cme(cov, N, R, z0)
elif method == 'chol':
w = gaussian_chol(cov, N, R, z0)
return w
def skewness_convolution(e, K0, alpha, beta=1, dt=1):
'''
Noise e should be of length 2*N, with "N false past variables" at the
beginning to avoid spurious correlations due to cutoffs in convolution.
'''
N, _ = e.shape
N = N // 2
tau = np.arange(1, N+1)
Kbar = np.zeros((2*N))
Kbar[1:N+1] = K0 / (tau**alpha) / (dt**beta)
skew_conv = np.real(ifft(fft(Kbar[:, None], axis=0) *
fft(e, axis=0), axis=0))
return skew_conv[N:]
def skewness_convolution_dumb(e, K0, alpha, beta=1, dt=1):
'''
Direct and inefficient calculation for testing purposes.
Receives "true" input noise of size N.
'''
N, R = e.shape
def K(i, j):
return K0 / (j-i)**alpha / dt**beta
scorr = np.zeros((N, R))
for k in range(N):
for i in range(k):
scorr[k, :] += K(i, k) * e[i, :]
return scorr
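# Consistency check (sketch, not executed here): with the "false past" half of the
# noise set to zero, the FFT-based convolution should match the direct one up to
# round-off:
#     e_true = np.random.randn(256, 2)
#     e_padded = np.concatenate([np.zeros((256, 2)), e_true], axis=0)
#     fast = skewness_convolution(e_padded, K0=1.0, alpha=0.5)
#     slow = skewness_convolution_dumb(e_true, K0=1.0, alpha=0.5)
#     assert np.allclose(fast, slow)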
def mrw2D(shape, H, lam, L, sigma=1):
'''
    Create a realization of a 2D multifractal random walk using circulant
    matrix embedding.
Parameters
----------
shape : int | tuple(int)
If scalar, it is the number of samples. If tuple it is (N, R),
the number of samples and realizations, respectively.
H : float
Hurst exponent
    lam : float
Intermittency parameter
L : float
Integral scale
sigma : float
Variance of process
Returns
-------
mrw : ndarray
Synthesized mrw realizations. If 'shape' is scalar,
        the result is of shape (N, N, 1). Otherwise, it is of shape (N, N, R).
References
----------
.. [1] Bacry, Delour, Muzy, "Multifractal Random Walk", Physical Review E,
2001
'''
try:
N, R = shape
# do_squeeze = False
except TypeError: # shape is scalar
N, R = shape, 1
# do_squeeze = True
N = int(2 * np.ceil(N / 2))
# dim = 2
n = np.arange(-N // 2, N // 2)
d = np.sqrt(n[:, None]**2 + n[None, :]**2)
corr = lam**2 * np.log(np.maximum(L / (1 + d), 1))
L = np.fft.fft2(corr)
z1 = np.random.randn(N, N, R) + 1j * np.random.randn(N, N, R)
w = np.exp(np.real(np.fft.ifft2(z1 * np.sqrt(L[..., None]), axes=(0, 1))))
# Increment process:
X = np.random.randn(N, N, R) * w
# Fractional integration to produce motion:
BX = fract_int_2d(X, H + 1)
return BX, X
def fract_int_2d(x, alpha):
'''
Assumes size of x divisible by two
'''
N = x.shape[0]
# Create Fourier filter
k = np.arange(-N/2, N/2)
d = np.sqrt(k[:, None]**2 + k[None, :]**2)
mini = np.min(d[d != 0])
d[d == 0] = mini
filt = 1 / (d ** alpha)
yhat = np.fft.fftshift(np.fft.fft2(x, axes=(0, 1)), axes=(0, 1))
yhat *= filt[..., None]
y = np.real(np.fft.ifft2(np.fft.ifftshift(yhat, axes=(0, 1)), axes=(0, 1)))
return y
|
import unittest
from pyregex.file_extensions import is_audio, is_img
class FileExtTests(unittest.TestCase):
def test_1(self):
self.assertEqual(is_audio("Nothing Else Matters.mp3"), False)
def test_2(self):
self.assertEqual(is_audio("NothingElseMatters.mp3"), True)
def test_3(self):
self.assertEqual(is_audio("DaftPunk.FLAC"), False)
def test_4(self):
self.assertEqual(is_audio("DaftPunk.flac"), True)
def test_5(self):
self.assertEqual(is_audio("AmonTobin.aac"), True)
def test_6(self):
self.assertEqual(is_audio(" Amon Tobin.alac"), False)
def test_7(self):
self.assertEqual(is_audio("tobin.alac"), True)
def test_8(self):
self.assertEqual(is_img("Home.jpg"), True)
def test_9(self):
self.assertEqual(is_img("flat.jpeg"), True)
def test_10(self):
self.assertEqual(is_img("icon.bmp"), True)
def test_11(self):
self.assertEqual(is_img("icon2.jpg"), False)
def test_12(self):
self.assertEqual(is_img("bounce.gif"), True)
def test_13(self):
self.assertEqual(is_img("animate bounce.GIF"), False)
def test_14(self):
self.assertEqual(is_img("transparency.png"), True)
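# One possible implementation consistent with the tests above (an assumption --
# the real pyregex.file_extensions module may differ): the file name must be a
# single run of ASCII letters followed by a lowercase extension from the allowed set.
import re

def _is_audio_sketch(filename):
    return bool(re.match(r"^[A-Za-z]+\.(mp3|flac|alac|aac)$", filename))

def _is_img_sketch(filename):
    return bool(re.match(r"^[A-Za-z]+\.(jpg|jpeg|png|bmp|gif)$", filename))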
if __name__ == "__main__":
unittest.main()
|
"""Helper functions for validating LFOM.
Created on September 18, 2020
@author: jcs528@cornell.edu
"""
from aguaclara.core.units import u
import aguaclara.core.physchem as pc
import aguaclara.core.constants as con
def flow_lfom_vert(height, d_ori, h_ori, n_oris):
"""Returns the flow through the LFOM as a function of height
Args:
height: height of water in the LFOM (u.m)
d_ori: diameter of each orifice (u.m)
h_ori: height of each row of the LFOM (list)
n_oris: number of orifices at each row of the LFOM (list of lists)
Returns:
flow: flow rate through the LFOM (u.L / u.s)
"""
flow = pc.flow_orifice_vert(d_ori, height - h_ori, con.VC_ORIFICE_RATIO) * n_oris
return (sum(flow)).to(u.L / u.s)
def check_flow_lfom_vert(
diameter, ori_heights, ori_numbers, cutoff, q_input, report_writer
):
"""Evaluates the flow
Args:
diameter: diameter of each orifice (u.m)
ori_heights: height of each row of the LFOM (list)
ori_numbers: number of orifices at each row of the LFOM (list of lists)
cutoff: allowable tolerance between design and expected flow as a percent
q_input: design flow rate (u.L / u.s)
report_writer: ReportWriter object to record validation results
Returns:
flow: flow rate through the LFOM (u.L / u.s)
"""
try:
q_calc = flow_lfom_vert(
ori_heights[-1] + 0.5 * diameter, diameter, ori_heights, ori_numbers
)
assert cutoff > (q_calc - q_input) / q_input
assert -cutoff < (q_calc - q_input) / q_input
report_writer.write_message(
"The expected flow rate, {!s}, was very close "
"to the one calculated by this validation "
"code, {!s}.\n".format(q_input, q_calc)
)
except AssertionError:
report_writer.write_message(
"INVALID: The expected flow rate, {!s}, is "
"different from the one calculated by this "
"validation code, {!s}.\n".format(q_input, q_calc)
)
report_writer.set_result("Invalid: Check Validation Report")
|
#!/usr/bin/env python
from __future__ import absolute_import
import locale
import logging
import os
import sys
import warnings
# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks,
# but if invoked (i.e. imported), it will issue a warning to stderr if socks
# isn't available. requests unconditionally imports urllib3's socks contrib
# module, triggering this warning. The warning breaks DEP-8 tests (because of
# the stderr output) and is just plain annoying in normal usage. I don't want
# to add socks as yet another dependency for pip, nor do I want to allow-stderr
# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to
# be done before the import of pip.vcs.
from pip._vendor.urllib3.exceptions import DependencyWarning
warnings.filterwarnings("ignore", category=DependencyWarning) # noqa
# We want to inject the use of SecureTransport as early as possible so that any
# references or sessions or what have you are ensured to have it, however we
# only want to do this in the case that we're running on macOS and the linked
# OpenSSL is too old to handle TLSv1.2
try:
import ssl
except ImportError:
pass
else:
# Checks for OpenSSL 1.0.1 on MacOS
if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f:
try:
from pip._vendor.urllib3.contrib import securetransport
except (ImportError, OSError):
pass
else:
securetransport.inject_into_urllib3()
from pip._internal.cli.autocompletion import autocomplete
from pip._internal.cli.main_parser import parse_command
from pip._internal.commands import commands_dict
from pip._internal.exceptions import PipError
from pip._internal.utils import deprecation
from pip._vendor.urllib3.exceptions import InsecureRequestWarning
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def main(args=None):
if args is None:
args = sys.argv[1:]
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parse_command(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
# Needed for locale.getpreferredencoding(False) to work
# in pip._internal.utils.encoding.auto_decode
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
        # setlocale can apparently crash if locales are uninitialized
logger.debug("Ignoring error %s when setting locale", e)
command = commands_dict[cmd_name](isolated=("--isolated" in cmd_args))
return command.main(cmd_args)
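# Convenience sketch so the module can be executed directly; the real entry point
# normally goes through pip's console_scripts wrapper.
if __name__ == "__main__":
    sys.exit(main())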
|
# -*- coding: utf-8 -*-
import datetime
import warnings
import pandas as pd
import numpy as np
from rqdatac.utils import to_datetime, to_date
from rqdatac.validators import (
ensure_date_range,
ensure_date_or_today_int,
ensure_list_of_string,
check_items_in_container,
ensure_order,
ensure_order_book_id,
ensure_order_book_ids,
ensure_dates_base_on_listed_date,
ensure_string,
ensure_date_int
)
from rqdatac.services.basic import instruments
from rqdatac.services.calendar import (
get_trading_dates,
get_previous_trading_date,
get_trading_dates_in_type,
)
from rqdatac.client import get_client
from rqdatac.decorators import export_as_api, compatible_with_parm
@export_as_api
def is_st_stock(order_book_ids, start_date=None, end_date=None, market="cn"):
"""判断股票在给定的时间段是否是ST股, 返回值为一个DataFrame
:param order_book_ids: 股票 id
:param start_date: (Default value = None)
:param end_date: (Default value = None)
:param market: (Default value = "cn")
"""
order_book_ids = ensure_order_book_ids(order_book_ids, type="CS", market=market)
if len(order_book_ids) == 1:
instrument = instruments(order_book_ids[0], market=market)
start_date, end_date = ensure_dates_base_on_listed_date(instrument, start_date, end_date, market)
if start_date is None:
return
start_date, end_date = ensure_date_range(start_date, end_date)
trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market))
data = get_client().execute(
"get_st_days", order_book_ids, start_date=start_date, end_date=end_date
)
df = pd.DataFrame(data=False, columns=order_book_ids, index=trading_dates)
for idx, dates in data.items():
for date in dates:
date = to_datetime(date)
df.at[date, idx] = True
return df
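# Example (sketch, illustrative arguments; assumes rqdatac.init() has been called
# and the account has the required data permissions):
#     is_st_stock("000001.XSHE", start_date="2019-01-01", end_date="2019-01-31")
#     # -> DataFrame indexed by trading date with one boolean column per stock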
@export_as_api
def _is_st_stock(order_book_id, date=None, market="cn"):
"""判断股票在给定日期是否是ST股
:param order_book_id: 股票id
:param date: (Default value = None)
:param market: (Default value = "cn")
:returns: True or False
"""
order_book_id = ensure_order_book_id(order_book_id, type="CS", market=market)
date = ensure_date_or_today_int(date)
df = is_st_stock(order_book_id, start_date=date, end_date=date, market=market)
if df is None or df.empty:
return False
else:
return df[order_book_id][0]
@export_as_api
@compatible_with_parm(name="country", value="cn", replace="market")
def is_suspended(order_book_ids, start_date=None, end_date=None, market="cn"):
"""获取股票停牌信息
:param order_book_ids: 股票名称
:param start_date: 开始日期, 如'2013-01-04' (Default value = None)
:param end_date: 结束日期,如'2014-01-04' (Default value = None)
:param market: 地区代码, 如 'cn' (Default value = "cn")
:returns: DataFrame
"""
order_book_ids = ensure_order_book_ids(order_book_ids, type="CS", market=market)
if len(order_book_ids) == 1:
instrument = instruments(order_book_ids[0], market=market)
start_date, end_date = ensure_dates_base_on_listed_date(instrument, start_date, end_date, market)
if start_date is None:
return
start_date, end_date = ensure_date_range(start_date, end_date)
trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market))
df = pd.DataFrame(data=False, columns=order_book_ids, index=trading_dates)
data = get_client().execute("get_suspended_days", order_book_ids, start_date, end_date, market=market)
for idx, dates in data.items():
for date in dates:
date = to_datetime(int(date))
df.at[date, idx] = True
return df
stock_fields = {"shares_holding": "shares_holding", "holding_ratio": "holding_ratio"}
special_symbols = ["all_connect", "shanghai_connect", "shenzhen_connect"]
symbols_map = {"shanghai_connect": "SH", "shenzhen_connect": "SZ"}
@export_as_api
def get_stock_connect(order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False):
"""获取"陆股通"的持股、持股比例
:param order_book_ids: 股票列表
:param start_date: 开始日期: 如'2017-03-17' (Default value = None)
:param end_date: 结束日期: 如'2018-03-16' (Default value = None)
:param fields: 默认为所有字段,可输入shares_holding或者holding_ratio (Default value = None)
:param expect_df: 返回 MultiIndex DataFrame (Default value = False)
:returns: 返回pandas.DataFrame or pandas.Panel
"""
if order_book_ids not in ("shanghai_connect", "shenzhen_connect", "all_connect"):
order_book_ids = ensure_order_book_ids(order_book_ids, type="CS")
start_date, end_date = ensure_date_range(start_date, end_date)
if fields is not None:
fields = ensure_list_of_string(fields)
for f in fields:
if f not in ("shares_holding", "holding_ratio"):
raise ValueError("invalid field: {}".format(f))
else:
fields = ["shares_holding", "holding_ratio"]
data = get_client().execute("get_stock_connect", order_book_ids, start_date, end_date, fields)
if not data:
return None
df = pd.DataFrame(data, columns=["trading_date", "order_book_id"] + fields)
if expect_df:
df.sort_values(["order_book_id", "trading_date"], inplace=True)
df.set_index(["order_book_id", "trading_date"], inplace=True)
return df
df = df.set_index(["trading_date", "order_book_id"])
df = df.to_panel()
df.major_axis.name = None
df.minor_axis.name = None
if len(order_book_ids) == 1:
df = df.minor_xs(order_book_ids[0])
if len(fields) == 1:
df = df[fields[0]]
if len(order_book_ids) != 1 and len(fields) != 1:
warnings.warn("Panel is removed after pandas version 0.25.0."
" the default value of 'expect_df' will change to True in the future.")
return df
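# Example (sketch, illustrative arguments; assumes rqdatac.init() has been called):
#     get_stock_connect("000001.XSHE", start_date="2018-01-01", end_date="2018-03-01",
#                       fields="shares_holding", expect_df=True)
#     # -> MultiIndex (order_book_id, trading_date) DataFrame with a shares_holding column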
MARGIN_FIELDS = (
"margin_balance",
"buy_on_margin_value",
"short_sell_quantity",
"margin_repayment",
"short_balance_quantity",
"short_repayment_quantity",
"short_balance",
"total_balance",
)
MARGIN_SUMMARY_MAP = {"SH": "XSHG", "XSHG": "XSHG", "SZ": "XSHE", "XSHE": "XSHE"}
@export_as_api
def get_securities_margin(
order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False, market="cn"
):
"""获取股票融资融券数据
:param order_book_ids: 股票代码或代码列表
:param start_date: 开始时间,支持 str, date, datetime, pandasTimestamp
默认为 end_date 之前一个月 (Default value = None)
:param end_date: 结束时间 默认为当前日期前一天 (Default value = None)
:param fields: str 或 list 类型. 默认为 None, 返回所有字段。可选字段包括:
today, week, month, three_month, six_month, year, current_year, total
(Default value = None)
:param expect_df: 返回 MultiIndex DataFrame (Default value = False)
:param market: 地区代码, 如: 'cn' (Default value = "cn")
:returns: 如果传入多个股票代码,且 fields 为多个或者 None,返回 pandas.Panel
如果传入一只股票或者 fields 为单个字段,则返回 pandas.DataFrame
如果传入的股票代码和字段数都是1,则返回 pandas.Series
"""
order_book_ids = ensure_list_of_string(order_book_ids, "order_book_ids")
all_list = []
for order_book_id in order_book_ids:
if order_book_id.upper() in MARGIN_SUMMARY_MAP:
all_list.append(MARGIN_SUMMARY_MAP[order_book_id.upper()])
else:
inst = instruments(order_book_id, market)
if inst.type in ["CS", "ETF", "LOF"]:
all_list.append(inst.order_book_id)
else:
warnings.warn("{} is not stock, ETF, or LOF.".format(order_book_id))
order_book_ids = all_list
if not order_book_ids:
raise ValueError("no valid securities in {}".format(order_book_ids))
if fields is None:
fields = list(MARGIN_FIELDS)
else:
fields = ensure_list_of_string(fields, "fields")
check_items_in_container(fields, MARGIN_FIELDS, "fields")
fields = ensure_order(fields, MARGIN_FIELDS)
start_date, end_date = ensure_date_range(start_date, end_date)
if end_date > ensure_date_or_today_int(None):
end_date = ensure_date_or_today_int(get_previous_trading_date(datetime.date.today()))
trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market))
data = get_client().execute(
"get_securities_margin", order_book_ids, start_date, end_date, market=market
)
if not data:
return
if expect_df:
df = pd.DataFrame(data)
df.sort_values(["order_book_id", "date"], inplace=True)
df.set_index(["order_book_id", "date"], inplace=True)
df = df.reindex(columns=fields)
return df
pl = pd.Panel(items=fields, major_axis=trading_dates, minor_axis=order_book_ids)
for r in data:
for field in fields:
value = r.get(field)
pl.at[field, r["date"], r["order_book_id"]] = value
if len(order_book_ids) == 1:
pl = pl.minor_xs(order_book_ids[0])
if len(fields) == 1:
pl = pl[fields[0]]
if len(order_book_ids) != 1 and len(fields) != 1:
warnings.warn("Panel is removed after pandas version 0.25.0."
" the default value of 'expect_df' will change to True in the future.")
return pl
MARGIN_TYPE = ("stock", "cash")
EXCHANGE_TYPE = {"SZ": "XSHE", "sz": "XSHE", "xshe": "XSHE", "SH": "XSHG", "sh": "XSHG", "xshg": "XSHG"}
EXCHANGE_CONTENT = ["XSHE", "XSHG"]
@export_as_api
def get_margin_stocks(date=None, exchange=None, margin_type='stock', market="cn"):
"""获取融资融券信息
:param date: 查询日期,默认返回今天上一交易日,支持 str, timestamp, datetime 类型
:param exchange: 交易所信息,默认不填写则返回全部。
str类型,默认为 None,返回所有字段。可选字段包括:
'XSHE', 'sz' 代表深交所;'XSHG', 'sh' 代表上交所,不区分大小写
(Default value = None)
:param margin_type: 'stock' 代表融券卖出,'cash',代表融资买入,默认为'stock'
"""
if date:
date = ensure_date_int(date)
else:
date = get_previous_trading_date(datetime.date.today())
date = date.year * 10000 + date.month * 100 + date.day
if exchange is None:
exchange = EXCHANGE_CONTENT
else:
exchange = ensure_string(exchange, "exchange")
if exchange in EXCHANGE_TYPE:
exchange = EXCHANGE_TYPE[exchange]
check_items_in_container(exchange, EXCHANGE_CONTENT, "exchange")
exchange = [exchange]
margin_type = ensure_string(margin_type, "margin_type")
check_items_in_container(margin_type, MARGIN_TYPE, "margin_type")
data = get_client().execute(
"get_margin_stocks", date, exchange, margin_type, market=market
)
if not data:
return []
else:
return sorted(data)
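# Example (sketch, illustrative arguments; assumes rqdatac.init() has been called):
#     get_margin_stocks(date="2019-05-10", exchange="XSHG", margin_type="cash")
#     # -> sorted list of order_book_ids eligible for margin buying on that date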
share_fields = {
"total": "total_shares",
"circulation_a": "a_cir_shares",
"non_circulation_a": "a_non_cir_shares",
"total_a": "a_total_shares",
}
anti_fields = {v: k for k, v in share_fields.items()}
@export_as_api
@compatible_with_parm(name="country", value="cn", replace="market")
def get_shares(order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False, market="cn"):
"""获取流通股本信息
:param order_book_ids: 股票名称
:param start_date: 开始日期, 如'2013-01-04' (Default value = None)
:param end_date: 结束日期,如'2014-01-04' (Default value = None)
:param fields: 如'total', 'circulation_a' (Default value = None)
:param expect_df: 返回 MultiIndex DataFrame (Default value = False)
:param market: 地区代码,如'cn' (Default value = "cn")
:returns: 返回一个DataFrame
"""
order_book_ids = ensure_order_book_ids(order_book_ids, market=market)
start_date, end_date = ensure_date_range(start_date, end_date)
if fields:
fields = ensure_list_of_string(fields, "fields")
if 'management_circulation' in fields:
fields.remove('management_circulation')
if fields:
warnings.warn("management_circulation is removed")
else:
raise ValueError("management_circulation is removed")
check_items_in_container(fields, set(share_fields), "fields")
fields = [share_fields[i] for i in fields]
else:
fields = list(share_fields.values())
all_shares = get_client().execute("get_shares", order_book_ids, fields, market=market)
if not all_shares:
return
dates = get_trading_dates_in_type(start_date, end_date, expect_type="datetime", market=market)
df = pd.DataFrame(all_shares)
unique = set(df.order_book_id)
for order_book_id in order_book_ids:
if order_book_id not in unique:
df = df.append(
{"order_book_id": order_book_id, "date": df.date.iloc[-1]}, ignore_index=True
)
df.set_index(["date", "order_book_id"], inplace=True)
df.sort_index(inplace=True)
df = df.unstack(level=1)
index = df.index.union(dates)
df = df.reindex(index)
df = df.fillna(method="ffill")
df = df.loc[list(dates)]
df = df.dropna(how="all")
df = df[fields]
if expect_df:
df = df.stack(1)
df.index.set_names(["date", "order_book_id"], inplace=True)
df = df.reorder_levels(["order_book_id", "date"]).sort_index()
df = df.rename(columns=anti_fields)
return df
pl = df.stack(1).to_panel()
pl.items = [anti_fields[i] for i in pl.items]
if len(order_book_ids) == 1:
pl = pl.minor_xs(order_book_ids[0])
if len(fields) == 1:
pl = pl[anti_fields[fields[0]]]
if len(order_book_ids) != 1 and len(fields) != 1:
warnings.warn("Panel is removed after pandas version 0.25.0."
" the default value of 'expect_df' will change to True in the future.")
return pl
|
from django.db import models
# Create your models here.
class Endpoint(models.Model):
"""
    The Endpoint object represents an ML API endpoint.
    Attributes:
        name: The name of the endpoint; it will be used in the API URL.
        owner: The string with the owner name.
        created_at: The date when the endpoint was created.
"""
name = models.CharField(max_length=128)
owner = models.CharField(max_length=128)
created_at = models.DateTimeField(auto_now_add=True, blank=True)
class MLAlgorithm(models.Model):
'''
    The MLAlgorithm represents the ML algorithm object.
Attributes:
name: The name of the algorithm.
description: The short description of how the algorithm works.
code: The code of the algorithm.
version: The version of the algorithm similar to software versioning.
owner: The name of the owner.
created_at: The date when MLAlgorithm was added.
parent_endpoint: The reference to the Endpoint.
'''
name = models.CharField(max_length=128)
description = models.CharField(max_length=1000)
code = models.CharField(max_length=50000)
version = models.CharField(max_length=128)
owner = models.CharField(max_length=128)
created_at = models.DateTimeField(auto_now_add=True, blank=True)
parent_endpoint = models.ForeignKey(Endpoint, on_delete=models.CASCADE)
class MLAlgorithmStatus(models.Model):
'''
    The MLAlgorithmStatus represents the status of the MLAlgorithm, which can change over time.
    Attributes:
        status: The status of the algorithm in the endpoint. Can be: testing, staging, production, ab_testing.
        active: The boolean flag which points to the currently active status.
        created_by: The name of the creator.
created_at: The date of status creation.
parent_mlalgorithm: The reference to corresponding MLAlgorithm.
'''
status = models.CharField(max_length=128)
active = models.BooleanField()
created_by = models.CharField(max_length=128)
created_at = models.DateTimeField(auto_now_add=True, blank=True)
parent_mlalgorithm = models.ForeignKey(MLAlgorithm, on_delete=models.CASCADE, related_name = "status")
class MLRequest(models.Model):
'''
The MLRequest will keep information about all requests to ML algorithms.
Attributes:
input_data: The input data to ML algorithm in JSON format.
full_response: The response of the ML algorithm.
response: The response of the ML algorithm in JSON format.
feedback: The feedback about the response in JSON format.
created_at: The date when request was created.
parent_mlalgorithm: The reference to MLAlgorithm used to compute response.
'''
input_data = models.CharField(max_length=10000)
full_response = models.CharField(max_length=10000)
response = models.CharField(max_length=10000)
feedback = models.CharField(max_length=10000, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True, blank=True)
parent_mlalgorithm = models.ForeignKey(MLAlgorithm, on_delete=models.CASCADE)
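# Example (sketch, hypothetical values; this would normally live in a view or a
# test, not at module level):
#     endpoint = Endpoint.objects.create(name="income_classifier", owner="alice")
#     algorithm = MLAlgorithm.objects.create(
#         name="random forest", description="RF with simple pre-processing",
#         code="...", version="0.0.1", owner="alice", parent_endpoint=endpoint)
#     MLAlgorithmStatus.objects.create(status="production", active=True,
#                                      created_by="alice", parent_mlalgorithm=algorithm)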
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['PolyTrend'] , ['Seasonal_Hour'] , ['MLP'] );
|
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from theano.sandbox.rng_mrg import MRG_RandomStreams
@treeano.register_node("randomized_relu")
class RandomizedReLUNode(treeano.NodeImpl):
"""
from "Empirical Evaluation of Rectified Activations in Convolutional
Network"
http://arxiv.org/abs/1505.00853
"""
hyperparameter_names = ("alpha_lower",
"alpha_upper",
"deterministic")
def compute_output(self, network, in_vw):
# gather hyperparameters
deterministic = network.find_hyperparameter(["deterministic"])
l = network.find_hyperparameter(["alpha_lower"],
3)
u = network.find_hyperparameter(["alpha_upper"],
8)
if deterministic:
negative_coefficient = 2.0 / (l + u)
else:
# TODO save this state so that we can seed the rng
srng = MRG_RandomStreams()
alphas = srng.uniform(size=in_vw.symbolic_shape(),
low=l,
high=u)
negative_coefficient = 1.0 / alphas
# return output
network.create_vw(
"default",
variable=treeano.utils.rectify(
in_vw.variable,
negative_coefficient=negative_coefficient),
shape=in_vw.shape,
tags={"output"},
)
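# Numeric note on the defaults above: during training the negative slope is
# 1 / alpha with alpha ~ U(3, 8), i.e. a slope between 1/8 and 1/3; with
# deterministic=True it is fixed to 2 / (3 + 8) ~= 0.18, the paper's test-time rule.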
@treeano.register_node("uniform_randomized_relu")
class UniformRandomizedReLUNode(treeano.NodeImpl):
"""
like RandomizedReLUNode, but instead of sampling from 1 / uniform(l, u),
sample from uniform(l, u)
"""
hyperparameter_names = ("alpha_lower",
"alpha_upper",
"deterministic")
def compute_output(self, network, in_vw):
# gather hyperparameters
deterministic = network.find_hyperparameter(["deterministic"])
l = network.find_hyperparameter(["alpha_lower"],
1 / 8.)
u = network.find_hyperparameter(["alpha_upper"],
1 / 3.)
if deterministic:
negative_coefficient = (l + u) / 2.
else:
# TODO save this state so that we can seed the rng
srng = MRG_RandomStreams()
negative_coefficient = srng.uniform(size=in_vw.symbolic_shape(),
low=l,
high=u)
# return output
network.create_vw(
"default",
variable=treeano.utils.rectify(
in_vw.variable,
negative_coefficient=negative_coefficient),
shape=in_vw.shape,
tags={"output"},
)
@treeano.register_node("random_walk_relu")
class RandomWalkReLUNode(treeano.NodeImpl):
"""
leaky ReLU node, where leak alpha changes randomly over time
"""
hyperparameter_names = ("step_size",
"initial_alpha",
"inits")
def compute_output(self, network, in_vw):
# gather hyperparameters
initial_alpha = network.find_hyperparameter(
["initial_alpha"],
0)
alpha = network.create_vw(
"alpha",
is_shared=True,
shape=(in_vw.shape[1],),
tags={"state"},
default_inits=[treeano.inits.ConstantInit(initial_alpha)],
).variable
pattern = ["x"] * in_vw.ndim
pattern[1] = 0
alpha_b = alpha.dimshuffle(*pattern)
# return output
network.create_vw(
"default",
variable=treeano.utils.rectify(in_vw.variable,
negative_coefficient=alpha_b),
shape=in_vw.shape,
tags={"output"},
)
def new_update_deltas(self, network):
alpha_vw = network.get_vw("alpha")
step_size = network.find_hyperparameter(["step_size"])
# NOTE: each MRG_RandomStreams has the same seed, so
# all nodes with the same shape end up with the same alphas
srng = MRG_RandomStreams()
steps = srng.uniform(size=alpha_vw.shape,
low=-step_size,
high=step_size)
# TODO clip value of alpha (to prevent it becoming linear)
return treeano.UpdateDeltas({alpha_vw.variable: steps})
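# A standalone numpy sketch (an illustration, not part of the treeano nodes above)
# of the randomized-ReLU rule implemented by RandomizedReLUNode: at training time
# the negative slope is 1 / U(l, u); at test time it is the deterministic 2 / (l + u).
def _rrelu_reference(x, l=3.0, u=8.0, deterministic=False, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    if deterministic:
        slope = 2.0 / (l + u)
    else:
        slope = 1.0 / rng.uniform(low=l, high=u, size=x.shape)
    return np.where(x >= 0, x, slope * x)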
|
import click
import psycopg2 as pg2
from flask import current_app, g
from flask.cli import with_appcontext
from psycopg2.extras import DictCursor
def get_db():
if 'db' not in g:
g.db = pg2.connect(
**current_app.config['DATABASE'],
)
g.db.cursor_factory = DictCursor
return g.db
def close_db(e=None):
db = g.pop('db', None)
if db is not None:
db.close()
def init_db():
db = get_db()
db.autocommit = True
cur = db.cursor()
with current_app.open_resource('schema.sql') as f:
cur.execute(f.read().decode('utf8'))
cur.close()
db.autocommit = False
@click.command('init-db')
@with_appcontext
def init_db_command():
"""Clear the existing data and create new tables."""
init_db()
click.echo('Initialized the database.')
def init_app(app):
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
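# A hedged wiring sketch (not part of this module): a hypothetical application
# factory would supply the psycopg2 connection settings and register init_app().
#
#   from flask import Flask
#
#   def create_app():
#       app = Flask(__name__)
#       app.config['DATABASE'] = {'dbname': 'example', 'user': 'example'}
#       init_app(app)
#       return app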
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.sync.v1.service.sync_list.sync_list_item import SyncListItemList
from twilio.rest.sync.v1.service.sync_list.sync_list_permission import SyncListPermissionList
class SyncListList(ListResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, service_sid):
"""
Initialize the SyncListList
:param Version version: Version that contains the resource
:param service_sid: The unique SID identifier of the Service Instance that hosts this List object.
:returns: twilio.rest.sync.v1.service.sync_list.SyncListList
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListList
"""
super(SyncListList, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, }
self._uri = '/Services/{service_sid}/Lists'.format(**self._solution)
def create(self, unique_name=values.unset, ttl=values.unset):
"""
Create a new SyncListInstance
:param unicode unique_name: Human-readable name for this list
:param unicode ttl: Time-to-live of this List in seconds, defaults to no expiration.
:returns: Newly created SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance
"""
data = values.of({'UniqueName': unique_name, 'Ttl': ttl, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return SyncListInstance(self._version, payload, service_sid=self._solution['service_sid'], )
def stream(self, limit=None, page_size=None):
"""
Streams SyncListInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.sync.v1.service.sync_list.SyncListInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists SyncListInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.sync.v1.service.sync_list.SyncListInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of SyncListInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SyncListPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SyncListInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SyncListPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a SyncListContext
:param sid: The sid
:returns: twilio.rest.sync.v1.service.sync_list.SyncListContext
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext
"""
return SyncListContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a SyncListContext
:param sid: The sid
:returns: twilio.rest.sync.v1.service.sync_list.SyncListContext
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext
"""
return SyncListContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Sync.V1.SyncListList>'
class SyncListPage(Page):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, response, solution):
"""
Initialize the SyncListPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param service_sid: The unique SID identifier of the Service Instance that hosts this List object.
:returns: twilio.rest.sync.v1.service.sync_list.SyncListPage
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListPage
"""
super(SyncListPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SyncListInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.sync.v1.service.sync_list.SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance
"""
return SyncListInstance(self._version, payload, service_sid=self._solution['service_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Sync.V1.SyncListPage>'
class SyncListContext(InstanceContext):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, service_sid, sid):
"""
Initialize the SyncListContext
:param Version version: Version that contains the resource
:param service_sid: The service_sid
:param sid: The sid
:returns: twilio.rest.sync.v1.service.sync_list.SyncListContext
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext
"""
super(SyncListContext, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'sid': sid, }
self._uri = '/Services/{service_sid}/Lists/{sid}'.format(**self._solution)
# Dependents
self._sync_list_items = None
self._sync_list_permissions = None
def fetch(self):
"""
Fetch a SyncListInstance
:returns: Fetched SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SyncListInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the SyncListInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def update(self, ttl=values.unset):
"""
Update the SyncListInstance
:param unicode ttl: Time-to-live of this List in seconds, defaults to no expiration.
:returns: Updated SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance
"""
data = values.of({'Ttl': ttl, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return SyncListInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
@property
def sync_list_items(self):
"""
Access the sync_list_items
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList
"""
if self._sync_list_items is None:
self._sync_list_items = SyncListItemList(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['sid'],
)
return self._sync_list_items
@property
def sync_list_permissions(self):
"""
Access the sync_list_permissions
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionList
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionList
"""
if self._sync_list_permissions is None:
self._sync_list_permissions = SyncListPermissionList(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['sid'],
)
return self._sync_list_permissions
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Sync.V1.SyncListContext {}>'.format(context)
class SyncListInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, payload, service_sid, sid=None):
"""
Initialize the SyncListInstance
:returns: twilio.rest.sync.v1.service.sync_list.SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance
"""
super(SyncListInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'unique_name': payload['unique_name'],
'account_sid': payload['account_sid'],
'service_sid': payload['service_sid'],
'url': payload['url'],
'links': payload['links'],
'revision': payload['revision'],
'date_expires': deserialize.iso8601_datetime(payload['date_expires']),
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'created_by': payload['created_by'],
}
# Context
self._context = None
self._solution = {'service_sid': service_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListContext for this SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext
"""
if self._context is None:
self._context = SyncListContext(
self._version,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The unique 34-character SID identifier of the List.
:rtype: unicode
"""
return self._properties['sid']
@property
def unique_name(self):
"""
:returns: The unique and addressable name of this List.
:rtype: unicode
"""
return self._properties['unique_name']
@property
def account_sid(self):
"""
:returns: The unique SID identifier of the Twilio Account.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def service_sid(self):
"""
:returns: The unique SID identifier of the Service Instance that hosts this List object.
:rtype: unicode
"""
return self._properties['service_sid']
@property
def url(self):
"""
:returns: The absolute URL for this List.
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: A dictionary of URL links to nested resources of this List.
:rtype: unicode
"""
return self._properties['links']
@property
def revision(self):
"""
:returns: Contains the current revision of this List, represented by a string identifier.
:rtype: unicode
"""
return self._properties['revision']
@property
def date_expires(self):
"""
:returns: Contains the date this List expires and gets deleted automatically.
:rtype: datetime
"""
return self._properties['date_expires']
@property
def date_created(self):
"""
:returns: The date this List was created, given in UTC ISO 8601 format.
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: Specifies the date this List was last updated, given in UTC ISO 8601 format.
:rtype: datetime
"""
return self._properties['date_updated']
@property
def created_by(self):
"""
:returns: The identity of the List creator.
:rtype: unicode
"""
return self._properties['created_by']
def fetch(self):
"""
Fetch a SyncListInstance
:returns: Fetched SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the SyncListInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, ttl=values.unset):
"""
Update the SyncListInstance
:param unicode ttl: Time-to-live of this List in seconds, defaults to no expiration.
:returns: Updated SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListInstance
"""
return self._proxy.update(ttl=ttl, )
@property
def sync_list_items(self):
"""
Access the sync_list_items
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList
"""
return self._proxy.sync_list_items
@property
def sync_list_permissions(self):
"""
Access the sync_list_permissions
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionList
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionList
"""
return self._proxy.sync_list_permissions
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Sync.V1.SyncListInstance {}>'.format(context)
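# A hedged usage sketch (not part of the generated bindings above): with the
# top-level twilio client, this resource is typically reached as shown below.
# The account/service SIDs and auth token are placeholders.
#
#   from twilio.rest import Client
#
#   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#   sync_list = client.sync.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#                          .sync_lists \
#                          .create(unique_name='queue', ttl=3600)
#   print(sync_list.sid)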
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Phist(CMakePackage):
"""The Pipelined, Hybrid-parallel Iterative Solver Toolkit provides
implementations of and interfaces to block iterative solvers for sparse
linear and eigenvalue problems. In contrast to other libraries we support
multiple backends (e.g. Trilinos, PETSc and our own optimized kernels),
and interfaces in multiple languages such as C, C++, Fortran 2003 and
Python. PHIST has a clear focus on portability and hardware performance:
    in particular, it supports row-major storage of block vectors and the use
    of GPUs (via the ghost library or Trilinos/Tpetra).
"""
homepage = "https://bitbucket.org/essex/phist/"
url = "https://bitbucket.org/essex/phist/get/phist-1.4.3.tar.gz"
git = "https://bitbucket.org/essex/phist/phist.git"
version('develop', branch='devel')
version('master', branch='master')
version('1.7.2', sha256='29b504d78b5efd57b87d2ca6e20bc8a32b1ba55b40f5a5b7189cc0d28e43bcc0')
version('1.6.1', sha256='4ed4869f24f920a494aeae0f7d1d94fe9efce55ebe0d298a5948c9603e07994d')
version('1.6.0', '751f855230d6227b972b5ab7bce2c65f')
version('1.4.3', 'af3300378d4282366d148e38c3a3199a')
variant(name='kernel_lib', default='builtin',
description='select the kernel library (backend) for phist',
values=['builtin',
'epetra',
'tpetra',
'petsc',
'eigen',
'ghost'])
    variant(name='outlev', default='2', values=['0', '1', '2', '3', '4', '5'],
            description='verbosity. 0: errors 1: +warnings 2: +info '
                        '3: +verbose 4: +extreme 5: +debug')
variant('shared', default=True,
description='Enables the build of shared libraries')
variant('mpi', default=True,
description='enable/disable MPI (note that the kernel library may '
'not support this choice)')
variant('parmetis', default=False,
description='enable/disable ParMETIS partitioning (only actually '
'used with kernel_lib=builtin)')
variant('trilinos', default=False,
description='enable/disable Trilinos third-party libraries. '
'For all kernel_libs, we can use Belos and Anasazi '
'iterative solvers. For the Trilinos backends '
'(kernel_lib=epetra|tpetra) we can use preconditioner '
'packages such as Ifpack, Ifpack2 and ML.')
# ###################### Dependencies ##########################
depends_on('cmake@3.8:', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('python@3:', when='@1.7:', type='build')
depends_on('mpi', when='+mpi')
depends_on('trilinos+anasazi+belos+teuchos', when='+trilinos')
depends_on('trilinos@12:+tpetra', when='kernel_lib=tpetra')
# Epetra backend also works with older Trilinos versions
depends_on('trilinos+epetra', when='kernel_lib=epetra')
depends_on('petsc', when='kernel_lib=petsc')
depends_on('eigen', when='kernel_lib=eigen')
depends_on('ghost', when='kernel_lib=ghost')
depends_on('trilinos', when='+trilinos')
depends_on('parmetis ^metis+int64', when='+parmetis')
def cmake_args(self):
spec = self.spec
kernel_lib = spec.variants['kernel_lib'].value
outlev = spec.variants['outlev'].value
lapacke_libs = \
(spec['lapack:c'].libs + spec['blas:c'].libs).joined(';')
lapacke_include_dir = spec['lapack:c'].headers.directories[0]
args = ['-DPHIST_KERNEL_LIB=%s' % kernel_lib,
'-DPHIST_OUTLEV=%s' % outlev,
'-DTPL_LAPACKE_LIBRARIES=%s' % lapacke_libs,
'-DTPL_LAPACKE_INCLUDE_DIRS=%s' % lapacke_include_dir,
'-DPHIST_ENABLE_MPI:BOOL=%s'
% ('ON' if '+mpi' in spec else 'OFF'),
'-DBUILD_SHARED_LIBS:BOOL=%s'
% ('ON' if '+shared' in spec else 'OFF'),
'-DPHIST_USE_TRILINOS_TPLS:BOOL=%s'
% ('ON' if '+trilinos' in spec else 'OFF'),
'-DPHIST_USE_SOLVER_TPLS:BOOL=%s'
% ('ON' if '+trilinos' in spec else 'OFF'),
'-DPHIST_USE_PRECON_TPLS:BOOL=%s'
% ('ON' if '+trilinos' in spec else 'OFF'),
]
return args
@run_after('build')
@on_package_attributes(run_tests=True)
def check(self):
with working_dir(self.build_directory):
make("check")
@run_after('install')
@on_package_attributes(run_tests=True)
def test_install(self):
with working_dir(self.build_directory):
make("test_install")
|
#!/usr/bin/python
import sys
if len(sys.argv) >= 3:
    filename = sys.argv[1]
    refFlat_filename = sys.argv[2]
else:
    print("usage: python exp_len.py refSeq_MLE_output.tab known.gpd")
    print("or ./exp_len.py refSeq_MLE_output.tab known.gpd")
    sys.exit(1)
################################################################################
# Map transcript ID -> MLE expression value (first two columns of the MLE output).
dt = {}
with open(filename, 'r') as mle_file:
    for line in mle_file:
        ls = line.strip().split('\t')
        dt[ls[0]] = ls[1]
################################################################################
# For each transcript in the annotation, sum its exon lengths and print
# gene, transcript, length and expression ("0" if the transcript was not estimated).
used_set = set()
with open(refFlat_filename, 'r') as ref:
    for refline in ref:
        refline_list = refline.strip().split()
        exon_start_list = refline_list[9].strip(',').split(',')
        exon_end_list = refline_list[10].strip(',').split(',')
        L = 0
        for i, start in enumerate(exon_start_list):
            L += int(exon_end_list[i]) - int(start)
        if refline_list[1] in used_set:
            continue
        used_set.add(refline_list[1])
        expression = dt.get(refline_list[1], "0")
        print(refline_list[0] + "\t" + refline_list[1] + "\t" + str(L) + "\t" + str(expression))
################################################################################
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import operator
import numpy as np
import pytest
import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
@pytest.mark.parametrize(
"ng_api_helper,numpy_function",
[
(ov.add, np.add),
(ov.divide, np.divide),
(ov.multiply, np.multiply),
(ov.subtract, np.subtract),
(ov.minimum, np.minimum),
(ov.maximum, np.maximum),
(ov.mod, np.mod),
(ov.equal, np.equal),
(ov.not_equal, np.not_equal),
(ov.greater, np.greater),
(ov.greater_equal, np.greater_equal),
(ov.less, np.less),
(ov.less_equal, np.less_equal),
],
)
def test_binary_op(ng_api_helper, numpy_function):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
parameter_b = ov.parameter(shape, name="B", dtype=np.float32)
model = ng_api_helper(parameter_a, parameter_b)
computation = runtime.computation(model, parameter_a, parameter_b)
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
result = computation(value_a, value_b)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"ng_api_helper,numpy_function",
[
(ov.add, np.add),
(ov.divide, np.divide),
(ov.multiply, np.multiply),
(ov.subtract, np.subtract),
(ov.minimum, np.minimum),
(ov.maximum, np.maximum),
(ov.mod, np.mod),
(ov.equal, np.equal),
(ov.not_equal, np.not_equal),
(ov.greater, np.greater),
(ov.greater_equal, np.greater_equal),
(ov.less, np.less),
(ov.less_equal, np.less_equal),
],
)
def test_binary_op_with_scalar(ng_api_helper, numpy_function):
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
model = ng_api_helper(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"ng_api_helper,numpy_function",
[(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
)
def test_binary_logical_op(ng_api_helper, numpy_function):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.bool)
parameter_b = ov.parameter(shape, name="B", dtype=np.bool)
model = ng_api_helper(parameter_a, parameter_b)
computation = runtime.computation(model, parameter_a, parameter_b)
value_a = np.array([[True, False], [False, True]], dtype=np.bool)
value_b = np.array([[False, True], [False, True]], dtype=np.bool)
result = computation(value_a, value_b)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"ng_api_helper,numpy_function",
[(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
)
def test_binary_logical_op_with_scalar(ng_api_helper, numpy_function):
runtime = get_runtime()
    value_a = np.array([[True, False], [False, True]], dtype=bool)
    value_b = np.array([[False, True], [False, True]], dtype=bool)
    shape = [2, 2]
    parameter_a = ov.parameter(shape, name="A", dtype=bool)
model = ng_api_helper(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"operator,numpy_function",
[
(operator.add, np.add),
(operator.sub, np.subtract),
(operator.mul, np.multiply),
(operator.truediv, np.divide),
(operator.eq, np.equal),
(operator.ne, np.not_equal),
(operator.gt, np.greater),
(operator.ge, np.greater_equal),
(operator.lt, np.less),
(operator.le, np.less_equal),
],
)
def test_binary_operators(operator, numpy_function):
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[4, 5], [1, 7]], dtype=np.float32)
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
model = operator(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"operator,numpy_function",
[
(operator.add, np.add),
(operator.sub, np.subtract),
(operator.mul, np.multiply),
(operator.truediv, np.divide),
(operator.eq, np.equal),
(operator.ne, np.not_equal),
(operator.gt, np.greater),
(operator.ge, np.greater_equal),
(operator.lt, np.less),
(operator.le, np.less_equal),
],
)
def test_binary_operators_with_scalar(operator, numpy_function):
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
model = operator(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
def test_multiply():
A = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1))
B = np.arange(35, dtype=np.int32).reshape((7, 1, 5))
expected = np.multiply(A, B)
result = run_op_node([A, B], ov.multiply)
assert np.allclose(result, expected)
def test_power_v1():
A = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1))
B = np.arange(20, dtype=np.float32).reshape((4, 1, 5))
expected = np.power(A, B)
result = run_op_node([A, B], ov.power)
assert np.allclose(result, expected)
|
#!/usr/bin/python3
import glob
import re
lgs=open("locallanguages.txt").read().split('\n')
terms=open("localsubjectterms.txt").read().split('\n')[::-1]#reverse to avoid double indexing
print("found %i language names for autoindexing" % len(lgs))
print("found %i subject terms for autoindexing" % len(terms))
files = glob.glob('chapters/*tex')
for f in files:
print("indexing %s" % f)
c = open(f).read()
    for lg in lgs:
        lg = lg.strip()
        if lg == '':
            continue
        # use raw strings and re.escape: '\i' is an invalid escape for re.sub under Python 3,
        # and language names may contain regex metacharacters
        c = re.sub(r'(?<!ili{)%s(?![\w}])' % re.escape(lg), r'\\ili{%s}' % lg, c)
    for term in terms:
        term = term.strip()
        if term == '':
            continue
        c = re.sub(r'(?<!isi{)%s(?![\w}])' % re.escape(term), r'\\isi{%s}' % term, c)
    nlg = len(re.findall(r'\\ili\{', c))
    nt = len(re.findall(r'\\isi\{', c))
outfile = open(f.replace('chapters','indexed'), 'w')
outfile.write(c)
outfile.close()
print(" %s now contains %i indexed languages and %i indexed subject terms"%(f.split('/')[-1],nlg,nt))
print("indexed files are in the folder 'indexed'")
|
# Copyright (C) 2016 Allen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Comment and uncomment lines.
Classes:
CommentPrefix
"""
import re
from mir.qualia.indent import common_indent
class CommentPrefix:
r"""Comments and uncomments lines, given a prefix.
>>> prefix = CommentPrefix('#')
>>> prefix.uncomment(['#export EDITOR=vi\n'])
['export EDITOR=vi\n']
>>> prefix.comment(['export EDITOR=vi\n'])
['#export EDITOR=vi\n']
>>> prefix.is_commented(['export EDITOR=vi\n'])
False
Do not modify the comment_prefix attribute on an instance.
"""
def __init__(self, comment_prefix):
self._comment_prefix = comment_prefix
self._prefix_pattern = re.compile(
fr'^(?P<indent>\s*){re.escape(comment_prefix)}')
def __repr__(self):
cls = type(self).__qualname__
return f'{cls}({self._comment_prefix!r})'
def is_commented(self, lines):
"""Return True if all lines are commented."""
pattern = self._prefix_pattern
return all(pattern.search(line) for line in lines)
def uncomment(self, lines):
r"""Uncomment a sequence of lines.
This will keep uncommenting so long as the lines are all commented.
This is so that uncommenting is an idempotent operation.
>>> prefix = CommentPrefix('#')
>>> prefix.uncomment(['##foo\n', '##bar\n'])
['foo\n', 'bar\n']
>>> prefix.uncomment(prefix.uncomment(['##foo\n', '##bar\n']))
['foo\n', 'bar\n']
In almost all cases, this is desired behavior, but if you need to
preserve levels of commenting, include a line to protect them:
>>> prefix = CommentPrefix('#')
>>> prefix.uncomment(['##foo\n', '##bar\n', '#\n'])
['#foo\n', '#bar\n', '\n']
"""
if not lines:
return []
while self.is_commented(lines):
lines = self._force_uncomment(lines)
return lines
def _force_uncomment(self, lines):
"""Unconditionally uncomment a sequence of lines once."""
return [self._prefix_pattern.sub(r'\g<indent>', line)
for line in lines]
def comment(self, lines):
"""Comment a sequence of lines."""
if not self.is_commented(lines):
return self._force_comment(lines)
return lines
def _force_comment(self, lines):
"""Unconditionally comment a sequence of lines."""
indent = common_indent(lines)
indent_len = len(indent)
prefix = self._comment_prefix
return [f'{indent}{prefix}{line[indent_len:]}' for line in lines]
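# A small, hedged demo (not part of the original module): run the file directly
# to see comment/uncomment round-tripping with a '//' prefix.
if __name__ == '__main__':
    _prefix = CommentPrefix('//')
    _lines = ['    let x = 1;\n', '    let y = 2;\n']
    _commented = _prefix.comment(_lines)
    print(_commented)                      # ['    //let x = 1;\n', '    //let y = 2;\n']
    print(_prefix.uncomment(_commented))   # ['    let x = 1;\n', '    let y = 2;\n']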
|
# -*- coding: utf-8 -*-
from .processor import QueryProcessor
class MySqlQueryProcessor(QueryProcessor):
def process_insert_get_id(self, query, sql, values, sequence=None):
"""
Process an "insert get ID" query.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param sql: The sql query to execute
:type sql: str
:param values: The value bindings
:type values: list
:param sequence: The ids sequence
:type sequence: str
:return: The inserted row id
:rtype: int
"""
if not query.get_connection().transaction_level():
with query.get_connection().transaction():
query.get_connection().insert(sql, values)
cursor = query.get_connection().get_cursor()
if hasattr(cursor, 'lastrowid'):
id = cursor.lastrowid
else:
id = query.get_connection().statement('SELECT LAST_INSERT_ID()')
else:
query.get_connection().insert(sql, values)
cursor = query.get_connection().get_cursor()
if hasattr(cursor, 'lastrowid'):
id = cursor.lastrowid
else:
id = query.get_connection().statement('SELECT LAST_INSERT_ID()')
if isinstance(id, int):
return id
if str(id).isdigit():
return int(id)
return id
    def process_column_listing(self, results):
        """
        Process the results of a column listing query
        :param results: The query results
        :type results: dict
        :return: The processed results
        :rtype: list
        """
        return list(map(lambda x: x['column_name'], results))
|
from simmate.toolkit.creators.vector.uniform_distribution import (
UniformlyDistributedVectors,
)
from simmate.toolkit.creators.vector.normal_distribution import (
NormallyDistributedVectors,
)
|
import asyncio
from asyncio import Future
from asyncio.tasks import ensure_future
from functools import partial
from prompt_toolkit.application.current import get_app
from prompt_toolkit.layout.containers import HSplit
from prompt_toolkit.layout.dimension import D
from prompt_toolkit.widgets import Button, Label, ProgressBar
from prompt_toolkit.widgets.dialogs import Dialog
class ProgressDialog:
"""Dialog showing a progress bar, with an optional Cancel button."""
def __init__(self, title, run_callback, show_cancel=True):
"""Creates a dialog object which will show a dialog with a progress bar
and an optional cancel button.
Arguments:
- `title`: Title for the dialog box
- `run_callback`: Function to be called to do the actual work. This must be a
normal, non-async function. It must take two keyword arguments: set_percentage
and is_cancelled. When the function is called, two separate functions will
be passed in as those two arguments. The set_percentage argument can be called
with a number between 0 and 100 to set the progress bar to that value, and the
is_cancelled function will return True if the cancel button has been pressed.
The function given will be called with those two arguments only, if other
arguments need passing then use functools.partial to pass them. The function
must be thread-safe, as it is called in a separate thread.
- `show_cancel`: Whether to show a cancel button or not (boolean, default True)
"""
self.future = Future()
def set_cancelled():
self.cancelled = True
self.future.set_result(None)
cancel_button = Button(text="Cancel", handler=(lambda: set_cancelled()))
self.progressbar = ProgressBar()
self.progressbar.percentage = 0
self.run_callback = run_callback
self.cancelled = False
self.dialog = Dialog(
title=title,
body=HSplit([Label(text="In progress..."), self.progressbar]),
buttons=[cancel_button] if show_cancel else [],
width=D(preferred=80),
modal=True,
)
async def coroutine():
# This runs the run_callback function in a separate thread
# but as part of the asyncio loop, so the GUI can still update
# while a potentially-blocking function runs in the background
try:
loop = asyncio.get_running_loop()
result = await loop.run_in_executor(
None,
partial(
self.run_callback,
set_percentage=self.set_percentage,
is_cancelled=self.is_cancelled,
),
)
self.future.set_result(result)
except Exception as e:
try:
self.future.set_result(e)
except asyncio.InvalidStateError:
pass
ensure_future(coroutine())
def set_percentage(self, value: int) -> None:
self.progressbar.percentage = int(value)
# Refresh the GUI
app = get_app()
app.invalidate()
def is_cancelled(self):
return self.cancelled
def __pt_container__(self):
return self.dialog
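# A hedged sketch of a worker that satisfies the documented run_callback contract
# (an example, not part of this module): it accepts set_percentage and is_cancelled
# keyword arguments and is safe to run in the executor thread.
def example_run_callback(set_percentage, is_cancelled):
    import time
    for step in range(1, 101):
        if is_cancelled():
            return None
        time.sleep(0.05)  # stand-in for real work
        set_percentage(step)
    return 'done'
# It would be passed in as ProgressDialog("Working...", example_run_callback)
# inside a running prompt_toolkit Application.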
|
import json
class Configuration():
# class to organize Netlogo simulation parameters
def __init__(self):
        # constants
self.constants={
'strategy?' : 3,
'drone.radius': 0.2,
'drone.speedMax': 8.5,
'drone.cruisingSpeed': 2,
'drone.acceleration': 2,
'drone.deceleration': -2,
'drone.velocityAngularMax': 2.6,
'drone.accelerationAng': 7,
'drone.decelerationAng': -7,
'drone.endurance': 24,
'sensing.radius': 2.5,
'sensing.angle': 360,
'rectangleBase': 5, #sensingBase
'rectangleHeight': 4, #sensingHeight
'drone.reachable.radius': 4,
'drone.reachable.angle': 360,
'drone.collision.vision': 6,
'drone.sight.angleMax': 60,
'drone.collision.gapAngle': 20
}
#configuration parameters
self.parameters={
'strategy?' : 3,
'drone.radius': 0.2,
'drone.speedMax': 8.5,
'drone.cruisingSpeed': 2,
'drone.acceleration': 2,
'drone.deceleration': -2,
'drone.velocityAngularMax': 2.6,
'drone.accelerationAng': 7,
'drone.decelerationAng': -7,
'drone.endurance': 24,
'sensing.radius': 2.5,
'sensing.angle': 360,
'rectangleBase': 5, #sensingBase
'rectangleHeight': 4, #sensingHeight
'drone.reachable.radius': 4,
'drone.reachable.angle': 360,
'drone.collision.vision': 6,
'drone.sight.angleMax': 60,
'drone.collision.gapAngle': 20,
'mark.radiusTop': 8,
'mark.radiusDown': 18,
'track.evapRate': 0.16,
'olfactoryHabituation': 22,
'drone.flocking.angle': 42,
'drone.flocking.wiggleVar': 14,
'drone.flocking.radiusSeparate': 15,
'drone.flocking.maxSeparateTurn': 33,
'drone.flocking.radiusAlign': 19,
'drone.flocking.maxAlignTurn': 33,
'drone.flocking.radiusCohere': 21,
'drone.flocking.maxCohereTurn': 24
}
#boundaries of parameters
self.paramBoundaries={
'mark.radiusTop': (1,13),
'mark.radiusDown': (13,19),
'track.evapRate': (0.01,0.2),
'olfactoryHabituation': (1,100),
'drone.flocking.angle': (15,45),
'drone.flocking.wiggleVar': (5,15),
'drone.flocking.radiusSeparate': (6,16),
'drone.flocking.maxSeparateTurn': (30,45),
'drone.flocking.radiusAlign': (16,22),
'drone.flocking.maxAlignTurn': (30,45),
'drone.flocking.radiusCohere': (18,26),
'drone.flocking.maxCohereTurn': (15,30)
}
# print parameters and boundaries
def showParameters(self):
for key,value in self.parameters.items():
if key in self.paramBoundaries:
bounds=self.paramBoundaries[key]
print( key,' =',value,' | bounds= ',bounds)
else:
print( key,' =',value,' | bounds= const value')
    # create a bounds list for the differential_evolution algorithm
def createBoundsList(self):
bounds=[]
for key,value in self.paramBoundaries.items():
bounds.append(value)
return bounds
    # add a new parameter (by name) together with its value and bounds
def addParameter(self,name,value,min_bounder,max_bounder):
self.parameters[name]=value
self.paramBoundaries[name]=(min_bounder,max_bounder)
    #remove parameter
    def removeParameter(self,name):
        # capture the values before deleting so they can still be reported
        removed_value = self.parameters[name]
        removed_bounds = self.paramBoundaries[name]
        del self.parameters[name]
        del self.paramBoundaries[name]
        print('removed '
              + name
              + ' : ' + str(removed_value)
              + ', bounds = ' + str(removed_bounds)
              )
    # set parameter values from a given array;
    # the order of values in the array must match
    # the order of Configuration.paramBoundaries
def refreshConfiguration(self,x):
count=0
for key,value in self.paramBoundaries.items():
self.parameters[key]=x[count]
count+=1
for key,value in self.constants.items():
self.parameters[key]=self.constants[key]
print('saved new configuration!')
# save parameters to JSON file
def save_toFile(self):
filename='optimized_parameters.json'
with open(filename,'w') as f_obj:
json.dump(self.parameters,f_obj)
print('saved optimized parameters to file!')
# load parameters from JSON file
def loadParameters_fromFile(self):
filename='optimized_parameters.json'
try:
with open(filename) as f_obj:
self.parameters=json.load(f_obj)
except FileNotFoundError:
print('file not found!')
else:
print('loaded parameters from file!')
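# A hedged usage sketch (not part of the original class): the bounds list feeds
# scipy's differential_evolution; `run_netlogo` is a hypothetical objective that
# would score the NetLogo simulation for a candidate parameter vector x.
#
#   from scipy.optimize import differential_evolution
#   config = Configuration()
#   def objective(x):
#       config.refreshConfiguration(x)
#       return run_netlogo(config.parameters)   # hypothetical scoring call
#   result = differential_evolution(objective, config.createBoundsList())
#   config.refreshConfiguration(result.x)
#   config.save_toFile()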
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Synmock'
copyright = '2020, Ben Granett'
author = 'Ben Granett'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon',
'sphinx.ext.githubpages', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
"""Return the squared distance beetween the intersection of a and b."""
from .intersection_nth_variation import intersection_nth_variation
from typing import Dict
def intersection_squared_variation(a: Dict, b: Dict, overlap: bool = False) -> float:
    """Return the squared distance between the intersection of a and b."""
return intersection_nth_variation(a, b, 2, overlap)
|
class SecurityFlavor(basestring):
"""
any|none|never|krb5|ntlm|sys
Possible values:
<ul>
<li> "any" - Any,
<li> "none" - Anonymous Access Allowed If Security Type
Not Already Listed,
<li> "never" - Never,
<li> "krb5" - Kerberos 5 Authentication,
<li> "ntlm" - CIFS NTLM,
<li> "sys" - NFS AUTH_SYS,
<li> "spinauth" - SpinAuth
</ul>
"""
@staticmethod
def get_api_name():
return "security-flavor"
|
#Weather
#Functions TODO
# precip accumulation works well hourly
# sign up for storm alerts per IKON or EPIC resort
# timer to check the 3-day forecast for storms
# highest winter in state
from datetime import datetime, timedelta
from dateutil import tz
import discord
import googlemaps
import aiohttp
import asyncio
from PIL import Image, ImageDraw, ImageFont
client = discord.Client()
#Keys
gmaps_key = ''
api_key = ''
gmaps = googlemaps.Client(key=gmaps_key)
#Coordinates
latitude = 0
longitude = 0
#URLs
api_url = 'https://api.darksky.net/forecast/'
excludeExceptHourly = "currently,minutely,daily"
excludeExceptDaily = "currently,hourly,minutely"
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
#help()
#func: Takes the message author's mentionable string and returns the command list prefixed with an @mention of the author
#param: author: mentionable string for the author of the message
def help(author):
return author + "\n __**Command List:**__ \n **!help:** Displays list of commands \n **!current location:** Displays hourly weather for specified location \n **!forecast location:** Displays 5 day forecast for specified location"
###
### Helper Functions
###
#get_url()
#func: Receives the message content and the exclusion parameter, splits the message, and treats everything after the command word as the location. Passes the location to the geocoder to obtain coordinates and a formatted address
#params: message: string contents of the message sent, e.g. "!current location"; exclude: string selecting which data blocks to exclude from the API JSON request
#returns URL and Location
def get_url(message, exclude):
temp = message.split()
if len(temp) > 2:
count = 1
location = ""
while count < len(temp):
location = location + " " + temp[count]
count = count + 1
    #only one word follows the command (or none): guard against a missing argument
else:
try:
location = temp[1]
except IndexError:
return "Index Error", None
geocode_result = gmaps.geocode(location)
#if bad input
if not geocode_result:
return "Input Error", None
latitude = geocode_result[0]["geometry"]["location"]['lat']
longitude = geocode_result[0]["geometry"]["location"]['lng']
location = geocode_result[0]["formatted_address"]
# print(geocode_result[0]["geometry"]["location"])
url = api_url + str(api_key) + "/" + str(latitude) + "," + str(longitude) + "?units=us&exclude=" + exclude
return url, location
#time_zone_util()
#func: Receives a time in UTC and a time zone name, converts the time to that zone, and returns the hour in 12-hour format along with AM or PM
def time_zone_util(time, time_zone):
to_zone = tz.gettz(time_zone)
    new_time = int(time.astimezone(to_zone).strftime('%I'))  # int() drops any leading zero; '%I' is portable across platforms
am_pm = time.astimezone(to_zone).strftime('%p')
return new_time, am_pm
###
### Primary Functions
###
#currentWeather()
#func: receives the weather API JSON and the formatted address, samples the hourly data every 3 hours for a total of 12 hours, and renders an image of the values
#params: json_data is the weather API JSON, location is the formatted address for the location
def currentWeather(json_data, location):
count = 0
temp, precipChance, precipType, precipIntensity, icon = [None] * 5, [None] * 5, [None] * 5, [None] * 5, [None] * 5
time = json_data["hourly"]["data"][0]["time"]
time_zone = json_data["timezone"]
    #Loop goes through the JSON data and records the temperature and precip every 3 hours for 12 hours (5 samples)
while count < 5:
hours = 3*count
summary = json_data["hourly"]["summary"]
temp[count]= round(json_data["hourly"]["data"][hours]["temperature"])
icon[count] = json_data["hourly"]["data"][hours]["icon"]
if(icon[count] == "clear-day"):
icon[count] = "clear_day"
if (icon[count] == "clear-night"):
icon[count] = "clear_night"
if (icon[count] == "partly-cloudy-day"):
icon[count] = "partly_cloudy_day"
if (icon[count] == "partly-cloudy-night"):
icon[count] = "partly_cloudy_night"
precipChance[count] = "{:.0%}".format(json_data["hourly"]["data"][hours]["precipProbability"])
if precipChance[count] != "0%" and precipChance[count] != "1%" and precipChance[count] != "2%" and precipChance[count] != "3%" and precipChance[count] != "4%":
precipType[count] = json_data["hourly"]["data"][hours]["precipType"]
precipIntensity[count] = json_data["hourly"]["data"][hours]["precipIntensity"]
if precipType[count] != "snow" and precipIntensity[count] <= .01:
icon[count] = "drizzle"
if precipType[count] != "snow" and .3 <= precipIntensity[count]:
icon[count] = "storm"
count = count + 1
img = Image.new('RGB', (1050, 375), color='white')
#Declare fonts
title_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 50)
location_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 34)
summary_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 21)
time_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 31)
degree_font = ImageFont.truetype('Lib/Fonts/FiraSans-SemiBold.ttf', 34)
precip_font = ImageFont.truetype('Lib/Fonts/FiraSans-Bold.ttf', 24)
precip_value_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 24)
#Icons
clear_day = Image.open('Lib/Icons/Sun.jpg')
clear_night = Image.open('Lib/Icons/Moon.jpg')
rain = Image.open('Lib/Icons/Cloud-Rain.jpg')
partly_cloudy_day = Image.open('Lib/Icons/Cloud-Sun.jpg')
partly_cloudy_night = Image.open('Lib/Icons/Cloud-Moon.jpg')
cloudy = Image.open('Lib/Icons/Cloud.jpg')
snow = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg')
sleet = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg')
wind = Image.open('Lib/Icons/Cloud-Wind.jpg')
fog = Image.open('Lib/Icons/Cloud-Fog-Alt.jpg')
drizzle = Image.open('Lib/Icons/Cloud-Drizzle.jpg')
storm = Image.open('Lib/Icons/Cloud-Lightning.jpg')
#Title + Subtitle
d = ImageDraw.Draw(img)
d.text((35, 11), "Hourly Forecast", font=title_font, fill='black')
d.text((400, 26), location, font=location_font, fill='black')
d.text((35, 68), summary, font=summary_font, fill='black')
# Rectangle
d.rectangle([(24, 96), (218, 352)], fill=(214, 214, 214), outline=None)
d.rectangle([(226, 96), (420, 352)], fill=(214, 214, 214), outline=None)
d.rectangle([(427, 96), (621, 352)], fill=(214, 214, 214), outline=None)
d.rectangle([(629, 96), (823, 352)], fill=(214, 214, 214), outline=None)
d.rectangle([(830, 96), (1024, 352)], fill=(214, 214, 214), outline=None)
# Time
from_zone = tz.gettz('UTC')
utc = datetime.utcnow()
time_utc = utc.replace(tzinfo = from_zone)
time_hour1, am_pm1 = time_zone_util(time_utc, time_zone)
time_hour2,am_pm2 = time_zone_util(time_utc + timedelta(hours=3), time_zone)
time_hour3,am_pm3 = time_zone_util(time_utc + timedelta(hours=6),time_zone)
time_hour4,am_pm4 = time_zone_util(time_utc + timedelta(hours=9),time_zone)
time_hour5,am_pm5 = time_zone_util(time_utc + timedelta(hours=12),time_zone)
# Time Width
time_width, trash = d.textsize(str(time_hour1)+ am_pm1, font=time_font)
time_width2, trash = d.textsize(str(time_hour2)+ am_pm2, font=time_font)
time_width3, trash = d.textsize(str(time_hour3)+ am_pm3, font=time_font)
time_width4, trash = d.textsize(str(time_hour4)+ am_pm4, font=time_font)
time_width5, trash = d.textsize(str(time_hour5)+ am_pm5, font=time_font)
# Time input
d.text((((194 - time_width) / 2) + 24, 105), str(time_hour1) + am_pm1, font=time_font, fill="black")
d.text((((194 - time_width2) / 2) + 226, 105), str(time_hour2) + am_pm2, font=time_font, fill="black")
d.text((((194 - time_width3) / 2) + 427, 105), str(time_hour3) + am_pm3, font=time_font, fill="black")
d.text((((194 - time_width4) / 2) + 629, 105), str(time_hour4) + am_pm4, font=time_font, fill="black")
d.text((((194 - time_width5) / 2) + 830, 105), str(time_hour5) + am_pm5, font=time_font, fill="black")
# Icon
img.paste(eval(icon[0]), (59, 147))
img.paste(eval(icon[1]), (261, 147))
img.paste(eval(icon[2]), (462, 147))
img.paste(eval(icon[3]), (664, 147))
img.paste(eval(icon[4]), (865, 147))
# Degree Text Width
temp_holder = str(str(temp[0]) + u"\u00b0" + "F")
temp_width, throwaway = d.textsize(temp_holder, font=degree_font)
# Degree
d.text((((194 - temp_width) / 2) + 24, 263), str(temp[0]) + u"\u00b0" + "F",font=degree_font, fill="black")
d.text((((194 - temp_width) / 2) + 226, 263), str(temp[1]) + u"\u00b0" + "F",font=degree_font, fill="black")
d.text((((194 - temp_width) / 2) + 427, 263), str(temp[2]) + u"\u00b0" + "F",font=degree_font, fill="black")
d.text((((194 - temp_width) / 2) + 629, 263), str(temp[3]) + u"\u00b0" + "F",font=degree_font, fill="black")
d.text((((194 - temp_width) / 2) + 830, 263), str(temp[4]) + u"\u00b0" + "F",font=degree_font, fill="black")
# Precip
d.text((61, 300), "Precip", font=precip_font, fill=(43, 43, 43))
d.text((263, 300), "Precip", font=precip_font, fill=(43, 43, 43))
d.text((465, 300), "Precip", font=precip_font, fill=(43, 43, 43))
d.text((666, 300), "Precip", font=precip_font, fill=(43, 43, 43))
d.text((867, 300), "Precip", font=precip_font, fill=(43, 43, 43))
# Precip Value
d.text((139, 300), str(precipChance[0]), font=precip_value_font, fill="black")
d.text((341, 300), str(precipChance[1]), font=precip_value_font, fill="black")
d.text((541, 300), str(precipChance[2]), font=precip_value_font, fill="black")
d.text((744, 300), str(precipChance[3]), font=precip_value_font, fill="black")
d.text((945, 300), str(precipChance[4]), font=precip_value_font, fill="black")
img.save("hourly_rendered_image.png")
return
#forecast()
#func: Receives the weather API JSON and the formatted address, collects data for each of the next 5 days, and renders an image of the values
#param: json_data: weather data from the API, location: formatted address of the location
def forecast(json_data, location):
count = 0
    #Loop goes through the JSON data and records the high/low temperature and precip for each of the next 5 days
icon, temp_high, temp_low, precipChance, precipType, precipIntensity = [None] * 5, [None] * 5, [None] * 5, [0] * 5, [None] * 5, [None] * 5
while count < 5:
hours = count
summary = json_data["daily"]["summary"]
temp_high[count] = round(json_data["daily"]["data"][hours]["temperatureHigh"])
temp_low[count] = round(json_data["daily"]["data"][hours]["temperatureLow"])
icon[count] = json_data["daily"]["data"][hours]["icon"]
if(icon[count] == "clear-day"):
icon[count] = "clear_day"
if (icon[count] == "clear-night"):
icon[count] = "clear_night"
if (icon[count] == "partly-cloudy-day"):
icon[count] = "partly_cloudy_day"
if (icon[count] == "partly-cloudy-night"):
icon[count] = "partly_cloudy_night"
precipChance[count] = "{:.0%}".format(json_data["daily"]["data"][hours]["precipProbability"])
        #At a 4% chance or below, the precip type is not looked up
if precipChance[count] != "0%" and precipChance[count] != "1%" and precipChance[count] != "2%" and precipChance[count] != "3%" and precipChance[count] != "4%":
precipType[count] = json_data["daily"]["data"][hours]["precipType"]
precipIntensity[count] = json_data["daily"]["data"][hours]["precipIntensity"]
if precipType[count] != "snow" and precipIntensity[count] <= .01:
icon[count] = "drizzle"
if precipType[count] != "snow" and .3 <= precipIntensity[count]:
icon[count] = "storm"
count+=1
img = Image.new('RGB', (1050, 375), color='white')
#Declare fonts
title_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 50)
location_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 34)
summary_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 21)
day_font = ImageFont.truetype('Lib/Fonts/FiraSans-ExtraBold.ttf', 31)
degree_font = ImageFont.truetype('Lib/Fonts/FiraSans-SemiBold.ttf', 34)
precip_font = ImageFont.truetype('Lib/Fonts/FiraSans-Bold.ttf', 24)
precip_value_font = ImageFont.truetype('Lib/Fonts/FiraSans-Regular.ttf', 24)
#Day Values
day_of_week = datetime.today().weekday()
week = ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"]
forecast_days = [None] * 5
#For Loop to get next 5 days
day_count = 0
for day_count in range(0,5):
forecast_days[day_count] = week[day_of_week]
day_of_week = day_of_week + 1
day_count = day_count + 1
if day_of_week == 7:
day_of_week = 0
#Icons
clear_day = Image.open('Lib/Icons/Sun.jpg')
clear_night = Image.open('Lib/Icons/Moon.jpg')
rain = Image.open('Lib/Icons/Cloud-Rain.jpg')
partly_cloudy_day = Image.open('Lib/Icons/Cloud-Sun.jpg')
partly_cloudy_night = Image.open('Lib/Icons/Cloud-Moon.jpg')
cloudy = Image.open('Lib/Icons/Cloud.jpg')
snow = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg')
sleet = Image.open('Lib/Icons/Cloud-Snow-Alt.jpg')
wind = Image.open('Lib/Icons/Cloud-Wind.jpg')
fog = Image.open('Lib/Icons/Cloud-Fog-Alt.jpg')
drizzle = Image.open('Lib/Icons/Cloud-Drizzle.jpg')
storm = Image.open('Lib/Icons/Cloud-Lightning.jpg')
#Title + Subtitle
d = ImageDraw.Draw(img)
d.text((35, 11), "5 Day Forecast", font=title_font, fill='black')
d.text((375, 26), location, font=location_font, fill='black')
d.text((35, 68), summary, font=summary_font, fill= 'black')
#Rectangle
d.rectangle([(24,96), (218,352)], fill = (214,214,214), outline=None)
d.rectangle([(226,96), (420,352)], fill = (214,214,214), outline=None)
d.rectangle([(427,96), (621,352)], fill = (214,214,214), outline=None)
d.rectangle([(629,96), (823,352)], fill = (214,214,214), outline=None)
d.rectangle([(830,96), (1024,352)], fill = (214,214,214), outline=None)
#Day of The Week Text Width
text_width, trash =d.textsize(forecast_days[0], font=day_font)
text_width2, trash =d.textsize(forecast_days[1], font=day_font)
text_width3, trash =d.textsize(forecast_days[2], font=day_font)
text_width4, trash =d.textsize(forecast_days[3], font=day_font)
text_width5, trash =d.textsize(forecast_days[4], font=day_font)
#Day of The Week
d.text((((194 - text_width) / 2) + 24, 105), forecast_days[0], font=day_font, fill= "black")
d.text((((194 - text_width2) / 2) + 226, 105), forecast_days[1], font=day_font, fill= "black")
d.text((((194 - text_width3) / 2) + 427, 105), forecast_days[2], font=day_font, fill= "black")
d.text((((194 - text_width4) / 2) + 629, 105), forecast_days[3], font=day_font, fill= "black")
d.text((((194 - text_width5) / 2) + 830, 105), forecast_days[4], font=day_font, fill= "black")
#Icon
img.paste(eval(icon[0]), (59, 147))
img.paste(eval(icon[1]), (261, 147))
img.paste(eval(icon[2]), (462, 147))
img.paste(eval(icon[3]), (664, 147))
img.paste(eval(icon[4]), (865, 147))
#Degree Text Width
temp_holder = str(temp_high[0]) + " - " + str(temp_low[0]) + u"\u00b0" + "F"
temp_width, throwaway = d.textsize(temp_holder, font=degree_font)
#Degree
d.text((((194 - temp_width) / 2) + 24, 263), str(temp_high[0]) + " - " + str(temp_low[0]) + u"\u00b0" + "F", font=degree_font, fill= "black")
d.text((((194 - temp_width) / 2) + 226, 263),str(temp_high[1]) + " - " + str(temp_low[1]) + u"\u00b0" + "F", font=degree_font, fill= "black")
d.text((((194 - temp_width) / 2) + 427, 263), str(temp_high[2]) + " - " + str(temp_low[2]) + u"\u00b0" + "F", font=degree_font, fill= "black")
d.text((((194 - temp_width) / 2) + 629, 263), str(temp_high[3]) + " - " + str(temp_low[3]) + u"\u00b0" + "F", font=degree_font, fill= "black")
d.text((((194 - temp_width) / 2) + 830, 263), str(temp_high[4]) + " - " + str(temp_low[4]) + u"\u00b0" + "F", font=degree_font, fill= "black")
#Precip
d.text((61, 300), "Precip", font=precip_font, fill= (43, 43, 43))
d.text((263, 300), "Precip", font=precip_font, fill= (43, 43, 43))
d.text((465, 300), "Precip", font=precip_font, fill= (43, 43, 43))
d.text((666, 300), "Precip", font=precip_font, fill= (43, 43, 43))
d.text((867, 300), "Precip", font=precip_font, fill= (43, 43, 43))
#Precip Value
d.text((139, 300), str(precipChance[0]), font=precip_value_font, fill= "black")
d.text((341, 300), str(precipChance[1]), font=precip_value_font, fill= "black")
d.text((541, 300), str(precipChance[2]), font=precip_value_font, fill= "black")
d.text((744, 300), str(precipChance[3]), font=precip_value_font, fill= "black")
d.text((945, 300), str(precipChance[4]), font=precip_value_font, fill= "black")
img.save("forecast_rendered_image.png")
return
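#Illustrative helper (a sketch, not used by the bot): each block above centres its text with
#((194 - text_width) / 2) + panel_left; a small helper captures that arithmetic in one place.
def center_text_x(draw, text, font, panel_left, panel_width=194):
    #textsize returns the rendered (width, height) of the string in pixels
    text_width, _ = draw.textsize(text, font=font)
    return ((panel_width - text_width) / 2) + panel_left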
#Event Function that activates different functions on command message
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('!help'):
output = help(message.author.mention)
await message.channel.send(output)
if message.content.startswith('!current'):
url, location = get_url(message.content, excludeExceptHourly)
print(url)
if url == "Index Error" or url == "Input Error":
if url == "Index Error":
await message.channel.send(message.author.mention + "\n**Error:** Incorrect format, ```!current location``` ")
if url == "Input Error":
await message.channel.send(message.author.mention + "\n**Error:** Invalid input, input name or address of location ```!current location``` ")
else:
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
if r.status == 200:
json_data = await r.json()
print(await r.json())
output = currentWeather(json_data, location)
await message.channel.send(file=discord.File('hourly_rendered_image.png'))
if message.content.startswith('!forecast'):
url, location = get_url(message.content, excludeExceptDaily)
print(url)
if url == "Index Error" or url == "Input Error":
if url == "Index Error":
await message.channel.send(message.author.mention + "**\nError:** Incorrect format, ```!forecast location``` ")
if url == "Input Error":
await message.channel.send(message.author.mention + "**\nError:** Invalid input, input name or address of location ```!forecast location``` ")
else:
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
if r.status == 200:
json_data = await r.json()
#print(await r.json())
output = forecast(json_data, location)
await message.channel.send(file=discord.File('forecast_rendered_image.png'))
client.run('.XRMUFw.-kdM')
|
from CmdBase import *
from PersistentModules import *
# Cmd
# turn left deg
# turn right deg
# turn to deg
# turn rate deg
class CmdRotate(CmdBase):
def __init__(self, controller, line, engage_object = None):
super().__init__(controller, line, engage_object)
def start(self):
self.mystate_module = self.get_persistent_module('mystate')
self.constants_module = self.get_persistent_module('constants')
self.intent_provider_module = self.get_persistent_module('intent_provider')
self.full_rate = 60
self.low_rate = 10
# Note here we get yaw in radians but later we set it in deg
pitch, roll, yaw = AirSimClientBase.toEulerianAngle(self.mystate_module.get_orientation())
#print("original yaw {0}".format(yaw))
if yaw < 0:
yaw = 2 * 3.14 + yaw
#print("updated yaw {0}".format(yaw))
if (self.line[1] in ['left', 'right', 'to']):
delta = float(self.line[2])*3.14/180
if self.line[1] == 'left':
self.full_rate *= -1
self.low_rate *= -1
yaw -= delta
elif self.line[1] == 'right':
yaw += delta
elif self.line[1] == 'to':
side = 1 # right side
if delta > yaw + 3.14 or (yaw - delta < 3.14 and yaw - delta > 0): # left side # consider current yaw is 0
side = -1
self.full_rate *= side
self.low_rate *= side
yaw = delta
#print("updated 2 yaw {0}".format(yaw))
if yaw > 3.14:
yaw = -2 * 3.14 + yaw
#print("final yaw {0}".format(yaw))
self.final_yaw = yaw
self.intent_provider_module.submit_intent(CmdRotate.__name__,
PModHIntents.ROTATE, [pitch, roll, yaw])
else: # rate
self.rate = float(self.line[2])
self.intent_provider_module.submit_intent(CmdRotate.__name__,
PModHIntents.ROTATE, [self.rate])
def update(self):
if self.line[1] in ['left', 'right', 'to']:
yaw = AirSimClientBase.toEulerianAngle(self.mystate_module.get_orientation())[2]
if yaw < 0:
yaw = 2 * 3.14 + yaw
# Check if movement is complete or < 0.1 angle distance, anyway thats offset
# dist to angle
dist = min(abs(self.final_yaw - yaw), 2 * 3.14 - abs(self.final_yaw - yaw))
#print('{0} {1} {2}'.format(self.final_yaw, yaw, dist))
if abs(dist) < 0.1:
self.get_client().hover()
self.intent_provider_module.mark_as_complete(CmdRotate.__name__)
if self.engage_object != None:
self.engage_object.mark_done()
return True
# Note that this call is cancellable if other movement related call is called
if abs(dist) < 0.5:
self.get_client().rotateByYawRate(self.low_rate, 0.5) # note that this fun uses in degrees (inconsistency)
else: # on full rate
self.get_client().rotateByYawRate(self.full_rate, 0.5) # note that this fun uses in degrees (inconsistency)
return False
        else: # Rate
            self.get_client().rotateByYawRate(self.rate, 0.5)
            return False  # rate mode keeps rotating until another movement command takes over
# Update other can_process
    @staticmethod
    def can_process(line):
try:
if line[0] in ['turn'] and line[1] in ['left', 'right', 'to', 'rate'] and type(float(line[2])) is float:
return True
return False
except: # some error only if command not proper
return False
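# Usage sketch (illustrative, not part of the module): a command dispatcher could call
# can_process() on a tokenized line to decide whether CmdRotate should handle it.
def _demo_can_process():
    for tokens in (['turn', 'left', '90'], ['turn', 'rate', '30'], ['move', 'up', '5']):
        print(tokens, '->', CmdRotate.can_process(tokens))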
|
import pytest
import numpy as np
from numpy import isclose
from numpy.random import RandomState
from cascade_at.model.priors import (
Constant,
Gaussian,
Uniform,
Laplace,
StudentsT,
LogGaussian,
LogLaplace,
LogStudentsT,
PriorError,
)
def test_happy_construction():
Uniform(-1, 1, 0, name="test")
Uniform(-1, 1, 0, 0.5, name="test")
Gaussian(0, 1, -10, 10, name="test2")
Gaussian(0, 1, -10, 10, 0.5, name="test2")
Laplace(0, 1, -10, 10, name="test3")
Laplace(0, 1, -10, 10, 0.5, name="test3")
StudentsT(0, 1, 2.5, -10, 10, name="test4")
LogGaussian(0, 1, 0.5, -10, 10, name="test5")
LogLaplace(0, 1, 0.5, -10, 10, name="test6")
LogStudentsT(0, 1, 2.5, 0.5, -10, 10, name="test7")
def test_prior_equality():
a = Gaussian(0, 1)
b = Gaussian(0, 1)
assert a == b
a = Gaussian(0, 1, -1, 1)
b = Gaussian(0, 1, -1, 1)
assert a == b
a = Uniform(0, 10)
b = Uniform(0, 10)
assert a == b
a = Uniform(0, 10, name="test_prior")
b = Uniform(0, 10, name="test_prior")
assert a == b
def test_prior_nonequality():
a = Gaussian(0, 1)
b = Gaussian(1, 1)
assert a != b
a = Uniform(0, 1)
b = Uniform(-1, 0)
assert a != b
a = Gaussian(0, 1, name="test_prior")
b = Gaussian(0, 1, name="other_test_prior")
assert a != b
a = Gaussian(0, 1)
b = Uniform(0, 1)
assert a != b
def test_prior_sort():
priors = [
Uniform(lower=1e-10, upper=1, mean=5e-5, name="iota"),
Gaussian(0, 1, name="other_test_prior"),
Uniform(0, 1),
]
# NOTE: This is a weak test of actual sorting behavior however all I
# actually care about is that the sort is stable, I don't really care
# what the order is
assert sorted(priors) == sorted(reversed(priors))
def test_prior_hashing():
s = {Gaussian(0, 1), Uniform(0, 1), Gaussian(0, 1), Uniform(0, 2), Uniform(0, 1)}
assert len(s) == 3
assert Gaussian(0, 1) in s
assert Uniform(0, 10) not in s
def test_prior_hashing__near_miss():
assert hash(Gaussian(0, 1.0000000000000001)) == hash(Gaussian(0, 1))
assert hash(Gaussian(0, 1.000000000000001)) != hash(Gaussian(0, 1))
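# Why the near-miss above splits the way it does: 1.0000000000000001 rounds to exactly 1.0
# in 64-bit floating point (the spacing between doubles near 1.0 is about 2.2e-16), so those
# two Gaussians compare equal and hash identically, while 1.000000000000001 (1 + 1e-15) is a
# distinct double and therefore hashes differently.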
def test_bounds_check():
with pytest.raises(PriorError) as excinfo:
Uniform(0, -1, 1)
assert "Bounds are inconsistent" in str(excinfo.value)
def test_validate_standard_deviation():
with pytest.raises(PriorError) as excinfo:
Gaussian(0, -1)
assert "must be positive" in str(excinfo.value)
@pytest.mark.parametrize("bad_nu", [-1, -3, 0, 2, 1.99])
def test_validate_nu(bad_nu):
with pytest.raises(PriorError) as excinfo:
StudentsT(0, 1, bad_nu)
assert "must be greater" in str(excinfo.value)
@pytest.fixture
def rng():
return RandomState(34257234)
def test_const_fit():
"""A constant distribution is unchanged."""
dist = Constant(0.023)
assert isclose(dist.rvs(), 0.023)
assert isclose(dist.mle([6, 24, 327]).mean, 0.023)
def test_uniform_fit(rng):
dist = Uniform(-0.4, 0.6, 0.5)
draws = dist.rvs(size=10000, random_state=rng)
new_dist = dist.mle(draws)
assert isclose(new_dist.mean, 0.1, atol=0.01)
@pytest.mark.parametrize("cls,params", [
(Gaussian, (0.1, 1, -10, 10)),
(Gaussian, (0.1, 1, 0, 0.2)),
(Laplace, (0, 1, -10, 10)),
(StudentsT, (0, 1, 2.7, -10, 10)),
])
def test_mle(cls, params, rng):
dist = cls(*params)
draw_dist = dist
if hasattr(dist, "mean"):
draw_dist = draw_dist.assign(mean=0.1)
if hasattr(dist, "standard_deviation"):
draw_dist = draw_dist.assign(standard_deviation=0.04)
draws = draw_dist.rvs(size=10000, random_state=rng)
assert np.all((dist.lower <= draws) & (draws <= dist.upper))
new_dist = dist.mle(draws)
if hasattr(dist, "mean"):
assert isclose(new_dist.mean, 0.1, rtol=0.2)
if hasattr(dist, "standard_deviation"):
assert isclose(new_dist.standard_deviation, 0.04, rtol=0.2)
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from models.proposal_target_layer_cascade import *
import torchvision.models as models
from models.proposal import *
# backbone and detection-head network definitions
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000,dropout_prob=0.2):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.dropout = nn.Dropout(p=dropout_prob)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.dropout(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class _fasterRCNN(nn.Module):
""" faster RCNN """
def __init__(self, classes, class_agnostic):
super(_fasterRCNN, self).__init__()
self.classes = classes
self.n_classes = len(classes)
self.class_agnostic = class_agnostic
# loss
self.RCNN_loss_cls = 0
self.RCNN_loss_bbox = 0
def forward(self, im_data, im_info, gt_boxes, num_boxes):
batch_size = im_data.size(0)
im_info = im_info.data
gt_boxes = gt_boxes.data
num_boxes = num_boxes.data
# feed image cfgs to base model to obtain base feature map
base_feat = self.RCNN_base(im_data)
# feed base feature map to RPN to obtain rois
rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
# if it is training phase, then use ground truth bboxes for refining
if self.training:
roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)
rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data
rois_label = Variable(rois_label.view(-1).long())
else:
rois_label = None
rpn_loss_cls = 0
rpn_loss_bbox = 0
rois = Variable(rois)
# do roi pooling based on predicted rois
pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))
# feed pooled features to top model
pooled_feat = self._head_to_tail(pooled_feat)
# compute bbox offset
bbox_pred = self.RCNN_bbox_pred(pooled_feat)
if self.training and not self.class_agnostic:
# select the corresponding columns according to roi labels
bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)
bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))
bbox_pred = bbox_pred_select.squeeze(1)
# compute object classification probability
cls_score = self.RCNN_cls_score(pooled_feat)
cls_prob = F.softmax(cls_score, 1)
RCNN_loss_cls = 0
RCNN_loss_bbox = 0
cls_prob = cls_prob.view(batch_size, rois.size(1), -1)
bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)
return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label
    def _init_weights(self):
        def normal_init(m, mean, stddev, truncated=False):
            """
            Weight initializer: truncated normal and random normal.
            """
            # m is a module whose weight (and bias) tensors are filled in place
            if truncated:
                m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
            else:
                m.weight.data.normal_(mean, stddev)
                m.bias.data.zero_()
        # Initialize the classification and regression heads created by _init_modules()
        normal_init(self.RCNN_cls_score, 0, 0.01)
        normal_init(self.RCNN_bbox_pred, 0, 0.001)
def create_architecture(self):
self._init_modules()
self._init_weights()
#
class BasicBlock(nn.Module):
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # 3x3 convolutions used by forward(); the first one carries the stride
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Model(nn.Module):
def __init__(self, model_cfg='datanet.yaml', ch=3, nc=None):
super(Model, self).__init__()
if type(model_cfg) is dict:
self.md = model_cfg
else:
import yaml
with open(model_cfg) as f:
self.md = yaml.load(f, Loader=yaml.FullLoader)
if nc and nc != self.md['nc']:
print('Overriding %s nc=%g with nc=%g' % (model_cfg, self.md['nc'], nc))
self.md['nc'] = nc
        self.model, self.save = parse_model(self.md, ch=[ch])  # build the layer list from the config dict
m = self.model[-1]
if isinstance(m, Detect):
s = 128
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
            self._initialize_biases()
        torch_utils.initialize_weights(self)
torch_utils.model_info(self)
print('')
def forward(self, x, augment=False, profile=False):
if augment:
img_size = x.shape[-2:]
s = [0.83, 0.67]
y = []
for i, xi in enumerate((x,
torch_utils.scale_img(x.flip(3), s[0]),
torch_utils.scale_img(x, s[1]),
)):
y.append(self.forward_once(xi)[0])
y[1][..., :4] /= s[0] # scale
y[1][..., 0] = img_size[1] - y[1][..., 0] # flip lr
y[2][..., :4] /= s[1] # scale
return torch.cat(y, 1), None
else:
return self.forward_once(x, profile)
def forward_once(self, x, profile=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
try:
import thop
o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # FLOPS
except:
o = 0
t = torch_utils.time_synchronized()
for _ in range(10):
_ = m(x)
dt.append((torch_utils.time_synchronized() - t) * 100)
print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if profile:
print('%.1fms total' % sum(dt))
return x
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
m = self.model[-1] # Detect() module
for f, s in zip(m.f, m.stride): # from
mi = self.model[f % m.i]
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def _print_biases(self):
m = self.model[-1] # Detect() module
for f in sorted([x % m.i for x in m.f]): # from
b = self.model[f].bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
print(('%g Conv2d.bias:' + '%10.3g' * 6) % (f, *b[:5].mean(1).tolist(), b[5:].mean()))
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
print('Fusing layers... ', end='')
for m in self.model.modules():
if type(m) is Conv:
m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv
m.bn = None # remove batchnorm
m.forward = m.fuseforward # update forward
torch_utils.model_info(self)
return self
def parse_model(model_dict, ch):  # builds the layer list described by the config dict (distinct from the BasicBlock residual class above)
    anchors, nc, gd, gw = model_dict['anchors'], model_dict['nc'], model_dict['depth_multiple'], model_dict['width_multiple']
na = (len(anchors[0]) // 2) # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(model_dict['backbone'] + model_dict['head']): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:
c1, c2 = ch[f], args[0]
c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3]:
args.insert(2, n)
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
elif m is Detect:
f = f or list(reversed([(-1 if j == i else j - 1) for j, x in enumerate(ch) if x == no]))
else:
c2 = ch[f]
m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum([x.numel() for x in m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
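# Illustrative config sketch (an assumption, not the shipped datanet.yaml): parse_model()
# expects global scaling factors plus 'backbone' and 'head' lists of
# [from, number, module, args] rows, for example:
#     example_model_dict = {
#         'nc': 80, 'depth_multiple': 1.0, 'width_multiple': 1.0,
#         'anchors': [[10, 13, 16, 30, 33, 23]],
#         'backbone': [[-1, 1, 'Conv', [64, 3, 1]],
#                      [-1, 1, 'Conv', [128, 3, 2]]],
#         'head': [[-1, 1, 'nn.Conv2d', [255, 1, 1]]],
#     }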
class vgg16(_fasterRCNN):
def __init__(self, classes, pretrained=False, class_agnostic=False):
self.model_path = 'cfgs/pretrained_model/vgg16_caffe.pth'
self.dout_base_model = 512
self.pretrained = pretrained
self.class_agnostic = class_agnostic
_fasterRCNN.__init__(self, classes, class_agnostic)
def _init_modules(self):
vgg = models.vgg16()
if self.pretrained:
print("Loading pretrained weights from %s" % (self.model_path))
state_dict = torch.load(self.model_path)
vgg.load_state_dict({k: v for k, v in state_dict.items() if k in vgg.state_dict()})
vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])
self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1])
for layer in range(10):
for p in self.RCNN_base[layer].parameters(): p.requires_grad = False
self.RCNN_top = vgg.classifier
self.RCNN_cls_score = nn.Linear(4096, self.n_classes)
if self.class_agnostic:
self.RCNN_bbox_pred = nn.Linear(4096, 4)
else:
self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes)
def _head_to_tail(self, pool5):
pool5_flat = pool5.view(pool5.size(0), -1)
fc7 = self.RCNN_top(pool5_flat)
return fc7
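# Construction sketch (illustrative; the class tuple is a placeholder): create_architecture()
# calls _init_modules() to build the VGG backbone and heads, then _init_weights() on the heads.
def _build_demo_vgg16_detector():
    classes = ('__background__', 'person', 'car', 'dog')
    net = vgg16(classes, pretrained=False, class_agnostic=False)
    net.create_architecture()
    return net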
|
import os
from django.contrib.gis.utils import LayerMapping
from .models import WorldBorder
world_mapping = {
'fips': 'FIPS',
'iso2': 'ISO2',
'iso3': 'ISO3',
'un': 'UN',
'name': 'NAME',
'area': 'AREA',
'pop2005': 'POP2005',
'region': 'REGION',
'subregion': 'SUBREGION',
'lon': 'LON',
'lat': 'LAT',
'mpoly': 'MULTIPOLYGON',
}
world_shp = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'data', 'TM_WORLD_BORDERS-0.3.shp'),
)
def run(verbose=True):
lm = LayerMapping(
WorldBorder, world_shp, world_mapping,
transform=False, encoding='iso-8859-1',
)
lm.save(strict=True, verbose=verbose)
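# Typical invocation (a sketch; assumes this module is importable as, e.g., world.load):
#   $ python manage.py shell
#   >>> from world import load
#   >>> load.run()
# LayerMapping walks every feature in the shapefile and saves a WorldBorder row, copying each
# shapefile field on the right-hand side of world_mapping into the model field on the left.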
|
from django.shortcuts import get_object_or_404
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, generics, views, exceptions
from rest_framework.response import Response
from core import models
from core.models.base import GameStatus
from service import serializers
from service.permissions import IsAuthenticated
# NOTE this could possibly be replaced by using options
def get_game_filter_choices():
return {
'game_statuses': models.base.GameStatus.CHOICES,
'nation_choice_modes': models.base.NationChoiceMode.CHOICES,
'deadlines': models.base.DeadlineFrequency.CHOICES,
'variants': [(v.id, str(v)) for v in models.Variant.objects.all()],
}
class GameFilterChoicesView(views.APIView):
def get(self, request, format=None):
return Response(get_game_filter_choices())
class BaseMixin:
game_key = 'game'
def get_game(self):
return get_object_or_404(
models.Game.objects,
id=self.kwargs[self.game_key],
status=GameStatus.ACTIVE,
participants=self.request.user.id,
)
def get_user_nation_state(self):
game = self.get_game()
return get_object_or_404(
models.NationState.objects,
turn=game.get_current_turn(),
user=self.request.user.id,
)
class ListGames(generics.ListAPIView):
permission_classes = [IsAuthenticated]
queryset = models.Game.objects.all()
serializer_class = serializers.GameSerializer
filter_backends = [
DjangoFilterBackend,
filters.SearchFilter,
filters.OrderingFilter,
]
search_fields = [
'name',
'created_by__username'
]
filterset_fields = [
'variant',
'status',
'num_players',
'nation_choice_mode',
'order_deadline',
'retreat_deadline',
'build_deadline',
]
ordering_fields = [
'created_at',
'initialized_at'
]
class CreateGameView(generics.CreateAPIView):
permission_classes = [IsAuthenticated]
serializer_class = serializers.CreateGameSerializer
def create(self, request, *args, **kwargs):
defaults = {'variant': 1, 'num_players': 7}
request.data.update(defaults)
return super().create(request, *args, **kwargs)
class GameStateView(BaseMixin, generics.RetrieveAPIView):
permission_classes = [IsAuthenticated]
serializer_class = serializers.GameStateSerializer
queryset = models.Game.objects.all()
game_key = 'pk'
class ToggleJoinGame(generics.UpdateAPIView):
permission_classes = [IsAuthenticated]
serializer_class = serializers.GameSerializer
queryset = models.Game.objects.all()
def check_object_permissions(self, request, obj):
if request.user not in obj.participants.all():
if obj.participants.count() >= obj.num_players:
raise exceptions.PermissionDenied(
detail='Game is already full.'
)
if obj.status != GameStatus.PENDING:
raise exceptions.PermissionDenied(
detail='Game is not pending.'
)
class CreateOrderView(BaseMixin, generics.CreateAPIView):
permission_classes = [IsAuthenticated]
serializer_class = serializers.OrderSerializer
def get_serializer_context(self):
context = super().get_serializer_context()
context['nation_state'] = self.get_user_nation_state()
return context
def perform_create(self, serializer):
"""
Delete existing order before creating new order.
"""
models.Order.objects.filter(
source=serializer.validated_data['source'],
turn=serializer.validated_data['turn'],
nation=serializer.validated_data['nation'],
).delete()
super().perform_create(serializer)
class ListOrdersView(BaseMixin, generics.ListAPIView):
permission_classes = [IsAuthenticated]
serializer_class = serializers.OrderSerializer
def get_queryset(self):
game = get_object_or_404(
models.Game.objects,
id=self.kwargs['game'],
)
user_nation_state = models.NationState.objects.filter(
turn=game.get_current_turn(),
user=self.request.user.id,
).first()
if not user_nation_state:
return models.Order.objects.none()
return models.Order.objects.filter(
turn=user_nation_state.turn,
nation=user_nation_state.nation,
)
class RetrievePrivateNationStateView(BaseMixin, generics.RetrieveAPIView):
permission_classes = [IsAuthenticated]
serializer_class = serializers.PrivateNationStateSerializer
def get_object(self):
game = get_object_or_404(
models.Game.objects,
id=self.kwargs['game'],
)
return models.NationState.objects.filter(
turn=game.get_current_turn(),
user=self.request.user.id,
).first()
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
if not instance:
return Response({})
serializer = self.get_serializer(instance)
return Response(serializer.data)
class DestroyOrderView(BaseMixin, generics.DestroyAPIView):
permission_classes = [IsAuthenticated]
serializer_class = serializers.OrderSerializer
queryset = models.Order.objects.all()
def check_object_permissions(self, request, obj):
user_nation_state = self.get_user_nation_state()
if obj.nation != user_nation_state.nation:
raise exceptions.PermissionDenied(
detail='Order does not belong to this user.'
)
class ToggleFinalizeOrdersView(generics.UpdateAPIView):
permission_classes = [IsAuthenticated]
serializer_class = serializers.PublicNationStateSerializer
queryset = models.NationState.objects.filter(
turn__game__status=GameStatus.ACTIVE
)
def check_object_permissions(self, request, obj):
if request.user != obj.user:
raise exceptions.PermissionDenied(
detail='Cannot finalize orders for other nation.'
)
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RgbToHsv and HsvToRgb op in DE
"""
import colorsys
import numpy as np
from numpy.testing import assert_allclose
import mindspore.dataset as ds
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.py_transforms_util as util
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def generate_numpy_random_rgb(shape):
    # Only generate floating points that are fractions like n / 255 (8-bit RGB values rescaled
    # to [0, 1]), since some low-precision floating point types in this test can't handle
    # arbitrary-precision floating points well.
    return np.random.randint(0, 256, shape) / 255.
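# One-pixel sanity check (illustrative): pure red maps to hue 0, full saturation, full value,
# i.e. colorsys.rgb_to_hsv(1.0, 0.0, 0.0) == (0.0, 1.0, 1.0), and colorsys.hsv_to_rgb inverts it.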
def test_rgb_hsv_hwc():
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((8, 8, 3))
hsv_base = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_base = hsv_base.reshape((8, 8, 3))
hsv_de = util.rgb_to_hsvs(rgb_np, True)
assert hsv_base.shape == hsv_de.shape
assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)
hsv_flat = hsv_base.reshape(64, 3)
rgb_base = np.array([
colorsys.hsv_to_rgb(
h.astype(np.float64), s.astype(np.float64), v.astype(np.float64))
for h, s, v in hsv_flat
])
rgb_base = rgb_base.reshape((8, 8, 3))
rgb_de = util.hsv_to_rgbs(hsv_base, True)
assert rgb_base.shape == rgb_de.shape
assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)
def test_rgb_hsv_batch_hwc():
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((4, 2, 8, 3))
hsv_base = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_base = hsv_base.reshape((4, 2, 8, 3))
hsv_de = util.rgb_to_hsvs(rgb_np, True)
assert hsv_base.shape == hsv_de.shape
assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)
hsv_flat = hsv_base.reshape((64, 3))
rgb_base = np.array([
colorsys.hsv_to_rgb(
h.astype(np.float64), s.astype(np.float64), v.astype(np.float64))
for h, s, v in hsv_flat
])
rgb_base = rgb_base.reshape((4, 2, 8, 3))
rgb_de = util.hsv_to_rgbs(hsv_base, True)
assert rgb_de.shape == rgb_base.shape
assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)
def test_rgb_hsv_chw():
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((3, 8, 8))
hsv_base = np.array([
np.vectorize(colorsys.rgb_to_hsv)(
rgb_np[0, :, :].astype(np.float64), rgb_np[1, :, :].astype(np.float64), rgb_np[2, :, :].astype(np.float64))
])
hsv_base = hsv_base.reshape((3, 8, 8))
hsv_de = util.rgb_to_hsvs(rgb_np, False)
assert hsv_base.shape == hsv_de.shape
assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)
rgb_base = np.array([
np.vectorize(colorsys.hsv_to_rgb)(
hsv_base[0, :, :].astype(np.float64), hsv_base[1, :, :].astype(np.float64),
hsv_base[2, :, :].astype(np.float64))
])
rgb_base = rgb_base.reshape((3, 8, 8))
rgb_de = util.hsv_to_rgbs(hsv_base, False)
assert rgb_de.shape == rgb_base.shape
assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)
def test_rgb_hsv_batch_chw():
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
rgb_imgs = rgb_flat.reshape((4, 3, 2, 8))
hsv_base_imgs = np.array([
np.vectorize(colorsys.rgb_to_hsv)(
img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64))
for img in rgb_imgs
])
hsv_de = util.rgb_to_hsvs(rgb_imgs, False)
assert hsv_base_imgs.shape == hsv_de.shape
assert_allclose(hsv_base_imgs.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)
rgb_base = np.array([
np.vectorize(colorsys.hsv_to_rgb)(
img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64))
for img in hsv_base_imgs
])
rgb_de = util.hsv_to_rgbs(hsv_base_imgs, False)
assert rgb_base.shape == rgb_de.shape
assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)
def test_rgb_hsv_pipeline():
# First dataset
transforms1 = [
vision.Decode(True),
vision.Resize([64, 64]),
vision.ToTensor()
]
transforms1 = mindspore.dataset.transforms.transforms.Compose(transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
transforms2 = [
vision.Decode(True),
vision.Resize([64, 64]),
vision.ToTensor(),
vision.RgbToHsv(),
vision.HsvToRgb()
]
transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
ds2 = ds2.map(operations=transform2, input_columns=["image"])
num_iter = 0
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1), ds2.create_dict_iterator(num_epochs=1)):
num_iter += 1
ori_img = data1["image"].asnumpy()
cvt_img = data2["image"].asnumpy()
assert_allclose(ori_img.flatten(), cvt_img.flatten(), rtol=1e-5, atol=0)
assert ori_img.shape == cvt_img.shape
if __name__ == "__main__":
test_rgb_hsv_hwc()
test_rgb_hsv_batch_hwc()
test_rgb_hsv_chw()
test_rgb_hsv_batch_chw()
test_rgb_hsv_pipeline()
|
import numpy as np
import scipy.signal
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.evaluation.postprocessing import Postprocessing
from algorithms.curiosity import INTRINSIC_REWARD
INTRINSIC_VALUE_TARGETS = "intrinsic_value_targets"
INTRINSIC_VF_PREDS = "intrinsic_vf_preds"
def discount(x, gamma):
    # Reverse discounted cumulative sum: out[t] = x[t] + gamma * out[t + 1],
    # computed with an IIR filter over the time-reversed array.
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
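# Worked example (not part of the library): for x = [1, 1, 1] and gamma = 0.5,
#   out[2] = 1.0, out[1] = 1 + 0.5 * 1.0 = 1.5, out[0] = 1 + 0.5 * 1.5 = 1.75,
# so discount(np.array([1.0, 1.0, 1.0]), 0.5) is approximately [1.75, 1.5, 1.0].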
def compute_advantages_intrinsic(rollout,
last_r,
last_intrinsic_r,
gamma=0.9,
intrinsic_gamma=0.9,
lambda_=1.0,
intrinsic_lambda_=1.0):
"""
Given a rollout, compute its value targets and the advantage. Assumes we are using separate
value function heads for the extrinsic and intrinsic rewards
Args:
rollout (SampleBatch): SampleBatch of a single trajectory
        last_r (float): Value estimate for the last observation (extrinsic head)
        last_intrinsic_r (float): Value estimate for the last observation (intrinsic head)
        gamma (float): Discount factor for extrinsic rewards
        intrinsic_gamma (float): Discount factor for intrinsic rewards
lambda_ (float): Parameter for GAE
intrinsic_lambda_ (float): Parameter for intrinsic GAE
Returns:
SampleBatch (SampleBatch): Object with experience from rollout and
processed rewards.
"""
traj = {}
trajsize = len(rollout[SampleBatch.ACTIONS])
for key in rollout:
traj[key] = np.stack(rollout[key])
# Extrinsic value predictions and targets
vpred_t = np.concatenate([rollout[SampleBatch.VF_PREDS], np.array([last_r])])
delta_t = (traj[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1])
advantages = discount(delta_t, gamma * lambda_)
traj[Postprocessing.VALUE_TARGETS] = (
advantages + traj[SampleBatch.VF_PREDS]).copy().astype(np.float32)
# Intrinsic value predictions
intrinsic_vpred_t = np.concatenate([rollout[INTRINSIC_VF_PREDS], np.array([last_intrinsic_r])])
intrinsic_delta_t = (traj[INTRINSIC_REWARD] + intrinsic_gamma * intrinsic_vpred_t[1:] - intrinsic_vpred_t[:-1])
intrinsic_advantages = discount(intrinsic_delta_t, intrinsic_gamma * intrinsic_lambda_)
traj[INTRINSIC_VALUE_TARGETS] = (
intrinsic_advantages + traj[INTRINSIC_VF_PREDS]).copy().astype(np.float32)
traj[Postprocessing.ADVANTAGES] = (advantages + intrinsic_advantages).copy().astype(np.float32)
assert all(val.shape[0] == trajsize for val in traj.values()), \
"Rollout stacked incorrectly!"
return SampleBatch(traj)
|
"""
Based on the implementation of https://github.com/jadore801120/attention-is-all-you-need-pytorch
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from maskrcnn_benchmark.modeling.utils import cat
from .utils_motifs import obj_edge_vectors, to_onehot, nms_overlaps, encode_box_info
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None):
"""
Args:
q (bsz, len_q, dim_q)
k (bsz, len_k, dim_k)
v (bsz, len_v, dim_v)
Note: len_k==len_v, and dim_q==dim_k
Returns:
output (bsz, len_q, dim_v)
attn (bsz, len_q, len_k)
"""
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
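def _attention_shape_demo():
    # Illustrative only (nothing in this module calls it): verify the documented shapes
    # with random tensors and no mask.
    attn_layer = ScaledDotProductAttention(temperature=8 ** 0.5)
    q, k, v = torch.randn(2, 5, 8), torch.randn(2, 5, 8), torch.randn(2, 5, 8)
    output, attn = attn_layer(q, k, v)
    assert output.shape == (2, 5, 8) and attn.shape == (2, 5, 5)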
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k)
self.w_ks = nn.Linear(d_model, n_head * d_k)
self.w_vs = nn.Linear(d_model, n_head * d_v)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
"""
Args:
q (bsz, len_q, dim_q)
k (bsz, len_k, dim_k)
v (bsz, len_v, dim_v)
Note: len_k==len_v, and dim_q==dim_k
Returns:
output (bsz, len_q, d_model)
attn (bsz, len_q, len_k)
"""
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size() # len_k==len_v
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
        if mask is not None:
            mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output, attn
class PositionwiseFeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise
self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
"""
Merge adjacent information. Equal to linear layer if kernel size is 1
Args:
x (bsz, len, dim)
Returns:
output (bsz, len, dim)
"""
residual = x
output = x.transpose(1, 2)
output = self.w_2(F.relu(self.w_1(output)))
output = output.transpose(1, 2)
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
class EncoderLayer(nn.Module):
''' Compose with two layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask)
enc_output *= non_pad_mask.float()
enc_output = self.pos_ffn(enc_output)
enc_output *= non_pad_mask.float()
return enc_output, enc_slf_attn
class TransformerEncoder(nn.Module):
"""
A encoder model with self attention mechanism.
"""
def __init__(self, n_layers, n_head, d_k, d_v, d_model, d_inner, dropout=0.1):
super().__init__()
self.layer_stack = nn.ModuleList([
EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)])
def forward(self, input_feats, num_objs):
"""
Args:
input_feats [Tensor] (#total_box, d_model) : bounding box features of a batch
num_objs [list of int] (bsz, ) : number of bounding box of each image
Returns:
enc_output [Tensor] (#total_box, d_model)
"""
original_input_feats = input_feats
input_feats = input_feats.split(num_objs, dim=0)
input_feats = nn.utils.rnn.pad_sequence(input_feats, batch_first=True)
# -- Prepare masks
bsz = len(num_objs)
device = input_feats.device
pad_len = max(num_objs)
num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len)
slf_attn_mask = torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(1).expand(-1, pad_len, -1) # (bsz, pad_len, pad_len)
non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(num_objs_).unsqueeze(-1) # (bsz, pad_len, 1)
# -- Forward
enc_output = input_feats
for enc_layer in self.layer_stack:
enc_output, enc_slf_attn = enc_layer(
enc_output,
non_pad_mask=non_pad_mask,
slf_attn_mask=slf_attn_mask)
enc_output = enc_output[non_pad_mask.squeeze(-1)]
return enc_output
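# Usage sketch (illustrative; nothing in this module calls it): the encoder consumes the
# concatenated per-image box features together with the per-image box counts.
def _transformer_encoder_demo():
    encoder = TransformerEncoder(n_layers=2, n_head=4, d_k=16, d_v=16,
                                 d_model=64, d_inner=128, dropout=0.1)
    num_objs = [3, 5]                       # two images with 3 and 5 boxes
    feats = torch.randn(sum(num_objs), 64)  # (#total_box, d_model)
    out = encoder(feats, num_objs)
    assert out.shape == feats.shape         # (#total_box, d_model)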
class TransformerContext(nn.Module):
def __init__(self, config, obj_classes, rel_classes, in_channels):
super().__init__()
self.cfg = config
# setting parameters
if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX:
self.mode = 'predcls' if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL else 'sgcls'
else:
self.mode = 'sgdet'
self.obj_classes = obj_classes
self.rel_classes = rel_classes
self.num_obj_cls = len(obj_classes)
self.num_rel_cls = len(rel_classes)
self.in_channels = in_channels
self.obj_dim = in_channels
self.embed_dim = self.cfg.MODEL.ROI_RELATION_HEAD.EMBED_DIM
self.hidden_dim = self.cfg.MODEL.ROI_RELATION_HEAD.CONTEXT_HIDDEN_DIM
self.nms_thresh = self.cfg.TEST.RELATION.LATER_NMS_PREDICTION_THRES
self.dropout_rate = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.DROPOUT_RATE
self.obj_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.OBJ_LAYER
self.edge_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.REL_LAYER
self.num_head = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.NUM_HEAD
self.inner_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.INNER_DIM
self.k_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.KEY_DIM
self.v_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.VAL_DIM
# the following word embedding layer should be initalize by glove.6B before using
embed_vecs = obj_edge_vectors(self.obj_classes, wv_dir=self.cfg.GLOVE_DIR, wv_dim=self.embed_dim)
self.obj_embed1 = nn.Embedding(self.num_obj_cls, self.embed_dim)
self.obj_embed2 = nn.Embedding(self.num_obj_cls, self.embed_dim)
with torch.no_grad():
self.obj_embed1.weight.copy_(embed_vecs, non_blocking=True)
self.obj_embed2.weight.copy_(embed_vecs, non_blocking=True)
# position embedding
self.bbox_embed = nn.Sequential(*[
nn.Linear(9, 32), nn.ReLU(inplace=True), nn.Dropout(0.1),
nn.Linear(32, 128), nn.ReLU(inplace=True), nn.Dropout(0.1),
])
self.lin_obj = nn.Linear(self.in_channels + self.embed_dim + 128, self.hidden_dim)
self.lin_edge = nn.Linear(self.embed_dim + self.hidden_dim + self.in_channels, self.hidden_dim)
self.out_obj = nn.Linear(self.hidden_dim, self.num_obj_cls)
self.context_obj = TransformerEncoder(self.obj_layer, self.num_head, self.k_dim,
self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate)
self.context_edge = TransformerEncoder(self.edge_layer, self.num_head, self.k_dim,
self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate)
def forward(self, roi_features, proposals, logger=None):
# labels will be used in DecoderRNN during training
use_gt_label = self.training or self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL
obj_labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0) if use_gt_label else None
# label/logits embedding will be used as input
if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL:
obj_embed = self.obj_embed1(obj_labels)
else:
obj_logits = cat([proposal.get_field("predict_logits") for proposal in proposals], dim=0).detach()
obj_embed = F.softmax(obj_logits, dim=1) @ self.obj_embed1.weight
# bbox embedding will be used as input
assert proposals[0].mode == 'xyxy'
pos_embed = self.bbox_embed(encode_box_info(proposals))
# encode objects with transformer
obj_pre_rep = cat((roi_features, obj_embed, pos_embed), -1)
num_objs = [len(p) for p in proposals]
obj_pre_rep = self.lin_obj(obj_pre_rep)
obj_feats = self.context_obj(obj_pre_rep, num_objs)
# predict obj_dists and obj_preds
if self.mode == 'predcls':
obj_preds = obj_labels
obj_dists = to_onehot(obj_preds, self.num_obj_cls)
edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_labels)), dim=-1)
else:
obj_dists = self.out_obj(obj_feats)
use_decoder_nms = self.mode == 'sgdet' and not self.training
if use_decoder_nms:
boxes_per_cls = [proposal.get_field('boxes_per_cls') for proposal in proposals]
obj_preds = self.nms_per_cls(obj_dists, boxes_per_cls, num_objs)
else:
obj_preds = obj_dists[:, 1:].max(1)[1] + 1
edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_preds)), dim=-1)
# edge context
edge_pre_rep = self.lin_edge(edge_pre_rep)
edge_ctx = self.context_edge(edge_pre_rep, num_objs)
return obj_dists, obj_preds, edge_ctx
def nms_per_cls(self, obj_dists, boxes_per_cls, num_objs):
obj_dists = obj_dists.split(num_objs, dim=0)
obj_preds = []
for i in range(len(num_objs)):
is_overlap = nms_overlaps(boxes_per_cls[i]).cpu().numpy() >= self.nms_thresh # (#box, #box, #class)
out_dists_sampled = F.softmax(obj_dists[i], -1).cpu().numpy()
out_dists_sampled[:, 0] = -1
out_label = obj_dists[i].new(num_objs[i]).fill_(0)
            for _ in range(num_objs[i]):  # greedily assign one class label per box
box_ind, cls_ind = np.unravel_index(out_dists_sampled.argmax(), out_dists_sampled.shape)
out_label[int(box_ind)] = int(cls_ind)
out_dists_sampled[is_overlap[box_ind,:,cls_ind], cls_ind] = 0.0
out_dists_sampled[box_ind] = -1.0 # This way we won't re-sample
obj_preds.append(out_label.long())
obj_preds = torch.cat(obj_preds, dim=0)
return obj_preds
|
import logging
import numpy as np
import os
import PIL
import PIL.Image
import tensorflow as tf
from tensorflow.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras import layers
from tensorflow.keras import Model
img_height = 224
img_width = 224
batch_size = 64
data_dir = './100-bird-species/'
data_dir_train = os.path.join(data_dir, 'train')
data_dir_valid = os.path.join(data_dir, 'valid')
data_dir_test = os.path.join(data_dir, 'test')
train_ds = tf.keras.utils.image_dataset_from_directory(
data_dir_train,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
valid_ds = tf.keras.utils.image_dataset_from_directory(
data_dir_valid,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
test_ds = tf.keras.utils.image_dataset_from_directory(
data_dir_test,
label_mode='categorical',
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
def normalize(img, label):
return img / 255.0, label
data_augmentation = tf.keras.Sequential([
tf.keras.layers.RandomFlip("horizontal"),
tf.keras.layers.RandomRotation(0.2),
tf.keras.layers.RandomZoom(0.2)
])
train_dataset = (train_ds
.map(normalize)
.map(lambda x, y: (data_augmentation(x), y))
.prefetch(tf.data.AUTOTUNE))
valid_dataset = valid_ds.map(normalize)
test_dataset = test_ds.map(normalize)
def get_birds_mobilenet():
pre_trained_model = MobileNetV2(
include_top=False,
input_shape=(img_height, img_width, 3),
classifier_activation='softmax'
)
for layer in pre_trained_model.layers:
layer.trainable = False
    # pre_trained_model.output is a tensor, so trainability is controlled on the layers above;
    # only the new classification head built below is trained.
    last_layer = pre_trained_model.output
    x = GlobalAveragePooling2D()(last_layer)
x = Dense(1024, activation='relu')(x)
x = layers.Dense(325, activation='softmax')(x)
model = Model(pre_trained_model.input, x)
return model
model = get_birds_mobilenet()
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
checkpoint_path = "./checkpoints/birds_mobilenet/"
# Resume only if a previous run has already written weights to this path.
if os.path.exists(checkpoint_path):
    model.load_weights(checkpoint_path)
model_history = model.fit(
train_dataset,
validation_data=valid_dataset,
epochs=200,
callbacks=[
#tf.keras.callbacks.EarlyStopping(patience=5),
tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path, verbose=0, save_freq="epoch")
])
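# Inference sketch (illustrative; assumes the index-to-species mapping follows the training
# directory order):
def predict_single_image(path):
    img = tf.keras.utils.load_img(path, target_size=(img_height, img_width))
    arr = tf.keras.utils.img_to_array(img)[np.newaxis, ...] / 255.0  # same scaling as normalize()
    probs = model.predict(arr)[0]
    return int(np.argmax(probs)), float(np.max(probs))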
|
import torch.nn as nn
from .base import BaseLM
class IpaLM(BaseLM):
name = 'lstm'
def __init__(self, vocab_size, hidden_size, nlayers=1, dropout=0.1, embedding_size=None, **kwargs):
super().__init__(
vocab_size, hidden_size, nlayers=nlayers, dropout=dropout, embedding_size=embedding_size, **kwargs)
self.embedding = nn.Embedding(vocab_size, self.embedding_size)
self.lstm = nn.LSTM(
self.embedding_size, hidden_size, nlayers, dropout=(dropout if nlayers > 1 else 0), batch_first=True)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(hidden_size, vocab_size)
def forward(self, x, idx):
h_old = self.context(idx)
x_emb = self.dropout(self.get_embedding(x))
        output, h_t = self.lstm(x_emb, h_old)  # output: per-step hidden states; h_t: (h_n, c_n)
        output = self.dropout(output).contiguous()
        logits = self.out(output)
return logits, h_t
def get_embedding(self, x):
return self.embedding(x)
def initHidden(self, bsz=1):
weight = next(self.parameters()).data
return weight.new(self.nlayers, bsz, self.hidden_size).zero_(), \
weight.new(self.nlayers, bsz, self.hidden_size).zero_()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import itertools
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as np
import jax.test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
def rng():
return onp.random.RandomState(0)
class EinsumTest(jtu.JaxTestCase):
def _check(self, s, *ops):
a = onp.einsum(s, *ops)
b = np.einsum(s, *ops)
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4, check_dtypes=True)
def test_three_operands_1(self):
r = rng()
x = r.randn(3)
y = r.randn(4)
z = r.randn(5)
s = 'i,j,k->ijk'
self._check(s, x, y, z)
def test_three_operands_2(self):
r = rng()
x = r.randn(3)
y = r.randn(4)
z = r.randn(5)
s = 'i,j,k->ijk'
self._check(s, x, y, z)
def test_two_operands_1(self):
r = rng()
x = r.randn(3, 4)
y = r.randn(4)
s = 'ij,j->i'
self._check(s, x, y)
def test_two_operands_2(self):
r = rng()
x = r.randn(3, 4, 5)
y = r.randn(4)
s = 'ijk,j->i'
self._check(s, x, y)
def test_two_operands_3(self):
r = rng()
x = r.randn(3, 4, 3)
y = r.randn(3)
s = 'iji,i->j'
self._check(s, x, y)
def test_two_operands_4(self):
r = rng()
x = r.randn(3, 4)
y = r.randn(3, 4)
s = 'ij,ij->'
self._check(s, x, y)
def test_two_operands_5(self):
r = rng()
x = r.randn(10, 2, 3)
y = r.randn(3, 4)
s = 'nij,jk->nik'
self._check(s, x, y)
def test_two_operands_6(self):
# based on https://github.com/google/jax/issues/37#issuecomment-448572187
r = rng()
x = r.randn(2, 1)
y = r.randn(2, 3, 4)
s = 'sa,shb->shab'
self._check(s, x, y)
def test_one_operand_1(self):
r = rng()
x = r.randn(3, 4, 5)
s = 'ijk->j'
self._check(s, x)
def test_one_operand_2(self):
r = rng()
x = r.randn(3, 4, 5)
s = 'ijk->kij'
self._check(s, x)
def test_one_operand_3(self):
r = rng()
x = r.randn(3, 4, 5)
s = 'ijk->ki'
self._check(s, x)
def test_one_operand_4(self):
r = rng()
x = r.randn(3, 4, 5)
s = 'ijk->ki'
self._check(s, x)
def test_one_operand_5(self):
r = rng()
x = r.randn(2, 3, 4, 5)
s = '...ijk->...ki'
self._check(s, x)
def test_one_operand_6(self):
r = rng()
x = r.randn(3, 4, 5)
s = '...ijk->ki'
self._check(s, x)
def test_one_operand_7(self):
r = rng()
x = r.randn(3, 3)
s = 'ii->'
self._check(s, x)
def test_one_operand_8(self):
r = rng()
x = r.randn(3, 3)
s = 'ij->'
self._check(s, x)
def test_one_operand_9(self):
r = rng()
x = r.randn(3, 3, 3)
s = 'iii->'
self._check(s, x)
def test_one_operand_10(self):
r = rng()
x = r.randn(3, 3)
s = 'ii->i'
self._check(s, x)
def test_one_operand_11(self):
r = rng()
x = r.randn(3, 3, 4)
s = 'iij->i'
self._check(s, x)
def test_one_operand_12(self):
r = rng()
x = r.randn(3, 3, 3)
s = 'iii->i'
self._check(s, x)
def test_one_operand_13(self):
r = rng()
x = r.randn(3, 3, 5, 4, 4)
s = 'iijkk->i'
self._check(s, x)
def test_one_operand_14(self):
r = rng()
x = r.randn(3, 3, 5, 4, 4)
s = 'iijkk->ik'
self._check(s, x)
def test_one_operand_15(self):
r = rng()
x = r.randn(3, 3, 5, 4, 4)
s = 'iijkl->il'
self._check(s, x)
def test_one_operand_16(self):
r = rng()
x = r.randn(3, 3)
s = 'ij->ij'
self._check(s, x)
def test_tf_unsupported_1(self):
# from https://www.tensorflow.org/api_docs/python/tf/einsum
r = rng()
x = r.randn(2, 3, 5, 1)
y = r.randn(3, 4, 5, 1)
s = 'ij...,jk...->ik...'
self._check(s, x, y)
def test_tf_unsupported_2(self):
# from https://www.tensorflow.org/api_docs/python/tf/einsum
r = rng()
x = r.randn(2, 3, 3)
y = r.randn(4)
s = 'ijj,k->ik'
self._check(s, x, y)
def test_tf_unsupported_3(self):
# from https://www.tensorflow.org/api_docs/python/tf/einsum
r = rng()
x = r.randn(2, 3)
y = r.randn(2, 3)
z = r.randn(3, 4)
s = 'ij,ij,jk->ik'
self._check(s, x, y, z)
# these tests are based on https://github.com/dask/dask/pull/3412/files
@parameterized.named_parameters(
{"testcase_name": "_{}".format(einstr), "einstr": einstr}
for einstr in [
'abc,bad->abcd',
'abcdef,bcdfg->abcdeg',
'ea,fb,abcd,gc,hd->efgh',
'ab,b',
'aa',
'a,a->',
'a,a->a',
'a,a',
'a,b',
'a,b,c',
'a',
'ba,b',
'ba,b->',
'defab,fedbc->defac',
'ab...,bc...->ac...',
'a...a',
'abc...->cba...',
'...ab->...a',
'a...a->a...',
# Following 2 from # https://stackoverflow.com/a/19203475/1611416
'...abc,...abcd->...d',
'ab...,b->ab...',
# https://github.com/dask/dask/pull/3412#discussion_r182413444
'aa->a',
'ab,ab,c->c',
'aab,bc->ac',
'aab,bcc->ac',
'fdf,cdd,ccd,afe->ae',
'fff,fae,bef,def->abd',
])
def test_from_dask(self, einstr):
r = rng()
if '->' in einstr:
input_str, result_names = einstr.split('->')
else:
input_str = einstr
input_names = input_str.split(',')
dims = itertools.cycle([2, 3, 4])
shapes = defaultdict(lambda: next(dims))
input_shapes = [tuple(shapes[c] for c in names.replace('...', '01'))
for names in input_names]
operands = [r.randn(*shape) for shape in input_shapes]
self._check(einstr, *operands)
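    # Shape bookkeeping in test_from_dask (illustrative): letters are sized from the 2, 3, 4
    # cycle in order of first appearance, so 'abc,bad->abcd' builds operands of shape
    # (2, 3, 4) and (3, 2, 2); a '...' is expanded to two placeholder axes '0' and '1'.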
def test_ordered_front_batch_dim_case(self):
x = onp.ones((1,8,20,4))
y = onp.ones((1,8,20,4))
s = 'ijkl,ijml->ijkm'
self._check(s, x, y)
if __name__ == '__main__':
absltest.main()
|
#
# PySNMP MIB module SW-STRCTURE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SW-STRCTURE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:12:42 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Bits, Counter32, NotificationType, TimeTicks, ObjectIdentity, Integer32, NotificationType, Counter64, Gauge32, Unsigned32, IpAddress, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "Counter32", "NotificationType", "TimeTicks", "ObjectIdentity", "Integer32", "NotificationType", "Counter64", "Gauge32", "Unsigned32", "IpAddress", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
marconi = MibIdentifier((1, 3, 6, 1, 4, 1, 326))
systems = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2))
external = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20))
dlink = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1))
dlinkcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 1))
golf = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2))
golfproducts = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1))
es2000 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3))
golfcommon = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2))
marconi_mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2)).setLabel("marconi-mgmt")
es2000Mgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28))
swStructure = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1))
swStructInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1))
swStructDevType = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 1), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructDevType.setStatus('mandatory')
if mibBuilder.loadTexts: swStructDevType.setDescription('Specifies the device type.')
swStructDevDescr = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructDevDescr.setStatus('mandatory')
if mibBuilder.loadTexts: swStructDevDescr.setDescription('Describes the type of the device.')
swStructDevPortEncodingFactor = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructDevPortEncodingFactor.setStatus('mandatory')
if mibBuilder.loadTexts: swStructDevPortEncodingFactor.setDescription('The factor to encode the global port ID from unit ID and the local port ID. This global port ID is required to access the bridge MIB and spanning tree MIB defined by the standard body. This global port ID will provide a unique port ID for each port across the entire device. Example: suppose that the encoding factor is 16, then port 2 located on module 2 will be encoded as port 18')
swStructDevLedInfo = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructDevLedInfo.setStatus('mandatory')
if mibBuilder.loadTexts: swStructDevLedInfo.setDescription('Provides the LED informations of the cpu slot. bit7 - cpu status(always 1) bit6 - console status(0: console not in used, 1: console in used) bit5 - power status(always 1) bit 4 ~ bit 0 - not used.')
swStructDevMaxModuleNum = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructDevMaxModuleNum.setStatus('mandatory')
if mibBuilder.loadTexts: swStructDevMaxModuleNum.setDescription('Maximum number of modules allowed on the unit.')
swStructDevMaxPortNum = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructDevMaxPortNum.setStatus('mandatory')
if mibBuilder.loadTexts: swStructDevMaxPortNum.setDescription('Maximum number of ports allowed on the unit.')
swStructDevNumOfPortInUse = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructDevNumOfPortInUse.setStatus('mandatory')
if mibBuilder.loadTexts: swStructDevNumOfPortInUse.setDescription('Number of ports that currently have a link connected.')
swStructDevOperStatus = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("notAvail", 2), ("removed", 3), ("disabled", 4), ("normal", 5), ("nonFatalErr", 9), ("fatalErr", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructDevOperStatus.setStatus('mandatory')
if mibBuilder.loadTexts: swStructDevOperStatus.setDescription('Describes the operation status for the unit.')
swStructDevLastChange = MibScalar((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructDevLastChange.setStatus('mandatory')
if mibBuilder.loadTexts: swStructDevLastChange.setDescription('Provides the time at which the unit last came up.')
swStructModuleTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2), )
if mibBuilder.loadTexts: swStructModuleTable.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleTable.setDescription('A table that contains information about a module.')
swStructModuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1), ).setIndexNames((0, "SW-STRCTURE-MIB", "swStructModuleUnitIndex"), (0, "SW-STRCTURE-MIB", "swStructModuleIndex"))
if mibBuilder.loadTexts: swStructModuleEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleEntry.setDescription('A list of information for a module.')
swStructModuleUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleUnitIndex.setDescription('ID of the unit in the device.')
swStructModuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleIndex.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleIndex.setDescription('ID of the Module in the device.')
swStructModuleType = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 3), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleType.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleType.setDescription('Type of the module.')
swStructModuleDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleDescr.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleDescr.setDescription('Type of the module in displayed string format.')
swStructModuleVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleVersion.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleVersion.setDescription('Provides PCB version of the module.')
swStructModuleMaxPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleMaxPortNum.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleMaxPortNum.setDescription('Maximum number of ports allowed on the module.')
swStructModuleEncodingOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleEncodingOffset.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleEncodingOffset.setDescription('Each module has an offset for encoding the port ID relative to a unit. This encoding provides a unique port ID for the ports located on the device. Example: if the offset for module 2 is 16, then port 2 located on module 2 will be encoded as port 18.')
swStructModuleLEDInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleLEDInfo.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleLEDInfo.setDescription('Gets the LED information for the specified module.')
swStructModuleOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("notAvail", 2), ("removed", 3), ("disabled", 4), ("normal", 5), ("nonFatalErr", 9), ("fatalErr", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleOperStatus.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleOperStatus.setDescription('Provides operation status of the module.')
swStructModuleLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructModuleLastChange.setStatus('mandatory')
if mibBuilder.loadTexts: swStructModuleLastChange.setDescription('Provides the time at which the module last came up.')
swStructPowerTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3), )
if mibBuilder.loadTexts: swStructPowerTable.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerTable.setDescription('A table that contains information about every power supply.')
swStructPowerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1), ).setIndexNames((0, "SW-STRCTURE-MIB", "swStructPowerIndex"))
if mibBuilder.loadTexts: swStructPowerEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerEntry.setDescription('A list of information for each power supply.')
swStructPowerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructPowerIndex.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerIndex.setDescription('ID of the power supply in the unit.')
swStructPowerInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructPowerInfo.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerInfo.setDescription('Displays information about the power supply, including vendor, version and so on.')
swStructPowerTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructPowerTemperature.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerTemperature.setDescription('Displays the temperature of the power supply in degrees Fahrenheit.')
swStructPowerVolt = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructPowerVolt.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerVolt.setDescription('Displays the voltage of the power supply in volts (V).')
swStructPowerAmp = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructPowerAmp.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerAmp.setDescription('Displays the current of the power supply in amperes (A).')
swStructPowerFan1Status = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fanOk", 1), ("fanFail", 2), ("other", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructPowerFan1Status.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerFan1Status.setDescription('Describes the operation status of the power fan1.')
swStructPowerFan2Status = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fanOk", 1), ("fanFail", 2), ("other", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructPowerFan2Status.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerFan2Status.setDescription('Describes the operation status of the power fan2.')
swStructPowerStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("acFailPsFail", 1), ("acPresentPsFail", 2), ("psGood", 3), ("other", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructPowerStatus.setStatus('mandatory')
if mibBuilder.loadTexts: swStructPowerStatus.setDescription('Describes the operation status of the power supply.')
swStructSystemFanTable = MibTable((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4), )
if mibBuilder.loadTexts: swStructSystemFanTable.setStatus('mandatory')
if mibBuilder.loadTexts: swStructSystemFanTable.setDescription('A table that contains information about the system fans.')
swStructSystemFanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4, 1), ).setIndexNames((0, "SW-STRCTURE-MIB", "swStructSystemFanIndex"))
if mibBuilder.loadTexts: swStructSystemFanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: swStructSystemFanEntry.setDescription('A list of information for each system fan.')
swStructSystemFanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructSystemFanIndex.setStatus('mandatory')
if mibBuilder.loadTexts: swStructSystemFanIndex.setDescription('ID of the designated system fan.')
swStructSystemFanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 2, 2, 28, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fanOk", 1), ("fanFail", 2), ("other", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swStructSystemFanStatus.setStatus('mandatory')
if mibBuilder.loadTexts: swStructSystemFanStatus.setDescription('Describes the operation status of the system fans.')
powerTemperatureWarnning = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,5)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex"))
if mibBuilder.loadTexts: powerTemperatureWarnning.setDescription('The trap is sent whenever the power supply enters the temperature warning state.')
powerVoltWarnning = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,6)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex"))
if mibBuilder.loadTexts: powerVoltWarnning.setDescription('The trap is sent whenever the power supply enters the voltage warning state.')
powerCurrentWarnning = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,7)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex"))
if mibBuilder.loadTexts: powerCurrentWarnning.setDescription('The trap is sent whenever the power supply enters the current warning state.')
powerFan1Fail = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,8)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex"))
if mibBuilder.loadTexts: powerFan1Fail.setDescription('The trap is sent whenever the power supply enters the power fan1 fail state.')
powerFan2Fail = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,9)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex"))
if mibBuilder.loadTexts: powerFan2Fail.setDescription('The trap is sent whenever the power supply enters the power fan2 fail state.')
systemFanFail = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,10)).setObjects(("SW-STRCTURE-MIB", "swStructSystemFanIndex"))
if mibBuilder.loadTexts: systemFanFail.setDescription('The trap is sent whenever a system fan enters the fail state.')
powerRemoved = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,11)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex"))
if mibBuilder.loadTexts: powerRemoved.setDescription('The trap is sent whenever the power is removed.')
powerInserted = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,12)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex"))
if mibBuilder.loadTexts: powerInserted.setDescription('The trap is sent whenever the power is inserted.')
powerBad = NotificationType((1, 3, 6, 1, 4, 1, 326, 2, 20, 1, 2, 1, 3) + (0,13)).setObjects(("SW-STRCTURE-MIB", "swStructPowerIndex"))
if mibBuilder.loadTexts: powerBad.setDescription('The trap is sent whenever the power is bad.')
mibBuilder.exportSymbols("SW-STRCTURE-MIB", swStructPowerInfo=swStructPowerInfo, swStructPowerAmp=swStructPowerAmp, dlinkcommon=dlinkcommon, swStructModuleType=swStructModuleType, swStructModuleMaxPortNum=swStructModuleMaxPortNum, powerCurrentWarnning=powerCurrentWarnning, swStructDevMaxPortNum=swStructDevMaxPortNum, swStructPowerTable=swStructPowerTable, swStructDevPortEncodingFactor=swStructDevPortEncodingFactor, dlink=dlink, swStructPowerFan1Status=swStructPowerFan1Status, swStructModuleOperStatus=swStructModuleOperStatus, powerInserted=powerInserted, external=external, swStructPowerVolt=swStructPowerVolt, powerBad=powerBad, swStructModuleIndex=swStructModuleIndex, swStructModuleVersion=swStructModuleVersion, es2000Mgmt=es2000Mgmt, swStructModuleEntry=swStructModuleEntry, golfcommon=golfcommon, swStructPowerEntry=swStructPowerEntry, swStructModuleUnitIndex=swStructModuleUnitIndex, swStructPowerStatus=swStructPowerStatus, swStructModuleDescr=swStructModuleDescr, swStructModuleLEDInfo=swStructModuleLEDInfo, powerFan1Fail=powerFan1Fail, swStructPowerTemperature=swStructPowerTemperature, swStructModuleTable=swStructModuleTable, swStructDevLastChange=swStructDevLastChange, swStructDevType=swStructDevType, swStructPowerFan2Status=swStructPowerFan2Status, swStructDevMaxModuleNum=swStructDevMaxModuleNum, es2000=es2000, swStructModuleLastChange=swStructModuleLastChange, marconi=marconi, swStructSystemFanStatus=swStructSystemFanStatus, swStructModuleEncodingOffset=swStructModuleEncodingOffset, powerRemoved=powerRemoved, swStructInfo=swStructInfo, systemFanFail=systemFanFail, swStructSystemFanEntry=swStructSystemFanEntry, swStructSystemFanIndex=swStructSystemFanIndex, swStructDevOperStatus=swStructDevOperStatus, golf=golf, swStructSystemFanTable=swStructSystemFanTable, marconi_mgmt=marconi_mgmt, swStructPowerIndex=swStructPowerIndex, powerVoltWarnning=powerVoltWarnning, powerFan2Fail=powerFan2Fail, systems=systems, swStructDevDescr=swStructDevDescr, swStructDevNumOfPortInUse=swStructDevNumOfPortInUse, golfproducts=golfproducts, powerTemperatureWarnning=powerTemperatureWarnning, swStructDevLedInfo=swStructDevLedInfo, swStructure=swStructure)
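# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated module): read swStructDevType from a
# hypothetical agent with the pysnmp high-level API. The host address and
# community string below are placeholders.
if __name__ == '__main__':
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    error_indication, error_status, error_index, var_binds = next(getCmd(
        SnmpEngine(), CommunityData('public'),
        UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
        ObjectType(ObjectIdentity('1.3.6.1.4.1.326.2.20.1.2.2.2.28.1.1.1.0'))))
    if error_indication:
        print(error_indication)
    else:
        for name, value in var_binds:
            print(name.prettyPrint(), '=', value.prettyPrint())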
|
from manimlib.constants import *
from manimlib.mobject.types.vectorized_mobject import VMobject, VGroup
from manimlib.mobject.geometry import Arc, Line, Dot, Polygon, Sector, Circle
from manimlib.utils.color import color_gradient, average_color
from manimlib.mobject.number_line import DecimalNumber
from manimlib.mobject.svg.tex_mobject import TexMobject
from manimlib.mobject.svg.text_mobject import Text
from manimlib.utils.rate_functions import linear, smooth
from manimlib.utils.space_ops import *
class Arcs(VGroup):
CONFIG = {
'colors': [RED, YELLOW, BLUE, PINK],
'radius': 1,
'start_angle':0,
'angle_list': [30 * DEGREES, 60 * DEGREES, 90 * DEGREES],
'stroke_width': 40,
}
def __init__(self, **kwargs):
VMobject.__init__(self, **kwargs)
self.create_arcs()
def create_arcs(self, **kwargs):
angle = self.start_angle
colors = color_gradient(self.colors, len(self.angle_list))
for i in range(len(self.angle_list)):
self.add(Arc(radius=self.radius, start_angle=angle, angle=self.angle_list[i], color=colors[i], stroke_width=self.stroke_width, **kwargs))
angle += self.angle_list[i]
class Angle(VGroup):
CONFIG = {
'radius': 1,
'color': RED,
'opacity': 0.4,
'stroke_width': 10,
# 'below_180': True,
}
def __init__(self, A, O, B, **kwargs):
VMobject.__init__(self, **kwargs)
OA, OB = A-O, B-O
theta = np.angle(complex(*OA[:2])/complex(*OB[:2])) # angle of OB to OA
self.add(Arc(start_angle=Line(O, B).get_angle(), angle=theta, radius=self.radius/2,
stroke_width=100 * self.radius, color=self.color).set_stroke(opacity=self.opacity).move_arc_center_to(O))
self.add(Arc(start_angle=Line(O, B).get_angle(), angle=theta, radius=self.radius,
stroke_width=self.stroke_width, color=self.color).move_arc_center_to(O))
class Tracked_Point(VGroup):
CONFIG = {
'size': 0.1,
'point_color': BLUE,
'num_decimal_places': 2,
'coordinates_scale': 0.8,
'coordinates_color': GREEN,
'coordinates_direction': DOWN * 0.25,
'bracket_color': WHITE,
}
def __init__(self, init_loc=ORIGIN, **kwargs):
VGroup.__init__(self, **kwargs)
self.point = Dot(init_loc, color=self.point_color).set_height(self.size)
self.value_x = DecimalNumber(0, color=self.coordinates_color, num_decimal_places=self.num_decimal_places).scale(self.coordinates_scale)
self.value_y = DecimalNumber(0, color=self.coordinates_color, num_decimal_places=self.num_decimal_places).scale(self.coordinates_scale)
text = TexMobject('(', ',', ')').scale(self.coordinates_scale)
self.coordinates_text = VGroup(text[0], self.value_x, text[1], self.value_y, text[2])
self.coordinates_text.add_updater(self.update_coordinates_text)
self.add(self.point)
def update_coordinates_text(self, coords):
for i in range(1, len(coords)):
coords[i].next_to(coords[i-1], RIGHT * 0.5)
coords[2].align_to(coords[1], DOWN)
pos = self.point.get_center()
x, y = self.mapping_func(pos[0], pos[1])
coords[1].set_value(x)
coords[3].set_value(y)
coords.next_to(self.point, self.coordinates_direction)
def mapping_func(self, x, y):
return x, y
class Dashed_Circle(VGroup):
CONFIG = {
'arc_ratio': 0.6,
'arc_num': 36,
'arc_config':{
'color': WHITE,
'stroke_width': 2.5,
},
}
def __init__(self, radius=1, center=ORIGIN, **kwargs):
VGroup.__init__(self, **kwargs)
theta = TAU/self.arc_num
for i in range(self.arc_num):
arc_i = Arc(radius=radius, angle=theta * self.arc_ratio, **self.arc_config)
arc_i.rotate(theta * i, about_point=ORIGIN)
self.add(arc_i)
self.move_to(center)
class Right_angle(VGroup):
CONFIG = {
'size': 0.25,
'stroke_color': WHITE,
'stroke_width': 3.2,
'fill_color': BLUE,
'fill_opacity': 0.5,
'on_the_right': True,
}
def __init__(self, corner=ORIGIN, angle=0, **kwargs):
VGroup.__init__(self, **kwargs)
self.corner = ORIGIN
self.angle = 0
r = UR if self.on_the_right else UL
self.add(Polygon(ORIGIN, RIGHT * self.size * r, UR * self.size * r, UP * self.size * r, stroke_width=0,
fill_color=self.fill_color, fill_opacity=self.fill_opacity),
Line(RIGHT * self.size * r, UR * self.size * r + UP * self.stroke_width/100/2 * 0.8, stroke_width=self.stroke_width, stroke_color=self.stroke_color),
Line(UR * self.size * r + RIGHT * self.stroke_width/100/2 * r * 0.8, UP * self.size * r, stroke_width=self.stroke_width, stroke_color=self.stroke_color),
)
self.move_corner_to(corner)
self.change_angle_to(angle)
def move_corner_to(self, new_corner):
self.shift(new_corner - self.corner)
self.corner = new_corner
return self
def change_angle_to(self, new_angle):
self.rotate(new_angle - self.angle, about_point=self.corner)
self.angle = new_angle
return self
class Trail(VGroup):
CONFIG = {
'max_width': 5,
'nums': 500,
'trail_color': BLUE_B,
# 'rate_func': linear,
'rate_func': lambda t: t ** 1.25,
}
def __init__(self, mob, **kwargs):
VGroup.__init__(self, **kwargs)
self.add(mob)
self.trail = VGroup()
self.path_xyz = []
self.add(self.trail)
self.pos_old = self[0].get_center()
if type(self.trail_color) != str:
self.colors = color_gradient(self.trail_color, self.nums)
# def update_trail(self, trail):
# err=1e-5
# pos_new = self[0].get_center()
# pos_old = self.pos_old
# self.pos_old = pos_new
# # if np.sqrt(sum((pos_new - pos_old) ** 2))>err:
# if sum(abs(pos_new - pos_old))>err:
# trail.add(Line(pos_old, pos_new, color=self.trail_color, plot_depth=0))
#
# if len(trail) > self.nums:
# trail.remove(trail[0])
# # for k in range(self.nums):
# # trail[k].set_stroke(width=self.max_width * self.rate_func(k/self.nums),
# # opacity=self.rate_func(k/self.nums))
# for l in trail:
# k = trail.submobjects.index(l)
# l.set_stroke(width=self.max_width * self.rate_func(k/self.nums),
# opacity=self.rate_func(k/self.nums))
#
# if len(trail) <= self.nums and len(trail) > 0:
# # for k in range(len(trail)):
# # trail[k].set_stroke(width=self.max_width * self.rate_func(k/len(trail)),
# # opacity=self.rate_func(k/len(trail)))
# for l in trail:
# k = trail.submobjects.index(l)
# l.set_stroke(width=self.max_width * self.rate_func(k/len(trail)),
# opacity=self.rate_func(k/len(trail)))
def get_path_xyz(self, err=1e-6):
pos_new = self[0].get_center()
pos_old = self.pos_old
if sum(abs(pos_new - pos_old))>err:
self.path_xyz.append(pos_new)
self.pos_old = pos_new
while len(self.path_xyz) > self.nums:
self.path_xyz.remove(self.path_xyz[0])
def create_path(self):
path = VGroup()
self.get_path_xyz()
if len(self.path_xyz) > 1:
for i in range(len(self.path_xyz)-1):
if type(self.trail_color) == str:
path.add(Line(self.path_xyz[i], self.path_xyz[i+1], stroke_color=self.trail_color,
stroke_opacity=self.rate_func(i/len(self.path_xyz)), plot_depth=self.rate_func(2-i/len(self.path_xyz)),
stroke_width=self.max_width * self.rate_func(i/len(self.path_xyz))))
else:
path.add(Line(self.path_xyz[i], self.path_xyz[i+1], stroke_color=self.colors[i],
stroke_opacity=self.rate_func(i/len(self.path_xyz)), plot_depth=self.rate_func(2-i/len(self.path_xyz)),
stroke_width=self.max_width * self.rate_func(i/len(self.path_xyz))))
# print('i = %d' % i)
# # print(self.path_xyz)
# print(self.color)
# print(self.rate_func(i/len(self.path_xyz)))
# print(self.max_width*self.rate_func(i/len(self.path_xyz)))
return path
def update_path(self, trail):
trail.become(self.create_path())
def start_trace(self):
# self.trail.add_updater(self.update_trail)
self.trail.add_updater(self.update_path)
def stop_trace(self):
self.trail.remove_updater(self.update_path)
def decrease_trail_num(self, trail, dt):
if self.nums > max(self.min_num, 2):
if self.nums <= 2:
trail.become(VGroup())
else:
self.nums -= self.rate
if self.nums < 2:
self.nums = 2
trail.become(self.create_path())
def retrieve_trail(self, rate=2, min_num=0):
# self.stop_trace()
self.nums = len(self.trail)
self.min_num = min_num
self.rate = rate
self.trail.add_updater(self.decrease_trail_num)
class Sun(VGroup):
CONFIG = {
'colors': [RED_B, ORANGE, WHITE],
# 'opacity_func': lambda t: 1.1 - t ** 0.24 if t < 0.1 else 1 - 0.95 * t ** 0.18 - 0.05 * t ** 0.05,
# 'opacity_func': lambda t: 1000 * (1 - t ** 0.00012) if t < 0.1 else 0.75 * (1 - t ** 0.21),
# 'opacity_func': lambda t: 1250 * (1 - abs(t-0.006) ** 0.0001) if t < 0.12 else 0.72 * (1 - t ** 0.2),
'opacity_func': lambda t: 1500 * (1 - abs(t-0.009) ** 0.0001),
'radius': 4,
'layer_num': 80,
# 'rate_func': smooth,
'rate_func': lambda t: t ** 2,
}
def __init__(self, **kwargs):
VGroup.__init__(self, **kwargs)
self.color_list = color_gradient(self.colors, self.layer_num)
self.add(Dot(color=average_color(self.colors[0], WHITE), plot_depth=4).set_height(0.015 * self.radius))
for i in range(self.layer_num):
# self.add(Arc(radius= self.radius/self.layer_num * (0.5 + i), angle=TAU, color=self.color_list[i],
# stroke_width=100 * self.radius/self.layer_num,
# stroke_opacity=self.opacity_func(i/self.layer_num), plot_depth=5))
self.add(Arc(radius= self.radius * self.rate_func((0.5 + i)/self.layer_num), angle=TAU, color=self.color_list[i],
stroke_width=101 * (self.rate_func((i + 1)/self.layer_num) - self.rate_func(i/self.layer_num)) * self.radius,
stroke_opacity=self.opacity_func(self.rate_func(i/self.layer_num)), plot_depth=5))
class Three_Body(VGroup):
CONFIG = {
'mass': np.array([0.98, 1.025, 1]) * 1.2,
'pos': np.array([[-3., -np.sqrt(3), 0], [0., 3 * np.sqrt(3) - 1, 0], [3, -np.sqrt(3), 0]]) * 0.75,
'velocity': np.array([[1, -np.sqrt(3), 0], [-2, 0, 0], [1, np.sqrt(3), 0]]) * 0.8,
'p_pos': np.array([2, -np.sqrt(3)+1, 0]) * 1.,
'p_velocity':np.array([-1, -1.7, 0]) * 2.4,
'plot_depth':5,
}
def __init__(self, *three_Mobject, **kwargs):
VGroup.__init__(self, **kwargs)
self.sun_01 = three_Mobject[0].move_to(self.pos[0])
self.sun_02 = three_Mobject[1].move_to(self.pos[1])
self.sun_03 = three_Mobject[2].move_to(self.pos[2])
if len(three_Mobject) > 3:
self.planet = three_Mobject[3].move_to(self.p_pos)
self.add(self.sun_01, self.sun_02, self.sun_03)
if len(three_Mobject) > 3:
self.planet = three_Mobject[3].move_to(self.p_pos)
self.add(self.planet)
def get_force(self, x1, x2, m1, m2, G=1):
# force of obj_01 to obj_02, this vector start from obj_02 and end in obj_01
r = np.sqrt(sum((x1 - x2) ** 2))
return G * m1 * m2 * (x1 - x2) / (r ** 3 + 2e-3)
def update_xyz(self, G=1, delta_t =2.5e-3):
m1, m2, m3 = self.mass[0], self.mass[1], self.mass[2]
x1, x2, x3 = self.pos[0], self.pos[1], self.pos[2]
v1, v2, v3 = self.velocity[0], self.velocity[1], self.velocity[2]
f21, f31, f32 = self.get_force(x2, x1, m2, m1, G=G), self.get_force(x3, x1, m3, m1, G=G), self.get_force(x3, x2, m3, m2, G=G)
a1, a2, a3 = (f21 + f31) / m1, (-f21 + f32) / m2, (-f32 - f31) / m3
xp, vp = self.p_pos, self.p_velocity
f1, f2, f3 = self.get_force(x1, xp, m1, 1, G=G), self.get_force(x2, xp, m2, 1, G=G), self.get_force(x3, xp, m3, 1, G=G)
a = (f1 + f2 + f3) / 1.
self.velocity[0] += a1 * delta_t
self.velocity[1] += a2 * delta_t
self.velocity[2] += a3 * delta_t
self.p_velocity += a * delta_t
self.pos[0] += v1 * delta_t
self.pos[1] += v2 * delta_t
self.pos[2] += v3 * delta_t
self.p_pos += vp *delta_t
def reset_velocity(self):
v1, v2, v3 = self.velocity[0], self.velocity[1], self.velocity[2]
m1, m2, m3 = self.mass[0], self.mass[1], self.mass[2]
momentum = v1 * m1 + v2 * m2 + v3 * m3
v = momentum/(m1 + m2 + m3)
v1, v2, v3 = v1 - v, v2 - v, v3 - v
print(v1, v2, v3)
self.p_velocity -= v
self.velocity = np.array([v1, v2, v3])
def update_three_body(self, tb, dt):
self.update_xyz(G=40)
# avervage_pos = (self.pos[0] + self.pos[1] + self.pos[2]) / 3
# tb[0].move_to(self.pos[0] - avervage_pos)
# tb[1].move_to(self.pos[1] - avervage_pos)
# tb[2].move_to(self.pos[2] - avervage_pos)
# if len(tb)>3:
# tb[3].move_to(self.p_pos - avervage_pos)
tb[0].move_to(self.pos[0])
tb[1].move_to(self.pos[1])
tb[2].move_to(self.pos[2])
if len(tb)>3:
tb[3].move_to(self.p_pos)
def start_move(self):
self.add_updater(self.update_three_body)
class MySectors(VGroup):
CONFIG = {
'stroke_width': 0,
'fill_opacity': 1,
'inner_radius': 1.6,
# 'outer_radius': [],
'gap': 0.025,
'start_direction': UP,
'values': [1,2,3],
'labels': None,
# 'data': {'labels': 1.23},
'unit': None,
# 'data_2d': None,
'outer_radius_func': lambda t: t/10 + 0.32,
'label_font': '思源黑体 Bold',
'center': ORIGIN,
}
def __init__(self, **kwargs):
VGroup.__init__(self, **kwargs)
self.colors = color_gradient([ORANGE, RED, PINK, BLUE, GREEN, YELLOW], len(self.values))
self.sectors, self.labels_group = VGroup(), VGroup()
self.sectors = self.create_sectors()
if self.labels is not None:
self.labels_group = self.create_label()
self.add(self.sectors, self.labels_group)
def create_sectors(self):
angle = TAU/len(self.values)
colors = self.colors
start_a = np.angle(complex(*self.start_direction[0:2]))
for i in range(len(self.values)):
r_i = self.inner_radius + self.outer_radius_func(self.values[i])
sector_i = Sector(arc_center=self.center, inner_radius=self.inner_radius, outer_radius=r_i,
stroke_width=self.stroke_width, start_angle=start_a + i * angle,
angle=angle * (1 - self.gap), color=colors[i], fill_opacity=self.fill_opacity)
self.sectors.add(sector_i)
return self.sectors
def create_label(self):
for tex, value in zip(self.labels, self.values):
i = self.labels.index(tex)
r = self.inner_radius + self.outer_radius_func(self.values[i])
size = TAU * r / len(self.values) * 0.2
tex_i = Text(tex, font=self.label_font, color=WHITE, plot_depth=1).set_height(size)
value_i = Text(str(value), font=self.label_font, color=WHITE, plot_depth=1).set_height(size).next_to(tex_i, DOWN * 0.64 * size)
if self.unit is not None:
unit_i = Text(self.unit, font=self.label_font, color=WHITE, plot_depth=1).set_height(size).next_to(value_i, RIGHT * 0.2 * size)
VGroup(value_i, unit_i).next_to(tex_i, DOWN * 0.64 * size)
label_i = VGroup(tex_i, value_i, unit_i)
else:
label_i = VGroup(tex_i, value_i)
angle = TAU/len(self.values)
start_a = np.angle(complex(*self.start_direction[0:2]))
self.labels_group.add(label_i.shift(self.center + complex_to_R3((r-size * 1.2-r*0.05) * np.exp(1j * (start_a + (i + 0.5) * TAU/len(self.values))))))
return self.labels_group
def create_cicles(self, color=BLUE_A):
circle_01 = Circle(radius=self.inner_radius, stroke_width=12, stroke_color=color, plot_depth=2.5)
circle_02 = Circle(radius=self.inner_radius - 0.15, stroke_width=4, stroke_color=color, plot_depth=2.5)
self.circles = VGroup(circle_01, circle_02).move_to(self.center)
self.add(self.circles)
return self.circles
def create_circle_shadow(self, width=32, num=50, color=BLUE_A):
self.shadow = VGroup(*[Circle(radius=self.inner_radius + (i+0.5) * width/100/num, stroke_width=width/num, stroke_color=color,
stroke_opacity=(i-num) ** 2 * 1/num/num, plot_depth=2) for i in range(num+1)]).move_to(self.center)
self.add(self.shadow)
return self.shadow
class New_Polygon(VGroup):
CONFIG = {
'stroke_color': BLUE,
'stroke_width': 4,
'fill_color': BLUE_B,
'fill_opacity': 0,
}
def __init__(self, *vertices, **kwargs):
VGroup.__init__(self, **kwargs)
self.lines, self.dots = VGroup(plot_depth=1), VGroup(plot_depth=1)
self.poly=Polygon(*vertices, fill_color=self.fill_color,
fill_opacity=self.fill_opacity, plot_depth=0).set_stroke(width=0)
self.add(self.poly, self.lines, self.dots)
n = len(vertices)
for i in range(n):
self.lines.add(Line(vertices[i], vertices[(i+1) % n], color=self.stroke_color,
stroke_width=self.stroke_width))
self.dots.add(Dot(vertices[i], color=self.stroke_color).set_height(self.stroke_width/100))
for dot in self.dots:
dot.add_updater(lambda d: d.set_height(self.stroke_width/100))
class MySector(VGroup):
CONFIG = {
'label': 'label',
'font': '思源黑体 Bold',
'value': 1,
}
def __init__(self, ):
pass
class Shadow_2d(VGroup):
CONFIG = {
'shadow_color': DARK_GRAY,
'shadow_opacity': 0.6,
'blur_width': 0.25,
'layer_num': 40,
'scale_factor': 1,
'shadow_out': True,
'show_basic_shape': True,
'plot_depth':-1,
'rate_func': lambda t: t ** 0.5,
}
def __init__(self, mob_or_points, **kwargs):
VGroup.__init__(self, **kwargs)
if type(mob_or_points) == list:
self.shape = Polygon(*mob_or_points, stroke_width=0, plot_depth=-1)
else:
self.shape = mob_or_points.set_stroke(width=0)
self.shape.set_fill(color=self.shadow_color, opacity=self.shadow_opacity * (1 if self.show_basic_shape else 0)).scale(self.scale_factor)
self.blur_outline = VGroup()
s = (self.shape.get_height() + self.shape.get_width())/2
if self.blur_width > 1e-4:
for i in range(self.layer_num):
layer_i = self.shape.copy().set_stroke(color=self.shadow_color, width=100 * self.blur_width/self.layer_num, opacity=self.shadow_opacity * (1-self.rate_func(i/self.layer_num))).\
set_fill(opacity=0).scale((s + (1 if self.shadow_out else -1) * self.blur_width/self.layer_num * (i+0.5))/ s).set_plot_depth(-2)
self.blur_outline.add(layer_i)
self.add(self.shape, self.blur_outline)
class TransformMobject(VGroup):
CONFIG = {
'rotate_angle': PI/2,
'shift_vect': ORIGIN,
# 'path': ,
'scale_range': (1, 1e-3),
'stroke_colors': [RED, PINK, BLUE],
'num': 10,
'rate_func': linear,
'scale_type': 0,
}
def __init__(self, mob, **kwargs):
VGroup.__init__(self, **kwargs)
if type(self.stroke_colors) == list:
stroke_colors = color_gradient(self.stroke_colors, self.num)
else:
stroke_colors = color_gradient([self.stroke_colors, self.stroke_colors], self.num)
for i in range(self.num):
t = i/(self.num-1)
shift_i = self.rate_func(t) * self.shift_vect
# scale_i = min(self.scale_range) + self.rate_func(t) * (max(self.scale_range)-min(self.scale_range))
if self.scale_type == 0:
scale_i = np.exp(np.log(self.scale_range[0]) + self.rate_func(t) * (np.log(self.scale_range[1])-np.log(self.scale_range[0])))
else:
scale_i = self.scale_range[0] + self.rate_func(t) * (self.scale_range[1]-self.scale_range[0])
theta_i = self.rate_func(t) * self.rotate_angle
mob_i = mob.copy().shift(shift_i)
mob_i.scale(scale_i, about_point=mob_i.get_center_of_mass()).rotate(theta_i, about_point=mob_i.get_center_of_mass()).set_stroke(color=stroke_colors[i])
self.add(mob_i)
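# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): mark the angle
# between two segments with the Angle class defined above. The Scene import
# path assumes the stock manimlib layout used by the other imports in this
# file; adjust it if this project vendors a different fork.
from manimlib.scene.scene import Scene
class AngleDemoScene(Scene):
    def construct(self):
        O, A, B = ORIGIN, RIGHT * 2.5, UP * 1.8
        self.add(Line(O, A), Line(O, B))        # the two rays of the angle
        self.add(Angle(A, O, B, radius=0.8))    # shaded arc marking angle AOB
        self.wait()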
|
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
class ExtraValidation(Callback):
"""Log evaluation metrics of an extra validation set. This callback
is useful for model training scenarios where multiple validation sets
are used for evaluation (by default, Keras only provides functionality for
evaluating on a single validation set).
The evaluation metrics are also logged to TensorBoard.
Args:
validation_data: A tf.data.Dataset pipeline used to evaluate the
model, essentially an extra validation dataset.
tensorboard_path: Path to the TensorBoard logging directory.
validation_freq: Number of epochs to wait before performing
subsequent evaluations.
"""
def __init__(self, validation_data, tensorboard_path, validation_freq=1):
super(ExtraValidation, self).__init__()
self.validation_data = validation_data
self.tensorboard_path = tensorboard_path
self.tensorboard_writer = tf.summary.create_file_writer(self.tensorboard_path)
self.validation_freq = validation_freq
def on_epoch_end(self, epoch, logs=None):
# evaluate at an interval of `validation_freq` epochs
if (epoch + 1) % self.validation_freq == 0:
# gather metric names from the model
metric_names = ['{}_{}'.format('epoch', metric.name)
for metric in self.model.metrics]
# TODO: fix `model.evaluate` memory leak on TPU
# gather the evaluation metrics
scores = self.model.evaluate(self.validation_data, verbose=2)
# log the evaluation metrics to TensorBoard
with self.tensorboard_writer.as_default():
for metric_name, score in zip(metric_names, scores):
tf.summary.scalar(metric_name, score, step=epoch)
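# ---------------------------------------------------------------------------
# Usage sketch (assumed, not from the source): train a tiny model on synthetic
# data with the extra-validation callback attached. Paths, shapes and the toy
# model are placeholders for illustration.
if __name__ == "__main__":
    import numpy as np
    x = np.random.rand(64, 4).astype("float32")
    y = (x.sum(axis=1) > 2).astype("float32")
    train_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
    extra_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy",
                  metrics=["accuracy"])
    model.fit(train_ds, epochs=2,
              callbacks=[ExtraValidation(extra_ds, "logs/extra_val")])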
|
from .anoflows import AnoFlows
|
from flask_unchained.bundles.admin import ModelAdmin
from flask_unchained.bundles.admin.templates import details_link, edit_link
from ..models import Role
class RoleAdmin(ModelAdmin):
model = Role
name = 'Roles'
category_name = 'Security'
menu_icon_value = 'fa fa-check'
column_searchable_list = ('name',)
column_sortable_list = ('name',)
column_formatters = dict(name=details_link('role'))
column_formatters_detail = dict(name=edit_link('role'))
form_columns = ('name',)
column_details_list = ('id', 'name', 'created_at', 'updated_at')
|
"""image_optimizer_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
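# Example (illustrative only) of the "Including another URLconf" pattern from
# the docstring above; 'optimizer.urls' is a hypothetical app module.
# from django.urls import include
# urlpatterns += [path('optimized/', include('optimizer.urls'))]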
|
countries = ["Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua and Barbuda", "Argentina", "Armenia",
"Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus",
"Belgium", "Belize", "Benin", "Bhutan", "Bolivia", "Bosnia and Herzegovina", "Botswana", "Brazil",
"Brunei", "Bulgaria", "Burkina Faso", "Burundi", "Cambodia", "Cameroon", "Canada", "Cape Verde",
"Central African Republic", "Chad", "Chile", "China", "Colombia", "Comoros", "Congo", "Cook Islands",
"Costa Rica", "Cote d'Ivoire", "Croatia", "Cuba", "Cyprus", "Czech Republic",
"Democratic Republic of Congo", "Denmark", "Djibouti", "Dominica", "Dominican Republic", "Ecuador",
"Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia", "Ethiopia", "Fiji", "Finland",
"France", "Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea",
"Guinea-Bissau", "Guyana", "Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran",
"Iraq", "Ireland", "Israel", "Italy", "Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati",
"Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libya", "Lithuania",
"Luxembourg", "Macedonia", "Madagascar", "Malawi", "Malaysia", "Maldives", "Mali", "Malta",
"Marshall Islands", "Mauritania", "Mauritius", "Mexico", "Micronesia (country)", "Moldova", "Mongolia",
"Montenegro", "Morocco", "Mozambique", "Myanmar", "Namibia", "Nauru", "Nepal", "Netherlands",
"New Zealand", "Nicaragua", "Niger", "Nigeria", "Niue", "North Korea", "Norway", "Oman", "Pakistan",
"Palau", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Poland", "Portugal", "Qatar",
"Romania", "Russia", "Rwanda", "Saint Kitts and Nevis", "Saint Lucia",
"Saint Vincent and the Grenadines", "Samoa", "Sao Tome and Principe", "Saudi Arabia", "Senegal",
"Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia", "Solomon Islands",
"Somalia", "South Africa", "South Korea", "Spain", "Sri Lanka", "Sudan (former)", "Suriname",
"Swaziland", "Sweden", "Switzerland", "Syria", "Tajikistan", "Tanzania", "Thailand", "Timor", "Togo",
"Tonga", "Trinidad and Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu", "Uganda", "Ukraine",
"United Arab Emirates", "United Kingdom", "United States", "Uruguay", "Uzbekistan", "Vanuatu",
"Venezuela", "Vietnam", "Yemen", "Zambia", "Zimbabwe"]
res = {'06HONGKONG4795': {'date': {'month': 'DEC', 'year': '2006'},
'entity_involved': ['RHMFIUU/HQ', 'USDOC', 'OEA', 'LHINES/DFARROW USDOC', 'FCS',
'the Export Administration Act', 'the Office of Enforcement Analysis',
'the USDOC Bureau of Industry and Security', 'BIS', 'Export Control',
'Advanced Energy-Shenzhen ', 'Baltrans', 'ECCN',
'International Rectifier of Leominster', 'International Rectifier',
'Advanced Energy', 'ECO', 'Airfreight Operations', 'Operations Manager',
'Airfreight', 'Federal Express', "Advanced Energy's", 'BIS '],
'from': 'AMCONSUL HONG KONG',
'keywords': ['subject', 'ankel', 'providers', 'street', 'route'],
'most_common_words': [('Advanced', 14), ('Energy', 14), ('Baltrans', 10), ('Mr.', 10),
('Lam', 9), ('shipment', 8), ('Hong', 8), ('Kong', 8), ('items', 8),
('ECO', 6), ('USDOC', 5), ('export', 5), ('OEA', 4), ('provided', 4)],
'people_involved': ['RUCPDOC', 'RUEHC', 'SIPDIS ', 'WILLIAM ZARIT ', 'BMGT BEXP', 'ETRD ETTC',
'Philip Ankel', 'Tai Yip Street', 'Theodore Shum', 'Gordon Lam', 'Lam',
'Cunningham'],
'place_involved': ['KOWLOON', 'HONG KONG', 'CHINA', 'MASSACHUSETTS', 'UNITED STATES',
'SHENZHEN'],
'place_of_document': 'HONGKONG',
'subject': 'EXTRANCHECK: POST SHIPMENT VERIFICATION: ADVANCED ' 'ENERGY-SHENZHEN C/O '
'BALTRANS LOGISTRIC ',
'tags': ['BMGT', 'BEXP', 'HK', 'ETRD', 'ETTC']},
'06HOCHIMINHCITY917': {'date': {'month': 'AUG', 'year': '2006'},
'entity_involved': ['RUEHC/SECSTATE WASHDC PRIORITY', 'RUCNARF', 'RUEHHM/AMCONSUL HO',
'PHUM PGOV PREF KIRF', 'Consul General', 'State',
'the Montagnard Foundation', 'ConGen', 'GVN',
'Southern Evangelical Church of Vietnam', 'Dak Nong', 'SBU',
'Vietnamese Embassy', 'PNTR', 'Congress', 'WINNICK'],
'from': 'AMCONSUL HO CHI MINH CITY',
'keywords': ['subject', 'migrants', 'congress', 'collective', 'leader'],
'most_common_words': [('police', 12), ('ethnic', 7), ('minority', 7), ('Adrong', 7),
('contact', 7), ('province', 6), ('HCMC', 5), ('United', 5),
('States', 5), ('Central', 5), ('Highlands', 5), ('SECV', 5),
('contacts', 4)],
'people_involved': ['RUEHCHI RUEHDT RUEHNH', 'HO CHI MINH CITY', '000917 ', 'SIPDIS ',
'E.O.', 'DECL', 'Seth Winnick', 'Y Ngo Adrong', 'Adrong', 'Siu Y Kim',
'Gia Lai', 'Chu Se', 'Kim', 'Dega', 'Phu Yen'],
'place_involved': ['CENTRAL HIGHLANDS', 'HCMC', 'UNITED STATES', 'DAK LAK', 'CAMBODIA',
'VIETNAM', 'WASHINGTON'], 'place_of_document': 'HOCHIMINHCITY',
'subject': 'POLICE BRUTALITY RISING; CENTRAL HIGHLANDS DEATH CONFIRMED ',
'tags': ['PHUM', 'PGOV', 'PREF', 'KIRF', 'VM']},
'06JERUSALEM906': {'date': {'month': 'MAR', 'year': '2006'},
'entity_involved': ['RUEHC/SECSTATE WASHDC', '0698', 'RHEHNSC', 'NSC', 'RUEHBS/USEU BRUSSELS',
'FRONT OFFICE', 'NEA/IPA', 'WILLIAMS/GREENE/WAECHTER', 'ABRAMS',
'PHUM PREF EAID ECON', 'SBU', 'the World Food Program', 'WFP', 'ECON',
'the PA Ministry of National Economy', 'UNRWA', 'Market Monitoring'],
'from': 'AMCONSUL JERUSALEM',
'keywords': ['subject', 'vulnerability', 'collective', 'works', 'phum'],
'most_common_words': [('days', 11), ('food', 7), ('IMMEDIATE', 5), ('Gaza', 5), ('price', 5),
('flour', 4), ('WASHDC', 3), ('WFP', 3), ('March', 3), ('Karni', 3),
('stocks', 3), ('report', 3), ('percent', 3), ('JERUSALEM', 2)],
'people_involved': ['000906 ', 'SIPDIS ', 'NEA', 'DORAN', 'MUSTAFA ', 'Arnold Vercken',
'Karni'], 'place_involved': ['GAZA', 'WEST BANK/GAZA COUNTRY', 'U.S.'],
'place_of_document': 'JERUSALEM', 'subject': 'KARNI CLOSURE CAUSING FOOD SHORTAGE IN GAZA ',
'tags': ['PHUM', 'PREF', 'EAID', 'ECON', 'KWBG']},
'09BERLIN831': {'date': {'month': 'JUL', 'year': '2009'},
'entity_involved': ['RUEHC/SECSTATE WASHDC', 'RUEHAD', 'AMEMBASSY ABU DHABI', 'RUEHUJA',
'AMEMBASSY ABUJA PRIORITY', 'RUEHAK', 'AMEMBASSY ANKARA', 'RUEHTH',
'AMEMBASSY ATHENS', 'RUEHBS/', 'AMEMBASSY', 'RUEHEG', 'AMEMBASSY CAIRO',
'RUEHBY', 'AMEMBASSY CANBERRA', 'RUEHCP', 'AMEMBASSY COPENHAGEN', 'RUEHDJ',
'RUEHKL', 'AMEMBASSY KUALA LUMPUR', 'RUEHLI', 'AMEMBASSY LONDON', 'RUEHMD',
'RUEHMV', 'AMEMBASSY MONROVIA', 'RUEHMO', 'RUEHMS/AMEMBASSY MUSCAT',
'RUEHNR', 'AMEMBASSY NAIROBI', 'RUEHNE', 'AMEMBASSY NEW DELHI', 'RUEHNY',
'AMEMBASSY OSLO', 'RUEHOT', 'AMEMBASSY OTTAWA', 'RUEHZP', 'AMEMBASSY PANAMA',
'RUEHFR', 'AMEMBASSY PARIS', 'RUEHRH', 'AMEMBASSY RIYADH', 'RUEHRO',
'RUEHYN', 'RUEHGP/AMEMBASSY SINGAPORE', 'RUEHSM', 'AMEMBASSY STOCKHOLM',
'RUEHTC', 'RUEHKO/AMEMBASSY TOKYO', 'RUCNDT/USMISSION', 'EWWT', 'PHSA',
'PHUM PREL', 'GM', 'CGPCS', 'ON PARTICIPATION ISSUE',
'MFA UN Security Council Action', 'the Contact Group for Piracy', 'Turkish',
'German', 'the International Criminal Tribunal'], 'from': 'AMEMBASSY BERLIN',
'keywords': ['subject', 'expertise', 'stockhausen', '091715z', 'ruehul'],
'most_common_words': [('AMEMBASSY', 32), ('PRIORITY', 32), ('Germany', 5), ('national', 5),
('Stockhausen', 4), ('said', 4), ('cases', 4), ('region', 4),
('BERLIN', 3), ('CGPCS', 3), ('U.S.', 3), ('countries', 3), ('piracy', 3)],
'people_involved': ['RUEHBJ', '0210RUEHLO/', 'SIPDIS ', 'E.O.', 'DECL', 'STAN OTTO',
'Dirk Stockhausen', 'Koenig'],
'place_involved': ['BRUSSELS', 'MOSCOW', 'HAGUE', 'NEW YORK', 'BERLIN', 'GERMANY', 'U.S.',
'SOMALIA', 'NETHERLANDS', 'KENYA', 'CAMBODIA', 'ARUSHA', 'TANZANIA',
'RWANDA'], 'place_of_document': 'BERLIN',
'subject': 'CGPCS: GERMANY AGREES ON PARTICIPATION ISSUE, BUT IS STILL ' 'OFFSIDE REGARDING '
'INTERNATIONAL ' 'TRIBUNAL ',
'tags': ['EWWT', 'MARR', 'PGOV', 'PHSA', 'PHUM', 'PREL', 'MOPS', 'GM']}}
|
# -*- coding:utf-8 -*-
"""
@author: Alden
@email: sunzhenhy@gmail.com
@date: 2018/4/5
@version: 1.0.0.0
"""
class Solution(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums = sorted(nums)
sum_dict = dict()
res = list()
for i in range(len(nums)):
for j in range(i + 1, len(nums)):
sum_dict.setdefault(nums[i] + nums[j], list()).append([i, j])
# for k, v in sum_dict.items():
# print k, v
for i in range(len(nums) - 3):
if i > 0 and nums[i] == nums[i - 1]:
continue
for j in range(i + 1, len(nums) - 2):
if j > i + 1 and nums[j] == nums[j - 1]:
continue
if target - nums[i] - nums[j] in sum_dict:
tmp_array = sum_dict[target - nums[i] - nums[j]]
first_flag = True
for t_index in range(len(tmp_array)):
if (first_flag and tmp_array[t_index][0] > j) or (tmp_array[t_index][0] > j and nums[tmp_array[t_index][0]] != res[-1][2]):
t = [nums[i], nums[j]]
t.extend([nums[tmp_array[t_index][0]], nums[tmp_array[t_index][1]]])
res.append(t)
first_flag = False
return res
if __name__ == "__main__":
s = Solution()
print s.fourSum([1, 0, -1, 0, -2, 2], 0)
print s.fourSum([-3, -2, -1, 0, 0, 1, 2, 3], 0)
print s.fourSum([-2, -1, 0, 0, 1, 2], 0)
|
import os, logging, sys, subprocess, argparse, time
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from p4_mininet import P4Switch, P4Host
from nc_config import *
from exe_cmd import *
###########################################
## get parameters
###########################################
class MyTopo(Topo):
def __init__(self, sw_path, json_path, switches, thrift_base_port,
pcap_dump_flag, log_dir, hosts, links, **opts):
Topo.__init__(self, **opts)
for i in xrange(switches):
self.addSwitch("s%d" % (i+1),
sw_path = sw_path,
json_path = json_path,
thrift_port = thrift_base_port+i,
pcap_dump = pcap_dump_flag,
device_id = i,
verbose = True,
log_dir = log_dir)
for i in xrange(hosts):
self.addHost("h%d" % (i+1))
for a,b in links:
self.addLink(a, b)
def read_topo(topology_file):
nb_hosts = 0
nb_switches = 0
links = []
with open(topology_file, "r") as f:
line = f.readline()[:-1]
w, nb_switches = line.split()
assert(w == "switches")
line = f.readline()[:-1]
w, nb_hosts = line.split()
assert(w == "hosts")
for line in f:
if not line.strip(): break
a, b = line.split()
links.append( (a, b) )
return int(nb_switches), int(nb_hosts), links
def config_mininet(parameters):
switches, hosts, links = read_topo(parameters.topology_file)
topo = MyTopo("%s/targets/simple_switch/simple_switch" % parameters.bmv2,
parameters.switch_json,
switches,
parameters.thrift_base_port,
False,
parameters.project_dir + '/logs/switches',
hosts,
links)
net = Mininet(topo = topo,
host = P4Host,
switch = P4Switch,
controller = None,
autoStaticArp=True )
net.start()
for n in range(hosts):
h = net.get('h%d' % (n + 1))
for off in ["rx", "tx", "sg"]:
cmd = "/sbin/ethtool --offload eth0 %s off" % off
print cmd
h.cmd(cmd)
print "disable ipv6"
h.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
h.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
h.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
h.cmd("sysctl -w net.ipv4.tcp_congestion_control=reno")
h.cmd("iptables -I OUTPUT -p icmp --icmp-type destination-unreachable -j DROP")
h.setIP("10.0.0.%d" % (n + 1))
h.setMAC("aa:bb:cc:dd:ee:0%d" % (n + 1))
for i in range(hosts):
if (i != n):
h.setARP("10.0.0.%d" % (i + 1), "aa:bb:cc:dd:ee:0%d" % (i + 1))
net.get('s1').setMAC("aa:bb:cc:dd:ee:11","s1-eth1")
net.get('s1').setMAC("aa:bb:cc:dd:ee:12","s1-eth2")
net.get('s2').setMAC("aa:bb:cc:dd:ee:21","s2-eth1")
net.get('s2').setMAC("aa:bb:cc:dd:ee:22","s2-eth2")
net.get('s2').setMAC("aa:bb:cc:dd:ee:23","s2-eth3")
net.get('s3').setMAC("aa:bb:cc:dd:ee:31","s3-eth1")
net.get('s3').setMAC("aa:bb:cc:dd:ee:32","s3-eth2")
net.get('s4').setMAC("aa:bb:cc:dd:ee:41","s4-eth1")
net.get('s4').setMAC("aa:bb:cc:dd:ee:42","s4-eth2")
net.get('s4').setMAC("aa:bb:cc:dd:ee:43","s4-eth3")
net.get('s4').setMAC("aa:bb:cc:dd:ee:44","s4-eth4")
net.get('s4').setMAC("aa:bb:cc:dd:ee:45","s4-eth5")
time.sleep(1)
commands_list = ["config/commands.txt", "config/commands_1.txt", "config/commands_2.txt", "config/commands_3.txt"]
file_index = 0
for i in range(switches):
cmd = [parameters.runtime_CLI, parameters.switch_json, str(parameters.thrift_base_port + i)]
with open(commands_list[file_index], "r") as f:
file_index = file_index + 1
print " ".join(cmd)
try:
output = subprocess.check_output(cmd, stdin = f)
print output
except subprocess.CalledProcessError as e:
print e
print e.output
time.sleep(1)
logging.info("Ready !")
return (switches, hosts, net)
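###########################################
## usage sketch (not in the original file)
###########################################
# Wires up the attributes config_mininet() expects and drops into the Mininet
# CLI. All default values below are placeholders for this illustration.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--topology-file', dest='topology_file', default='config/topology.txt')
    parser.add_argument('--bmv2', default='/path/to/bmv2')
    parser.add_argument('--switch-json', dest='switch_json', default='switch.json')
    parser.add_argument('--thrift-base-port', dest='thrift_base_port', type=int, default=9090)
    parser.add_argument('--project-dir', dest='project_dir', default='.')
    parser.add_argument('--runtime-CLI', dest='runtime_CLI', default='/path/to/runtime_CLI.py')
    parameters = parser.parse_args()
    setLogLevel('info')
    switches, hosts, net = config_mininet(parameters)
    CLI(net)
    net.stop()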
|
import cgi
import datetime
import email.message
import json as jsonlib
import typing
import urllib.request
from collections.abc import MutableMapping
from http.cookiejar import Cookie, CookieJar
from urllib.parse import parse_qsl, urlencode
import chardet
import rfc3986
from .config import USER_AGENT
from .decoders import (
ACCEPT_ENCODING,
SUPPORTED_DECODERS,
Decoder,
IdentityDecoder,
MultiDecoder,
TextDecoder,
)
from .exceptions import (
CookieConflict,
HTTPError,
InvalidURL,
NotRedirectResponse,
ResponseClosed,
ResponseNotRead,
StreamConsumed,
)
from .multipart import multipart_encode
from .status_codes import StatusCode
from .utils import (
guess_json_utf,
is_known_encoding,
normalize_header_key,
normalize_header_value,
obfuscate_sensitive_headers,
parse_header_links,
str_query_param,
)
if typing.TYPE_CHECKING: # pragma: no cover
from .middleware.base import BaseMiddleware # noqa: F401
from .dispatch.base import AsyncDispatcher # noqa: F401
PrimitiveData = typing.Optional[typing.Union[str, int, float, bool]]
URLTypes = typing.Union["URL", str]
QueryParamTypes = typing.Union[
"QueryParams",
typing.Mapping[str, PrimitiveData],
typing.List[typing.Tuple[str, PrimitiveData]],
str,
]
HeaderTypes = typing.Union[
"Headers",
typing.Dict[typing.AnyStr, typing.AnyStr],
typing.List[typing.Tuple[typing.AnyStr, typing.AnyStr]],
]
CookieTypes = typing.Union["Cookies", CookieJar, typing.Dict[str, str]]
AuthTypes = typing.Union[
typing.Tuple[typing.Union[str, bytes], typing.Union[str, bytes]],
typing.Callable[["AsyncRequest"], "AsyncRequest"],
"BaseMiddleware",
]
ProxiesTypes = typing.Union[
URLTypes,
"AsyncDispatcher",
typing.Dict[URLTypes, typing.Union[URLTypes, "AsyncDispatcher"]],
]
AsyncRequestData = typing.Union[dict, str, bytes, typing.AsyncIterator[bytes]]
RequestData = typing.Union[dict, str, bytes, typing.Iterator[bytes]]
RequestFiles = typing.Dict[
str,
typing.Union[
typing.IO[typing.AnyStr], # file
typing.Tuple[str, typing.IO[typing.AnyStr]], # (filename, file)
typing.Tuple[
str, typing.IO[typing.AnyStr], str
], # (filename, file, content_type)
],
]
AsyncResponseContent = typing.Union[bytes, typing.AsyncIterator[bytes]]
ResponseContent = typing.Union[bytes, typing.Iterator[bytes]]
class URL:
def __init__(
self,
url: URLTypes,
allow_relative: bool = False,
params: QueryParamTypes = None,
) -> None:
if isinstance(url, str):
self._uri_reference = rfc3986.api.iri_reference(url).encode()
else:
self._uri_reference = url._uri_reference
# Normalize scheme and domain name.
if self.is_absolute_url:
self._uri_reference = self._uri_reference.normalize()
# Add any query parameters.
if params:
query_string = str(QueryParams(params))
self._uri_reference = self._uri_reference.copy_with(query=query_string)
# Enforce absolute URLs by default.
if not allow_relative:
if not self.scheme:
raise InvalidURL("No scheme included in URL.")
if not self.host:
raise InvalidURL("No host included in URL.")
# Allow setting full_path to a custom value, to support request
# methods like OPTIONS, CONNECT, and forwarding proxy requests.
self._full_path: typing.Optional[str] = None
@property
def scheme(self) -> str:
return self._uri_reference.scheme or ""
@property
def authority(self) -> str:
return self._uri_reference.authority or ""
@property
def userinfo(self) -> str:
return self._uri_reference.userinfo or ""
@property
def username(self) -> str:
userinfo = self._uri_reference.userinfo or ""
return userinfo.partition(":")[0]
@property
def password(self) -> str:
userinfo = self._uri_reference.userinfo or ""
return userinfo.partition(":")[2]
@property
def host(self) -> str:
return self._uri_reference.host or ""
@property
def port(self) -> int:
port = self._uri_reference.port
if port is None:
return {"https": 443, "http": 80}[self.scheme]
return int(port)
@property
def path(self) -> str:
return self._uri_reference.path or "/"
@property
def query(self) -> str:
return self._uri_reference.query or ""
@property
def full_path(self) -> str:
if self._full_path is not None:
return self._full_path
path = self.path
if self.query:
path += "?" + self.query
return path
@full_path.setter
def full_path(self, value: typing.Optional[str]) -> None:
self._full_path = value
@property
def fragment(self) -> str:
return self._uri_reference.fragment or ""
@property
def is_ssl(self) -> bool:
return self.scheme == "https"
@property
def is_absolute_url(self) -> bool:
"""
Return `True` for absolute URLs such as 'http://example.com/path',
and `False` for relative URLs such as '/path'.
"""
# We don't use `.is_absolute` from `rfc3986` because it treats
# URLs with a fragment portion as not absolute.
# What we actually care about is if the URL provides
# a scheme and hostname to which connections should be made.
return bool(self.scheme and self.host)
@property
def is_relative_url(self) -> bool:
return not self.is_absolute_url
@property
def origin(self) -> "Origin":
return Origin(self)
def copy_with(self, **kwargs: typing.Any) -> "URL":
if (
"username" in kwargs
or "password" in kwargs
or "host" in kwargs
or "port" in kwargs
):
host = kwargs.pop("host", self.host)
port = kwargs.pop("port", self.port)
username = kwargs.pop("username", self.username)
password = kwargs.pop("password", self.password)
authority = host
if port is not None:
authority += f":{port}"
if username is not None:
userpass = username
if password is not None:
userpass += f":{password}"
authority = f"{userpass}@{authority}"
kwargs["authority"] = authority
return URL(self._uri_reference.copy_with(**kwargs).unsplit())
def join(self, relative_url: URLTypes) -> "URL":
"""
Return an absolute URL, using this URL as the base.
"""
if self.is_relative_url:
return URL(relative_url)
# We drop any fragment portion, because RFC 3986 strictly
# treats URLs with a fragment portion as not being absolute URLs.
base_uri = self._uri_reference.copy_with(fragment=None)
relative_url = URL(relative_url, allow_relative=True)
return URL(relative_url._uri_reference.resolve_with(base_uri).unsplit())
def __hash__(self) -> int:
return hash(str(self))
def __eq__(self, other: typing.Any) -> bool:
return isinstance(other, (URL, str)) and str(self) == str(other)
def __str__(self) -> str:
return self._uri_reference.unsplit()
def __repr__(self) -> str:
class_name = self.__class__.__name__
url_str = str(self)
if self._uri_reference.userinfo:
url_str = (
rfc3986.urlparse(url_str)
.copy_with(userinfo=f"{self.username}:[secure]")
.unsplit()
)
return f"{class_name}({url_str!r})"
class Origin:
"""
The URL scheme and authority information, as a comparable, hashable object.
"""
def __init__(self, url: URLTypes) -> None:
if not isinstance(url, URL):
url = URL(url)
self.scheme = url.scheme
self.is_ssl = url.is_ssl
self.host = url.host
self.port = url.port
def __eq__(self, other: typing.Any) -> bool:
return (
isinstance(other, self.__class__)
and self.scheme == other.scheme
and self.host == other.host
and self.port == other.port
)
def __hash__(self) -> int:
return hash((self.scheme, self.host, self.port))
def __repr__(self) -> str:
class_name = self.__class__.__name__
return (
f"{class_name}(scheme={self.scheme!r} host={self.host!r} port={self.port})"
)
class QueryParams(typing.Mapping[str, str]):
"""
URL query parameters, as a multi-dict.
"""
def __init__(self, *args: QueryParamTypes, **kwargs: typing.Any) -> None:
assert len(args) < 2, "Too many arguments."
assert not (args and kwargs), "Cannot mix named and unnamed arguments."
value = args[0] if args else kwargs
if isinstance(value, str):
items = parse_qsl(value)
elif isinstance(value, QueryParams):
items = value.multi_items()
elif isinstance(value, list):
items = value # type: ignore
else:
items = value.items() # type: ignore
self._list = [(str(k), str_query_param(v)) for k, v in items]
self._dict = {str(k): str_query_param(v) for k, v in items}
def getlist(self, key: typing.Any) -> typing.List[str]:
return [item_value for item_key, item_value in self._list if item_key == key]
def keys(self) -> typing.KeysView:
return self._dict.keys()
def values(self) -> typing.ValuesView:
return self._dict.values()
def items(self) -> typing.ItemsView:
return self._dict.items()
def multi_items(self) -> typing.List[typing.Tuple[str, str]]:
return list(self._list)
def get(self, key: typing.Any, default: typing.Any = None) -> typing.Any:
if key in self._dict:
return self._dict[key]
return default
def update(self, params: QueryParamTypes = None) -> None: # type: ignore
if not params:
return
params = QueryParams(params)
for param in params:
self[param] = params[param]
def __getitem__(self, key: typing.Any) -> str:
return self._dict[key]
def __setitem__(self, key: str, value: str) -> None:
self._dict[key] = value
found_indexes = []
for idx, (item_key, _) in enumerate(self._list):
if item_key == key:
found_indexes.append(idx)
for idx in reversed(found_indexes[1:]):
del self._list[idx]
if found_indexes:
idx = found_indexes[0]
self._list[idx] = (key, value)
else:
self._list.append((key, value))
def __contains__(self, key: typing.Any) -> bool:
return key in self._dict
def __iter__(self) -> typing.Iterator[typing.Any]:
return iter(self.keys())
def __len__(self) -> int:
return len(self._dict)
def __eq__(self, other: typing.Any) -> bool:
if not isinstance(other, self.__class__):
return False
return sorted(self._list) == sorted(other._list)
def __str__(self) -> str:
return urlencode(self._list)
def __repr__(self) -> str:
class_name = self.__class__.__name__
query_string = str(self)
return f"{class_name}({query_string!r})"
class Headers(typing.MutableMapping[str, str]):
"""
HTTP headers, as a case-insensitive multi-dict.
"""
def __init__(self, headers: HeaderTypes = None, encoding: str = None) -> None:
if headers is None:
self._list = [] # type: typing.List[typing.Tuple[bytes, bytes]]
elif isinstance(headers, Headers):
self._list = list(headers.raw)
elif isinstance(headers, dict):
self._list = [
(normalize_header_key(k, encoding), normalize_header_value(v, encoding))
for k, v in headers.items()
]
else:
self._list = [
(normalize_header_key(k, encoding), normalize_header_value(v, encoding))
for k, v in headers
]
self._encoding = encoding
@property
def encoding(self) -> str:
"""
Header encoding is mandated as ascii, but we allow fallbacks to utf-8
or iso-8859-1.
"""
if self._encoding is None:
for encoding in ["ascii", "utf-8"]:
for key, value in self.raw:
try:
key.decode(encoding)
value.decode(encoding)
except UnicodeDecodeError:
break
else:
# The else block runs if 'break' did not occur, meaning
# all values fitted the encoding.
self._encoding = encoding
break
else:
# The ISO-8859-1 encoding covers all 256 code points in a byte,
# so will never raise decode errors.
self._encoding = "iso-8859-1"
return self._encoding
@encoding.setter
def encoding(self, value: str) -> None:
self._encoding = value
@property
def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]:
"""
Returns a list of the raw header items, as byte pairs.
May be mutated in-place.
"""
return self._list
def keys(self) -> typing.List[str]: # type: ignore
return [key.decode(self.encoding) for key, value in self._list]
def values(self) -> typing.List[str]: # type: ignore
return [value.decode(self.encoding) for key, value in self._list]
def items(self) -> typing.List[typing.Tuple[str, str]]: # type: ignore
return [
(key.decode(self.encoding), value.decode(self.encoding))
for key, value in self._list
]
def get(self, key: str, default: typing.Any = None) -> typing.Any:
try:
return self[key]
except KeyError:
return default
def getlist(self, key: str, split_commas: bool = False) -> typing.List[str]:
"""
Return multiple header values.
"""
get_header_key = key.lower().encode(self.encoding)
values = [
item_value.decode(self.encoding)
for item_key, item_value in self._list
if item_key == get_header_key
]
if not split_commas:
return values
split_values = []
for value in values:
split_values.extend([item.strip() for item in value.split(",")])
return split_values
def update(self, headers: HeaderTypes = None) -> None: # type: ignore
headers = Headers(headers)
for header in headers:
self[header] = headers[header]
def copy(self) -> "Headers":
return Headers(self.items(), encoding=self.encoding)
def __getitem__(self, key: str) -> str:
"""
Return a single header value.
If there are multiple headers with the same key, then we concatenate
them with commas. See: https://tools.ietf.org/html/rfc7230#section-3.2.2
"""
normalized_key = key.lower().encode(self.encoding)
items = []
for header_key, header_value in self._list:
if header_key == normalized_key:
items.append(header_value.decode(self.encoding))
if items:
return ", ".join(items)
raise KeyError(key)
def __setitem__(self, key: str, value: str) -> None:
"""
Set the header `key` to `value`, removing any duplicate entries.
Retains insertion order.
"""
set_key = key.lower().encode(self.encoding)
set_value = value.encode(self.encoding)
found_indexes = []
for idx, (item_key, _) in enumerate(self._list):
if item_key == set_key:
found_indexes.append(idx)
for idx in reversed(found_indexes[1:]):
del self._list[idx]
if found_indexes:
idx = found_indexes[0]
self._list[idx] = (set_key, set_value)
else:
self._list.append((set_key, set_value))
def __delitem__(self, key: str) -> None:
"""
Remove the header `key`.
"""
del_key = key.lower().encode(self.encoding)
pop_indexes = []
for idx, (item_key, _) in enumerate(self._list):
if item_key == del_key:
pop_indexes.append(idx)
if not pop_indexes:
raise KeyError(key)
for idx in reversed(pop_indexes):
del self._list[idx]
def __contains__(self, key: typing.Any) -> bool:
get_header_key = key.lower().encode(self.encoding)
for header_key, _ in self._list:
if header_key == get_header_key:
return True
return False
def __iter__(self) -> typing.Iterator[typing.Any]:
return iter(self.keys())
def __len__(self) -> int:
return len(self._list)
def __eq__(self, other: typing.Any) -> bool:
if not isinstance(other, Headers):
return False
return sorted(self._list) == sorted(other._list)
def __repr__(self) -> str:
class_name = self.__class__.__name__
encoding_str = ""
if self.encoding != "ascii":
encoding_str = f", encoding={self.encoding!r}"
as_list = list(obfuscate_sensitive_headers(self.items()))
as_dict = dict(as_list)
no_duplicate_keys = len(as_dict) == len(as_list)
if no_duplicate_keys:
return f"{class_name}({as_dict!r}{encoding_str})"
return f"{class_name}({as_list!r}{encoding_str})"
class BaseRequest:
def __init__(
self,
method: str,
url: typing.Union[str, URL],
*,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
):
self.method = method.upper()
self.url = URL(url, params=params)
self.headers = Headers(headers)
if cookies:
self._cookies = Cookies(cookies)
self._cookies.set_cookie_header(self)
def encode_data(
self, data: dict = None, files: RequestFiles = None, json: typing.Any = None
) -> typing.Tuple[bytes, str]:
if json is not None:
content = jsonlib.dumps(json).encode("utf-8")
content_type = "application/json"
elif files is not None:
content, content_type = multipart_encode(data or {}, files)
elif data is not None:
content = urlencode(data, doseq=True).encode("utf-8")
content_type = "application/x-www-form-urlencoded"
else:
content = b""
content_type = ""
return content, content_type
def prepare(self) -> None:
content: typing.Optional[bytes] = getattr(self, "content", None)
is_streaming = getattr(self, "is_streaming", False)
auto_headers: typing.List[typing.Tuple[bytes, bytes]] = []
has_host = "host" in self.headers
has_user_agent = "user-agent" in self.headers
has_accept = "accept" in self.headers
has_content_length = (
"content-length" in self.headers or "transfer-encoding" in self.headers
)
has_accept_encoding = "accept-encoding" in self.headers
has_connection = "connection" in self.headers
if not has_host:
url = self.url
if url.userinfo:
url = url.copy_with(username=None, password=None)
auto_headers.append((b"host", url.authority.encode("ascii")))
if not has_user_agent:
auto_headers.append((b"user-agent", USER_AGENT.encode("ascii")))
if not has_accept:
auto_headers.append((b"accept", b"*/*"))
if not has_content_length:
if is_streaming:
auto_headers.append((b"transfer-encoding", b"chunked"))
elif content:
content_length = str(len(content)).encode()
auto_headers.append((b"content-length", content_length))
if not has_accept_encoding:
auto_headers.append((b"accept-encoding", ACCEPT_ENCODING.encode()))
if not has_connection:
auto_headers.append((b"connection", b"keep-alive"))
for item in reversed(auto_headers):
self.headers.raw.insert(0, item)
@property
def cookies(self) -> "Cookies":
if not hasattr(self, "_cookies"):
self._cookies = Cookies()
return self._cookies
def __repr__(self) -> str:
class_name = self.__class__.__name__
url = str(self.url)
return f"<{class_name}({self.method!r}, {url!r})>"
class AsyncRequest(BaseRequest):
def __init__(
self,
method: str,
url: typing.Union[str, URL],
*,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
data: AsyncRequestData = None,
files: RequestFiles = None,
json: typing.Any = None,
):
super().__init__(
method=method, url=url, params=params, headers=headers, cookies=cookies
)
if data is None or isinstance(data, dict):
content, content_type = self.encode_data(data, files, json)
self.is_streaming = False
self.content = content
if content_type:
self.headers["Content-Type"] = content_type
elif isinstance(data, (str, bytes)):
data = data.encode("utf-8") if isinstance(data, str) else data
self.is_streaming = False
self.content = data
else:
assert hasattr(data, "__aiter__")
self.is_streaming = True
self.content_aiter = data
self.prepare()
async def read(self) -> bytes:
"""
        Read and return the request content.
"""
if not hasattr(self, "content"):
self.content = b"".join([part async for part in self.stream()])
return self.content
async def stream(self) -> typing.AsyncIterator[bytes]:
if self.is_streaming:
async for part in self.content_aiter:
yield part
elif self.content:
yield self.content
class Request(BaseRequest):
def __init__(
self,
method: str,
url: typing.Union[str, URL],
*,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
data: RequestData = None,
files: RequestFiles = None,
json: typing.Any = None,
):
super().__init__(
method=method, url=url, params=params, headers=headers, cookies=cookies
)
if data is None or isinstance(data, dict):
content, content_type = self.encode_data(data, files, json)
self.is_streaming = False
self.content = content
if content_type:
self.headers["Content-Type"] = content_type
elif isinstance(data, (str, bytes)):
data = data.encode("utf-8") if isinstance(data, str) else data
self.is_streaming = False
self.content = data
else:
assert hasattr(data, "__iter__")
self.is_streaming = True
self.content_iter = data
self.prepare()
def read(self) -> bytes:
if not hasattr(self, "content"):
self.content = b"".join([part for part in self.stream()])
return self.content
def stream(self) -> typing.Iterator[bytes]:
if self.is_streaming:
for part in self.content_iter:
yield part
elif self.content:
yield self.content
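# --- Illustrative usage sketch (added; not part of the original source) ------
# Building a Request encodes the body and lets prepare() fill in the standard
# headers (host, user-agent, accept, accept-encoding, connection, and either
# content-length or transfer-encoding):
#
#   req = Request("POST", "https://example.org/items", json={"name": "demo"})
#   req.headers["host"]          # "example.org"
#   req.headers["content-type"]  # "application/json"
#   req.content                  # b'{"name": "demo"}'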
class BaseResponse:
def __init__(
self,
status_code: int,
*,
http_version: str = None,
headers: HeaderTypes = None,
request: BaseRequest = None,
on_close: typing.Callable = None,
elapsed: datetime.timedelta = None,
):
self.status_code = status_code
self.http_version = http_version
self.headers = Headers(headers)
self.request = request
self.on_close = on_close
self.elapsed = datetime.timedelta(0) if elapsed is None else elapsed
self.call_next: typing.Optional[typing.Callable] = None
@property
def reason_phrase(self) -> str:
return StatusCode.get_reason_phrase(self.status_code)
@property
def url(self) -> typing.Optional[URL]:
"""
Returns the URL for which the request was made.
Requires that `request` was provided when instantiating the response.
"""
return None if self.request is None else self.request.url
@property
def content(self) -> bytes:
if not hasattr(self, "_content"):
if hasattr(self, "_raw_content"):
raw_content = self._raw_content # type: ignore
content = self.decoder.decode(raw_content)
content += self.decoder.flush()
self._content = content
else:
raise ResponseNotRead()
return self._content
@property
def text(self) -> str:
if not hasattr(self, "_text"):
content = self.content
if not content:
self._text = ""
else:
encoding = self.encoding
self._text = content.decode(encoding, errors="replace")
return self._text
@property
def encoding(self) -> str:
if not hasattr(self, "_encoding"):
encoding = self.charset_encoding
if encoding is None or not is_known_encoding(encoding):
encoding = self.apparent_encoding
if encoding is None or not is_known_encoding(encoding):
encoding = "utf-8"
self._encoding = encoding
return self._encoding
@encoding.setter
def encoding(self, value: str) -> None:
self._encoding = value
@property
def charset_encoding(self) -> typing.Optional[str]:
"""
Return the encoding, as specified by the Content-Type header.
"""
content_type = self.headers.get("Content-Type")
if content_type is None:
return None
parsed = cgi.parse_header(content_type)
media_type, params = parsed[0], parsed[-1]
if "charset" in params:
return params["charset"].strip("'\"")
# RFC 2616 specifies that 'iso-8859-1' should be used as the default
# for 'text/*' media types, if no charset is provided.
# See: https://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
if media_type.startswith("text/"):
return "iso-8859-1"
return None
@property
def apparent_encoding(self) -> typing.Optional[str]:
"""
Return the encoding, as it appears to autodetection.
"""
return chardet.detect(self.content)["encoding"]
@property
def decoder(self) -> Decoder:
"""
Returns a decoder instance which can be used to decode the raw byte
content, depending on the Content-Encoding used in the response.
"""
if not hasattr(self, "_decoder"):
decoders: typing.List[Decoder] = []
values = self.headers.getlist("content-encoding", split_commas=True)
for value in values:
value = value.strip().lower()
try:
decoder_cls = SUPPORTED_DECODERS[value]
decoders.append(decoder_cls())
except KeyError:
continue
if len(decoders) == 1:
self._decoder = decoders[0]
elif len(decoders) > 1:
self._decoder = MultiDecoder(decoders)
else:
self._decoder = IdentityDecoder()
return self._decoder
@property
def is_redirect(self) -> bool:
return StatusCode.is_redirect(self.status_code) and "location" in self.headers
def raise_for_status(self) -> None:
"""
Raise the `HttpError` if one occurred.
"""
message = (
"{0.status_code} {error_type}: {0.reason_phrase} for url: {0.url}\n"
"For more information check: https://httpstatuses.com/{0.status_code}"
)
if StatusCode.is_client_error(self.status_code):
message = message.format(self, error_type="Client Error")
elif StatusCode.is_server_error(self.status_code):
message = message.format(self, error_type="Server Error")
else:
message = ""
if message:
raise HTTPError(message, response=self)
def json(self, **kwargs: typing.Any) -> typing.Union[dict, list]:
if self.charset_encoding is None and self.content and len(self.content) > 3:
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return jsonlib.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
pass
return jsonlib.loads(self.text, **kwargs)
@property
def cookies(self) -> "Cookies":
if not hasattr(self, "_cookies"):
assert self.request is not None
self._cookies = Cookies()
self._cookies.extract_cookies(self)
return self._cookies
@property
def links(self) -> typing.Dict[typing.Optional[str], typing.Dict[str, str]]:
"""
Returns the parsed header links of the response, if any
"""
header = self.headers.get("link")
ldict = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get("rel") or link.get("url")
ldict[key] = link
return ldict
def __repr__(self) -> str:
return f"<Response [{self.status_code} {self.reason_phrase}]>"
class AsyncResponse(BaseResponse):
def __init__(
self,
status_code: int,
*,
http_version: str = None,
headers: HeaderTypes = None,
content: AsyncResponseContent = None,
on_close: typing.Callable = None,
request: AsyncRequest = None,
history: typing.List["BaseResponse"] = None,
elapsed: datetime.timedelta = None,
):
super().__init__(
status_code=status_code,
http_version=http_version,
headers=headers,
request=request,
on_close=on_close,
elapsed=elapsed,
)
self.history = [] if history is None else list(history)
if content is None or isinstance(content, bytes):
self.is_closed = True
self.is_stream_consumed = True
self._raw_content = content or b""
else:
self.is_closed = False
self.is_stream_consumed = False
self._raw_stream = content
async def read(self) -> bytes:
"""
Read and return the response content.
"""
if not hasattr(self, "_content"):
self._content = b"".join([part async for part in self.stream()])
return self._content
async def stream(self) -> typing.AsyncIterator[bytes]:
"""
A byte-iterator over the decoded response content.
This allows us to handle gzip, deflate, and brotli encoded responses.
"""
if hasattr(self, "_content"):
yield self._content
else:
async for chunk in self.raw():
yield self.decoder.decode(chunk)
yield self.decoder.flush()
async def stream_text(self) -> typing.AsyncIterator[str]:
"""
A str-iterator over the decoded response content
        that handles gzip, deflate, and brotli encodings, and also detects
        the content's string encoding.
"""
decoder = TextDecoder(encoding=self.charset_encoding)
async for chunk in self.stream():
yield decoder.decode(chunk)
yield decoder.flush()
async def raw(self) -> typing.AsyncIterator[bytes]:
"""
A byte-iterator over the raw response content.
"""
if hasattr(self, "_raw_content"):
yield self._raw_content
else:
if self.is_stream_consumed:
raise StreamConsumed()
if self.is_closed:
raise ResponseClosed()
self.is_stream_consumed = True
async for part in self._raw_stream:
yield part
await self.close()
async def next(self) -> "AsyncResponse":
"""
Get the next response from a redirect response.
"""
if not self.is_redirect:
raise NotRedirectResponse()
assert self.call_next is not None
return await self.call_next()
async def close(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
if not self.is_closed:
self.is_closed = True
if self.on_close is not None:
await self.on_close()
class Response(BaseResponse):
def __init__(
self,
status_code: int,
*,
http_version: str = None,
headers: HeaderTypes = None,
content: ResponseContent = None,
on_close: typing.Callable = None,
request: Request = None,
history: typing.List["BaseResponse"] = None,
elapsed: datetime.timedelta = None,
):
super().__init__(
status_code=status_code,
http_version=http_version,
headers=headers,
request=request,
on_close=on_close,
elapsed=elapsed,
)
self.history = [] if history is None else list(history)
if content is None or isinstance(content, bytes):
self.is_closed = True
self.is_stream_consumed = True
self._raw_content = content or b""
else:
self.is_closed = False
self.is_stream_consumed = False
self._raw_stream = content
def read(self) -> bytes:
"""
Read and return the response content.
"""
if not hasattr(self, "_content"):
self._content = b"".join([part for part in self.stream()])
return self._content
def stream(self) -> typing.Iterator[bytes]:
"""
A byte-iterator over the decoded response content.
This allows us to handle gzip, deflate, and brotli encoded responses.
"""
if hasattr(self, "_content"):
yield self._content
else:
for chunk in self.raw():
yield self.decoder.decode(chunk)
yield self.decoder.flush()
def stream_text(self) -> typing.Iterator[str]:
"""
A str-iterator over the decoded response content
        that handles gzip, deflate, and brotli encodings, and also detects
        the content's string encoding.
"""
decoder = TextDecoder(encoding=self.charset_encoding)
for chunk in self.stream():
yield decoder.decode(chunk)
yield decoder.flush()
def raw(self) -> typing.Iterator[bytes]:
"""
A byte-iterator over the raw response content.
"""
if hasattr(self, "_raw_content"):
yield self._raw_content
else:
if self.is_stream_consumed:
raise StreamConsumed()
if self.is_closed:
raise ResponseClosed()
self.is_stream_consumed = True
for part in self._raw_stream:
yield part
self.close()
def close(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
if not self.is_closed:
self.is_closed = True
if self.on_close is not None:
self.on_close()
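# --- Illustrative usage sketch (added; not part of the original source) ------
# A fully-buffered Response decodes its body on access; the text encoding falls
# back from the Content-Type charset to charset autodetection, then to UTF-8:
#
#   resp = Response(200, content=b"Hello, world!",
#                   headers={"Content-Type": "text/plain; charset=utf-8"})
#   resp.reason_phrase         # "OK"
#   resp.text                  # "Hello, world!"
#   resp.raise_for_status()    # no-op for 2xx status codes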
class Cookies(MutableMapping):
"""
HTTP Cookies, as a mutable mapping.
"""
def __init__(self, cookies: CookieTypes = None) -> None:
if cookies is None or isinstance(cookies, dict):
self.jar = CookieJar()
if isinstance(cookies, dict):
for key, value in cookies.items():
self.set(key, value)
elif isinstance(cookies, Cookies):
self.jar = CookieJar()
for cookie in cookies.jar:
self.jar.set_cookie(cookie)
else:
self.jar = cookies
def extract_cookies(self, response: BaseResponse) -> None:
"""
Loads any cookies based on the response `Set-Cookie` headers.
"""
assert response.request is not None
urlib_response = self._CookieCompatResponse(response)
urllib_request = self._CookieCompatRequest(response.request)
self.jar.extract_cookies(urlib_response, urllib_request) # type: ignore
def set_cookie_header(self, request: BaseRequest) -> None:
"""
Sets an appropriate 'Cookie:' HTTP header on the `Request`.
"""
urllib_request = self._CookieCompatRequest(request)
self.jar.add_cookie_header(urllib_request)
def set(self, name: str, value: str, domain: str = "", path: str = "/") -> None:
"""
Set a cookie value by name. May optionally include domain and path.
"""
kwargs = {
"version": 0,
"name": name,
"value": value,
"port": None,
"port_specified": False,
"domain": domain,
"domain_specified": bool(domain),
"domain_initial_dot": domain.startswith("."),
"path": path,
"path_specified": bool(path),
"secure": False,
"expires": None,
"discard": True,
"comment": None,
"comment_url": None,
"rest": {"HttpOnly": None},
"rfc2109": False,
}
cookie = Cookie(**kwargs) # type: ignore
self.jar.set_cookie(cookie)
def get( # type: ignore
self, name: str, default: str = None, domain: str = None, path: str = None
) -> typing.Optional[str]:
"""
Get a cookie by name. May optionally include domain and path
in order to specify exactly which cookie to retrieve.
"""
value = None
for cookie in self.jar:
if cookie.name == name:
if domain is None or cookie.domain == domain: # type: ignore
if path is None or cookie.path == path:
if value is not None:
message = f"Multiple cookies exist with name={name}"
raise CookieConflict(message)
value = cookie.value
if value is None:
return default
return value
def delete(self, name: str, domain: str = None, path: str = None) -> None:
"""
Delete a cookie by name. May optionally include domain and path
in order to specify exactly which cookie to delete.
"""
if domain is not None and path is not None:
return self.jar.clear(domain, path, name)
remove = []
for cookie in self.jar:
if cookie.name == name:
if domain is None or cookie.domain == domain: # type: ignore
if path is None or cookie.path == path:
remove.append(cookie)
for cookie in remove:
self.jar.clear(cookie.domain, cookie.path, cookie.name) # type: ignore
def clear(self, domain: str = None, path: str = None) -> None:
"""
Delete all cookies. Optionally include a domain and path in
order to only delete a subset of all the cookies.
"""
args = []
if domain is not None:
args.append(domain)
if path is not None:
assert domain is not None
args.append(path)
self.jar.clear(*args)
def update(self, cookies: CookieTypes = None) -> None: # type: ignore
cookies = Cookies(cookies)
for cookie in cookies.jar:
self.jar.set_cookie(cookie)
def __setitem__(self, name: str, value: str) -> None:
return self.set(name, value)
def __getitem__(self, name: str) -> str:
value = self.get(name)
if value is None:
raise KeyError(name)
return value
def __delitem__(self, name: str) -> None:
return self.delete(name)
def __len__(self) -> int:
return len(self.jar)
def __iter__(self) -> typing.Iterator[str]:
return (cookie.name for cookie in self.jar)
def __bool__(self) -> bool:
for _ in self.jar:
return True
return False
class _CookieCompatRequest(urllib.request.Request):
"""
Wraps a `Request` instance up in a compatibility interface suitable
for use with `CookieJar` operations.
"""
def __init__(self, request: BaseRequest) -> None:
super().__init__(
url=str(request.url),
headers=dict(request.headers),
method=request.method,
)
self.request = request
def add_unredirected_header(self, key: str, value: str) -> None:
super().add_unredirected_header(key, value)
self.request.headers[key] = value
class _CookieCompatResponse:
"""
Wraps a `Request` instance up in a compatibility interface suitable
for use with `CookieJar` operations.
"""
def __init__(self, response: BaseResponse):
self.response = response
def info(self) -> email.message.Message:
info = email.message.Message()
for key, value in self.response.headers.items():
info[key] = value
return info
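# --- Illustrative usage sketch (added; not part of the original source) ------
# Cookies wraps a CookieJar behind a mapping interface; set() accepts an
# explicit domain/path, and lookups by name raise CookieConflict when the
# name is ambiguous:
#
#   cookies = Cookies({"session": "abc123"})
#   cookies.set("theme", "dark", domain="example.org")
#   cookies["session"]   # "abc123"
#   "theme" in cookies   # True
#   len(cookies)         # 2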
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
import os
import copy
from subprocess import Popen
from CodeInterfaceBaseClass import CodeInterfaceBase
from MooseBasedAppInterface import MooseBasedApp
from RattlesnakeInterface import Rattlesnake
from RELAP7Interface import RELAP7
class MAMMOTHInterface(CodeInterfaceBase):
"""
    This class couples RAVEN with MAMMOTH (a MOOSE-based application that can drive Rattlesnake, Bison and RELAP-7)
"""
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
CodeInterfaceBase.__init__(self)
self.MooseInterface = MooseBasedApp() #used to perturb MAMMOTH input files
self.MooseInterface.addDefaultExtension()
self.BisonInterface = MooseBasedApp() #used to perturb Bison input files
self.BisonInterface.addDefaultExtension()
self.RattlesnakeInterface = Rattlesnake() #used to perturb Rattlesnake and Yak input files
#FIXME Would like to use RELAP7() as interface, but Distributions block appears to be out of date when running Mammoth
#self.Relap7Interface = RELAP7() #used to perturb RELAP7 input files
self.Relap7Interface = MooseBasedApp()
self.Relap7Interface.addDefaultExtension()
def findInps(self,inputFiles):
"""
Locates the input files required by MAMMOTH
@ In, inputFiles, list, list of Files objects
@ Out, inputDict, dict, dictionary containing MAMMOTH required input files
"""
inputDict = {}
inputDict['MammothInput'] = []
inputDict['BisonInput'] = []
inputDict['RattlesnakeInput'] = []
inputDict['Relap7Input'] = []
inputDict['AncillaryInput'] = []
allowedDriverAppInput = ['bisoninput','rattlesnakeinput','relap7input']
for inputFile in inputFiles:
fileType = inputFile.getType()
if fileType.strip().lower().split('|')[0] == "mammothinput":
inputDict['MammothInput'].append(inputFile)
inputDict['DriverAppInput'] = fileType.strip().lower().split('|')[-1]
if fileType.strip().lower().split('|')[-1] == "bisoninput":
inputDict['BisonInput'].append(inputFile)
elif fileType.strip().lower().split('|')[-1] == "rattlesnakeinput" or \
fileType.strip().lower() == "yakxsinput" or \
fileType.strip().lower() == "yakxsaliasinput" or \
fileType.strip().lower() == "instantxsinput" or \
fileType.strip().lower() == "instantxsaliasinput":
inputDict['RattlesnakeInput'].append(inputFile)
elif fileType.strip().lower().split('|')[-1] == "relap7input":
inputDict['Relap7Input'].append(inputFile)
elif fileType.strip().lower() == "ancillaryinput":
inputDict['AncillaryInput'] = []
# Mammoth input is not found
if len(inputDict['MammothInput']) == 0:
errorMessage = 'No MAMMOTH input file specified! Please prepend "MAMMOTHInput|" to the driver App input \n'
errorMessage += 'file\'s type in the RAVEN input file.'
raise IOError(errorMessage)
# Multiple mammoth files are found
elif len(inputDict['MammothInput']) > 1:
raise IOError('Multiple MAMMOTH input files are provided! Please limit the number of input files to one.')
# Mammoth input found, but driverAppInput is not in the allowedDriverAppInput list
elif len(inputDict['MammothInput']) == 1 and inputDict['DriverAppInput'] not in allowedDriverAppInput:
errorMessage = 'A MAMMOTH input file was specified, but the driver app is not currently supported by this\n'
errorMessage += 'interface. The MAMMOTH input file can only be specified as one of the following types:'
for goodDriverAppInput in allowedDriverAppInput:
errorMessage += '\nMAMMOTHInput|' + goodDriverAppInput
raise IOError(errorMessage)
return inputDict
def generateCommand(self, inputFiles, executable, clargs=None, fargs=None):
"""
Generate a command to run Mammoth using an input with sampled variables
See base class. Collects all the clargs and the executable to produce the command-line call.
Returns tuple of commands and base file name for run.
Commands are a list of tuples, indicating parallel/serial and the execution command to use.
      @ In, inputFiles, list, List of input files (length of the list depends on the number of inputs that have
        been added in the Step that is running this code)
@ In, executable, string, executable name with absolute path (e.g. /home/path_to_executable/code.exe)
@ In, clargs, dict, optional, dictionary containing the command-line flags the user can specify in the input
(e.g. under the node < Code >< clargstype = 0 input0arg = 0 i0extension = 0 .inp0/ >< /Code >)
      @ In, fargs, dict, optional, a dictionary containing the auxiliary input file variables the user can specify
in the input (e.g. under the node < Code >< fargstype = 0 input0arg = 0 aux0extension = 0 .aux0/ >< /Code >)
@ Out, returnCommand, tuple, tuple containing the generated command. returnCommand[0] is the command to run the
code (string), returnCommand[1] is the name of the output root
"""
inputDict = self.findInps(inputFiles)
mammothInput = inputDict['MammothInput']
mooseCommand, mooseOut = self.MooseInterface.generateCommand(mammothInput,executable,clargs,fargs)
returnCommand = mooseCommand, mooseOut
return returnCommand
def createNewInput(self, currentInputFiles, origInputFiles, samplerType, **Kwargs):
"""
Generates new perturbed input files for Mammoth and associated Moose based applications.
@ In, currentInputFiles, list, list of current input files
@ In, origInputFiles, list, list of the original input files
@ In, samplerType, string, Sampler type (e.g. MonteCarlo, Adaptive, etc. see manual Samplers section)
@ In, Kwargs, dict, dictionary of parameters. In this dictionary there is another dictionary called "SampledVars"
where RAVEN stores the variables that got sampled (e.g. Kwargs['SampledVars'] => {'var1':10,'var2':40})
@ Out, newInputFiles, list, list of new input files (modified or not)
"""
    #split up SampledVars in Kwargs between Bison, Rattlesnake and RELAP-7
bisonArgs = copy.deepcopy(Kwargs)
bisonArgs['SampledVars'] = {}
perturbBison = False
rattlesnakeArgs = copy.deepcopy(Kwargs)
rattlesnakeArgs['SampledVars'] = {}
perturbRattlesnake = False
relap7Args = copy.deepcopy(Kwargs)
relap7Args['SampledVars'] = {}
perturbRelap7 = False
foundAlias = False
for varName,varValue in Kwargs['SampledVars'].items():
# get the variable's full name
if len(varName.split('@')) == 2:
appName = varName.split('@')[0].lower()
baseVarName = varName.split('@')[-1]
elif len(varName.split('@')) == 1:
appName = None
baseVarName = varName
else:
errorMessage = 'Variable names passed to the MAMMOTH Code Interface must either\n'
        errorMessage += 'specify to which App input they belong by prepending the App\'s name\n'
errorMessage += 'followed by "@" to the base variable\'s name or alias or have no App\n'
errorMessage += 'name to signify a passthrough variable. Please check that\n'
errorMessage += varName+'\n'
errorMessage += 'fits within this syntax specification.'
raise IOError(errorMessage)
# Identify which app's input the variable goes into and separate appArgs
if appName == 'bison':
bisonArgs['SampledVars'][baseVarName] = varValue
perturbBison = True
elif appName == 'rattlesnake':
rattlesnakeArgs['SampledVars'][baseVarName] = varValue
perturbRattlesnake = True
elif appName == 'relap7':
relap7Args['SampledVars'][baseVarName] = varValue
perturbRelap7 = True
      elif appName is None:
# It's a dummy variable. Doesn't need to be added to any argument lists, just continue.
pass
else:
errorMessage = appName+' is not an App supported by the MAMMOTH Code Interface!\n'
errorMessage += 'Please specify a supported App in which to send \n'
errorMessage += baseVarName+'\n'
errorMessage += 'or add the desired App to the MAMMOTH Code Interface.'
raise IOError(errorMessage)
# Check if the user wants to perturb yak xs libraries
for inputFile in currentInputFiles:
fileType = inputFile.getType()
if fileType.strip().lower() == "yakxsaliasinput":
foundAlias = True
break
elif fileType.strip().lower() == "instantxsaliasinput":
foundAlias = True
break
inputDicts = self.findInps(currentInputFiles)
# Bison Interface
if perturbBison:
bisonInps = inputDicts['BisonInput']
bisonInTypes = []
for bisonIn in bisonInps:
bisonInTypes.append(bisonIn.getType().strip().lower().split('|')[-1])
if 'bisoninput' not in bisonInTypes:
errorMessage = 'Variable(s):\n'
for bisonVarName in bisonArgs['SampledVars'].keys():
errorMessage += bisonVarName + '\n'
errorMessage += 'are specified as Bison parameters, but no Bison input file is listed!'
raise IOError(errorMessage)
elif bisonInTypes.count('bisoninput') > 1:
errorMessage = 'Multiple Bison input files specified! This interface currently only\n'
errorMessage += 'supports one input for each App utilized.'
raise IOError(errorMessage)
origBisonInps = origInputFiles[currentInputFiles.index(bisonInps[0])]
bisonInps = self.BisonInterface.createNewInput(bisonInps,[origBisonInps],samplerType,**bisonArgs)
# Rattlesnake Interface
if perturbRattlesnake or foundAlias:
rattlesnakeInps = inputDicts['RattlesnakeInput']
rattlesnakeInTypes = []
for rattlesnakeIn in rattlesnakeInps:
rattlesnakeInTypes.append(rattlesnakeIn.getType().strip().lower().split('|')[-1])
if 'rattlesnakeinput' not in rattlesnakeInTypes:
errorMessage = 'Variable(s):\n'
for rattlesnakeVarName in rattlesnakeArgs['SampledVars'].keys():
errorMessage += rattlesnakeVarName + '\n'
errorMessage += 'are specified as Rattlesnake parameters, but no Rattlesnake input file is listed!'
raise IOError(errorMessage)
elif rattlesnakeInTypes.count('rattlesnakeinput') > 1:
errorMessage = 'Multiple Rattlesnake input files specified! This interface currently only\n'
errorMessage += 'supports one input for each App utilized.'
raise IOError(errorMessage)
origRattlesnakeInps = origInputFiles[currentInputFiles.index(rattlesnakeInps[0])]
rattlesnakeInps = self.RattlesnakeInterface.createNewInput(rattlesnakeInps,
[origRattlesnakeInps],samplerType,**rattlesnakeArgs)
# Relap7 Interface
if perturbRelap7:
relap7Inps = inputDicts['Relap7Input']
relap7InTypes = []
for relap7In in relap7Inps:
relap7InTypes.append(relap7In.getType().strip().lower().split('|')[-1])
if 'relap7input' not in relap7InTypes:
errorMessage = 'Variable(s):\n'
for relap7VarName in relap7Args['SampledVars'].keys():
errorMessage += relap7VarName + '\n'
errorMessage += 'are specified as Relap7 parameters, but no Relap7 input file is listed!'
raise IOError(errorMessage)
elif relap7InTypes.count('relap7input') > 1:
errorMessage = 'Multiple Relap7 input files specified! This interface currently only\n'
errorMessage += 'supports one input for each App utilized.'
raise IOError(errorMessage)
origRelap7Inps = origInputFiles[currentInputFiles.index(relap7Inps[0])]
relap7Inps = self.Relap7Interface.createNewInput(relap7Inps,[origRelap7Inps],samplerType,**relap7Args)
return currentInputFiles
def finalizeCodeOutput(self, command, output, workingDir):
"""
this method is called by the RAVEN code at the end of each run (if the method is present).
Cleans up files in the working directory that are not needed after the run
@ In, command, string, the command used to run the just ended job
@ In, output, string, the Output name root
@ In, workingDir, string, current working dir
@ Out, output, string, optional, present in case the root of the output file gets changed in this method (in this case None)
"""
    #may need to implement this method, e.g. to remove unused files after the run
pass
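# --- Illustrative note (added; not part of the original interface) -----------
# The conventions assumed by findInps/createNewInput, with hypothetical names:
#   * RAVEN file types look like "MAMMOTHInput|BisonInput"; the part after '|'
#     identifies the driver App ('bisoninput', 'rattlesnakeinput' or
#     'relap7input').
#   * Sampled variable names look like "bison@thermal_conductivity"; the part
#     before '@' routes the value to the matching App's input file, and a name
#     with no '@' is treated as a passthrough variable.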
|
#Explicit type conversion from int to float
num1 = 10
num2 = 20
num3 = num1 + num2
print(num3)
print(type(num3))
num4 = float(num1 + num2)
print(num4)
print(type(num4))
#Explicit type conversion from float to int
num1 = 10.2
num2 = 20.6
num3 = (num1 + num2)
print(num3)
print(type(num3))
num4 = int(num1 + num2)
print(num4)
print(type(num4))
#Type Conversion between Numbers and Strings
priceIcecream = 25
priceBrownie = 45
totalPrice = priceIcecream + priceBrownie
print("The total is Rs." + str(totalPrice) )
|
from database import (
fix_ids,
ImageModel,
CategoryModel,
AnnotationModel,
DatasetModel,
TaskModel,
ExportModel
)
# import pycocotools.mask as mask
import numpy as np
import time
import json
import os
from celery import shared_task
from ..socket import create_socket
from mongoengine import Q
from config import Config
from pathlib import PurePath
def bbox2seg(bbox):
    """Convert a COCO bbox [x, y, width, height] into a flat 4-corner polygon."""
    return [bbox[0],bbox[1],bbox[0]+bbox[2],bbox[1],bbox[0]+bbox[2],bbox[1]+bbox[3],bbox[0],bbox[1]+bbox[3]]
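# Worked example (added for illustration): a COCO bbox of [x, y, width, height]
# becomes a flat list of 4 corners (top-left, top-right, bottom-right,
# bottom-left):
#   bbox2seg([10, 20, 30, 40]) -> [10, 20, 40, 20, 40, 60, 10, 60]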
@shared_task
def export_annotations(task_id, dataset_id, categories):
task = TaskModel.objects.get(id=task_id)
dataset = DatasetModel.objects.get(id=dataset_id)
task.update(status="PROGRESS")
socket = create_socket()
task.info("Beginning Export (COCO Format)")
db_categories = CategoryModel.objects(id__in=categories, deleted=False) \
.only(*CategoryModel.COCO_PROPERTIES)
db_images = ImageModel.objects(
deleted=False, dataset_id=dataset.id).only(
*ImageModel.COCO_PROPERTIES)
db_annotations = AnnotationModel.objects(
deleted=False, category_id__in=categories)
total_items = db_categories.count()
coco = {
'images': [],
'categories': [],
'annotations': []
}
total_items += db_images.count()
progress = 0
    # iterate through all categories and upsert
category_names = []
for category in fix_ids(db_categories):
if len(category.get('keypoint_labels', [])) > 0:
category['keypoints'] = category.pop('keypoint_labels', [])
category['skeleton'] = category.pop('keypoint_edges', [])
else:
if 'keypoint_edges' in category:
del category['keypoint_edges']
if 'keypoint_labels' in category:
del category['keypoint_labels']
task.info(f"Adding category: {category.get('name')}")
coco.get('categories').append(category)
category_names.append(category.get('name'))
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
total_annotations = db_annotations.count()
total_images = db_images.count()
for image in db_images:
image = fix_ids(image)
if Config.EXPORT_RELPATH and 'relpath' in image:
image['file_name'] = image['relpath']
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
annotations = db_annotations.filter(image_id=image.get('id'))\
.only(*AnnotationModel.COCO_PROPERTIES)
annotations = fix_ids(annotations)
if len(annotations) == 0:
continue
num_annotations = 0
for annotation in annotations:
has_keypoints = len(annotation.get('keypoints', [])) > 0
has_segmentation = len(annotation.get('segmentation', [])) > 0
if has_keypoints or has_segmentation:
if not has_keypoints:
if 'keypoints' in annotation:
del annotation['keypoints']
else:
arr = np.array(annotation.get('keypoints', []))
arr = arr[2::3]
annotation['num_keypoints'] = len(arr[arr > 0])
num_annotations += 1
coco.get('annotations').append(annotation)
'''
if num_annotations > 0:
image["num_annotations"]=num_annotations
image["annotated"]=True
'''
task.info(
f"Exporting {num_annotations} annotations for image {image.get('id')}")
coco.get('images').append(image)
task.info(
f"Done export {total_annotations} annotations and {total_images} images from {dataset.name}")
timestamp = time.time()
directory = f"{dataset.directory}.exports/"
file_path = f"{directory}coco-{timestamp}.json"
if not os.path.exists(directory):
os.makedirs(directory)
task.info(f"Writing export to file {file_path}")
with open(file_path, 'w') as fp:
json.dump(coco, fp)
task.info("Creating export object")
export = ExportModel(dataset_id=dataset.id, path=file_path, tags=[
"COCO", *category_names])
export.save()
task.set_progress(100, socket=socket)
@shared_task
def import_annotations(task_id, dataset_id, coco_json):
task = TaskModel.objects.get(id=task_id)
dataset = DatasetModel.objects.get(id=dataset_id)
# UR added relpath
directory = os.path.join(Config.DATASET_DIRECTORY, dataset.name)
task.update(status="PROGRESS")
socket = create_socket()
task.info("Beginning Import")
images = ImageModel.objects(dataset_id=dataset.id)
categories = CategoryModel.objects
coco_images = coco_json.get('images', [])
coco_annotations = coco_json.get('annotations', [])
coco_categories = coco_json.get('categories', [])
task.info(f"Importing {len(coco_categories)} categories, "
f"{len(coco_images)} images, and "
f"{len(coco_annotations)} annotations")
total_items = sum([
len(coco_categories),
len(coco_annotations),
len(coco_images)
])
progress = 0
task.info("===== Importing Categories =====")
# category id mapping ( file : database )
categories_id = {}
# Create any missing categories
for category in coco_categories:
category_name = category.get('name')
category_id = category.get('id')
category_model = categories.filter(name__iexact=category_name).first()
if category_model is None:
task.warning(
f"{category_name} category not found (creating a new one)")
new_category = CategoryModel(
name=category_name,
keypoint_edges=category.get('skeleton', []),
keypoint_labels=category.get('keypoints', [])
)
new_category.save()
category_model = new_category
dataset.categories.append(new_category.id)
task.info(f"{category_name} category found")
# map category ids
categories_id[category_id] = category_model.id
# update progress
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
dataset.update(set__categories=dataset.categories)
task.info("===== Loading Images =====")
# image id mapping ( file: database )
images_id = {}
categories_by_image = {}
# Find all images
for image in coco_images:
image_id = image.get('id')
image_filename = image.get('file_name')
# update progress
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
# UR added relpath
image_model = images.filter(relpath=image_filename).all()
if len(image_model) == 0:
task.warning(f"Could not find image {image_filename}")
continue
if len(image_model) > 1:
task.error(
f"Too many images found with the same file name: {image_filename}")
continue
task.info(f"Image {image_filename} found")
image_model = image_model[0]
images_id[image_id] = image_model
categories_by_image[image_id] = list()
task.info("===== Import Annotations =====")
for annotation in coco_annotations:
image_id = annotation.get('image_id')
category_id = annotation.get('category_id')
segmentation = annotation.get('segmentation', [])
keypoints = annotation.get('keypoints', [])
# is_crowd = annotation.get('iscrowed', False)
area = annotation.get('area', 0)
bbox = annotation.get('bbox', [0, 0, 0, 0])
isbbox = annotation.get('isbbox', False)
progress += 1
task.set_progress((progress / total_items) * 100, socket=socket)
has_segmentation = (len(segmentation) > 0 or isbbox) and sum(bbox) > 1
has_keypoints = len(keypoints) > 0
if not has_segmentation and not has_keypoints:
task.warning(
f"Annotation {annotation.get('id')} has no segmentation, bbox or keypoints")
continue
try:
image_model = images_id[image_id]
category_model_id = categories_id[category_id]
image_categories = categories_by_image[image_id]
except KeyError:
task.warning(
f"Could not find image assoicated with annotation {annotation.get('id')}")
continue
annotation_model = AnnotationModel.objects(
image_id=image_model.id,
category_id=category_model_id,
segmentation=segmentation,
keypoints=keypoints,
bbox = bbox
).first()
if annotation_model is None:
task.info(f"Creating annotation data ({image_id}, {category_id})")
annotation_model = AnnotationModel(image_id=image_model.id)
annotation_model.category_id = category_model_id
annotation_model.color = annotation.get('color')
annotation_model.metadata = annotation.get('metadata', {})
if has_segmentation:
if len(segmentation) < 1 or len(segmentation[0]) < 1: ## we have an empty segment with a bbox
task.info(f"Creating segment from bbox {bbox}")
segmentation = [bbox2seg(bbox)]
isbbox = True
annotation_model.segmentation = segmentation
annotation_model.area = area
annotation_model.bbox = bbox
if has_keypoints:
annotation_model.keypoints = keypoints
annotation_model.isbbox = isbbox
annotation_model.save()
image_categories.append(category_id)
else:
annotation_model.update(deleted=False, isbbox=isbbox)
task.info(
f"Annotation already exists (i:{image_id}, c:{category_id})")
for image_id in images_id:
image_model = images_id[image_id]
category_ids = categories_by_image[image_id]
all_category_ids = list(image_model.category_ids)
all_category_ids += category_ids
num_annotations = AnnotationModel.objects(
Q(image_id=image_id) & Q(deleted=False) &
(Q(area__gt=0) | Q(keypoints__size__gt=0))
).count()
image_model.update(
set__annotated=True,
set__category_ids=list(set(all_category_ids)),
set__num_annotations=num_annotations
)
task.set_progress(100, socket=socket)
__all__ = ["export_annotations", "import_annotations"]
|