"""
Created on Fri Oct 29 18:54:18 2021
@author: Krishna Nuthalapati
"""
import numpy as np
def iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
iou_score = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou_score
def nms(boxes, scores, thresh):
    num_boxes = boxes.shape[0]
    # 1 = keep, -1 = suppressed; every box is kept until a higher-scoring,
    # sufficiently overlapping box suppresses it
    indices = np.ones((num_boxes), dtype=int)
    for i in range(num_boxes):
        if indices[i] == -1:
            continue
        for j in range(i+1, num_boxes):
            if indices[j] == -1:
                continue
            iou_score = iou(boxes[i], boxes[j])
            if iou_score >= thresh:
                if scores[i] > scores[j]:
                    indices[j] = -1
                else:
                    indices[i] = -1
                    break
    idxs = np.where(indices == 1)[0]
    return idxs
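# Usage sketch (not part of the original script): boxes are [x1, y1, x2, y2] in
# pixel coordinates, scores are per-box confidences, and the values below are
# illustrative only.
if __name__ == '__main__':
    boxes = np.array([[10, 10, 50, 50],
                      [12, 12, 52, 52],
                      [100, 100, 140, 140]])
    scores = np.array([0.9, 0.6, 0.8])
    print(iou(boxes[0], boxes[1]))  # ~0.83: the first two boxes overlap heavily
    print(nms(boxes, scores, 0.5))  # [0 2]: the lower-scoring duplicate is suppressed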
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from future.utils import viewvalues
from collections import defaultdict
import logging
import time
logger = logging.getLogger(__name__)
def index_list():
return defaultdict(list)
class Blocker:
'''Takes in a record and returns all blocks that record belongs to'''
def __init__(self, predicates):
self.predicates = predicates
self.index_fields = defaultdict(index_list)
self.index_predicates = []
for full_predicate in predicates:
for predicate in full_predicate:
if hasattr(predicate, 'index'):
self.index_fields[predicate.field][predicate.type].append(
predicate)
self.index_predicates.append(predicate)
def __call__(self, records, target=False):
start_time = time.clock()
predicates = [(':' + str(i), predicate)
for i, predicate
in enumerate(self.predicates)]
for i, record in enumerate(records):
record_id, instance = record
for pred_id, predicate in predicates:
block_keys = predicate(instance, target=target)
for block_key in block_keys:
yield block_key + pred_id, record_id
if i and i % 10000 == 0:
                logger.info('%(iteration)d, %(elapsed).2f seconds',
{'iteration': i,
'elapsed': time.clock() - start_time})
def resetIndices(self):
# clear canopies to reduce memory usage
for predicate in self.index_predicates:
predicate.reset()
def index(self, data, field):
'''Creates TF/IDF index of a given set of data'''
indices = extractIndices(self.index_fields[field])
for doc in data:
if doc:
for _, index, preprocess in indices:
index.index(preprocess(doc))
for index_type, index, _ in indices:
index.initSearch()
for predicate in self.index_fields[field][index_type]:
logger.debug("Canopy: %s", str(predicate))
predicate.index = index
def unindex(self, data, field):
'''Remove index of a given set of data'''
indices = extractIndices(self.index_fields[field])
for doc in data:
if doc:
for _, index, preprocess in indices:
index.unindex(preprocess(doc))
for index_type, index, _ in indices:
index._index.initSearch()
for predicate in self.index_fields[field][index_type]:
logger.debug("Canopy: %s", str(predicate))
predicate.index = index
def indexAll(self, data_d):
for field in self.index_fields:
unique_fields = {record[field]
for record
in viewvalues(data_d)
if record[field]}
self.index(unique_fields, field)
def extractIndices(index_fields):
indices = []
for index_type, predicates in index_fields.items():
predicate = predicates[0]
index = predicate.index
preprocess = predicate.preprocess
if predicate.index is None:
index = predicate.initIndex()
indices.append((index_type, index, preprocess))
return indices
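# Usage sketch (hypothetical predicates, not part of the module above): each "full
# predicate" handed to Blocker is a callable, iterable bundle of simple predicates
# (dedupe's CompoundPredicate has that shape), and each simple predicate maps a
# record to a tuple of block keys.
class FirstWordPredicate(object):
    field = 'name'
    type = 'SimplePredicate'

    def __call__(self, record, target=False):
        value = record.get(self.field, '')
        return (value.split()[0].lower(),) if value else ()


class CompoundDemoPredicate(tuple):
    """A callable tuple of simple predicates, standing in for dedupe's CompoundPredicate."""

    def __call__(self, record, target=False):
        keys = []
        for pred in self:
            keys.extend(pred(record, target=target))
        return keys


def _blocker_demo():
    blocker = Blocker([CompoundDemoPredicate((FirstWordPredicate(),))])
    records = [('a', {'name': 'Jane Doe'}), ('b', {'name': 'Jane Smith'})]
    for block_key, record_id in blocker(records):
        print(block_key + ' -> ' + record_id)  # both records land in block 'jane:0'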
|
#encoding=utf8
'''
Detection with SSD
In this example, we will load a SSD model and use it to detect objects.
'''
import os
import sys
import argparse
import numpy as np
from PIL import Image, ImageDraw
# Make sure that caffe is on the python path:
caffe_root = './'
os.chdir(caffe_root)
sys.path.insert(0, os.path.join(caffe_root, 'python'))
import caffe
from google.protobuf import text_format
from caffe.proto import caffe_pb2
def get_labelname(labelmap, labels):
num_labels = len(labelmap.item)
labelnames = []
if type(labels) is not list:
labels = [labels]
for label in labels:
found = False
for i in range(0, num_labels):
if label == labelmap.item[i].label:
found = True
labelnames.append(labelmap.item[i].display_name)
break
assert found == True
return labelnames
class CaffeDetection:
def __init__(self, gpu_id, model_def, model_weights, image_resize, labelmap_file):
caffe.set_device(gpu_id)
caffe.set_mode_gpu()
self.image_resize = image_resize
# Load the net in the test phase for inference, and configure input preprocessing.
self.net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST) # use test mode (e.g., don't perform dropout)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
self.transformer.set_transpose('data', (2, 0, 1))
self.transformer.set_mean('data', np.array([104, 117, 123])) # mean pixel
# the reference model operates on images in [0,255] range instead of [0,1]
self.transformer.set_raw_scale('data', 255)
# the reference model has channels in BGR order instead of RGB
self.transformer.set_channel_swap('data', (2, 1, 0))
# load PASCAL VOC labels
        with open(labelmap_file, 'r') as labelmap_fh:
            self.labelmap = caffe_pb2.LabelMap()
            text_format.Merge(str(labelmap_fh.read()), self.labelmap)
def detect(self, image_file, conf_thresh=0.5, topn=5):
'''
SSD detection
'''
# set net to batch size of 1
# image_resize = 300
self.net.blobs['data'].reshape(1, 3, self.image_resize, self.image_resize)
image = caffe.io.load_image(image_file)
#Run the net and examine the top_k results
transformed_image = self.transformer.preprocess('data', image)
self.net.blobs['data'].data[...] = transformed_image
# Forward pass.
detections = self.net.forward()['detection_out']
# Parse the outputs.
det_label = detections[0,0,:,1]
det_conf = detections[0,0,:,2]
det_xmin = detections[0,0,:,3]
det_ymin = detections[0,0,:,4]
det_xmax = detections[0,0,:,5]
det_ymax = detections[0,0,:,6]
        # Get detections with confidence higher than conf_thresh.
top_indices = [i for i, conf in enumerate(det_conf) if conf >= conf_thresh]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_labels = get_labelname(self.labelmap, top_label_indices)
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
result = []
for i in range(min(topn, top_conf.shape[0])):
xmin = top_xmin[i] # xmin = int(round(top_xmin[i] * image.shape[1]))
ymin = top_ymin[i] # ymin = int(round(top_ymin[i] * image.shape[0]))
xmax = top_xmax[i] # xmax = int(round(top_xmax[i] * image.shape[1]))
ymax = top_ymax[i] # ymax = int(round(top_ymax[i] * image.shape[0]))
score = top_conf[i]
label = int(top_label_indices[i])
label_name = top_labels[i]
result.append([xmin, ymin, xmax, ymax, label, score, label_name])
return result
def main(args):
'''main '''
detection = CaffeDetection(args.gpu_id,
args.model_def, args.model_weights,
args.image_resize, args.labelmap_file)
result = detection.detect(args.image_file)
print result
img = Image.open(args.image_file)
draw = ImageDraw.Draw(img)
width, height = img.size
print width, height
for item in result:
xmin = int(round(item[0] * width))
ymin = int(round(item[1] * height))
xmax = int(round(item[2] * width))
ymax = int(round(item[3] * height))
draw.rectangle([xmin, ymin, xmax, ymax], outline=(255, 0, 0))
draw.text([xmin, ymin], item[-1] + str(item[-2]), (0, 0, 255))
print item
print [xmin, ymin, xmax, ymax]
print [xmin, ymin], item[-1]
img.save('detect_result.jpg')
def parse_args():
'''parse args'''
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=int, default=0, help='gpu id')
parser.add_argument('--labelmap_file',
default='data/VOC0712/labelmap_voc.prototxt')
parser.add_argument('--model_def',
default='models/VGGNet/VOC0712/SSD_300x300/deploy.prototxt')
parser.add_argument('--image_resize', default=300, type=int)
parser.add_argument('--model_weights',
default='models/VGGNet/VOC0712/SSD_300x300/'
'VGG_VOC0712_SSD_300x300_iter_120000.caffemodel')
parser.add_argument('--image_file', default='examples/images/fish-bike.jpg')
return parser.parse_args()
if __name__ == '__main__':
main(parse_args())
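# Programmatic usage sketch (not part of the original sample): the paths are the
# script defaults above and are assumed to exist locally, along with a CUDA GPU
# for gpu_id 0.
#
#     detection = CaffeDetection(0,
#                                'models/VGGNet/VOC0712/SSD_300x300/deploy.prototxt',
#                                'models/VGGNet/VOC0712/SSD_300x300/'
#                                'VGG_VOC0712_SSD_300x300_iter_120000.caffemodel',
#                                300,
#                                'data/VOC0712/labelmap_voc.prototxt')
#     for xmin, ymin, xmax, ymax, label, score, name in detection.detect('examples/images/fish-bike.jpg'):
#         print name, score, (xmin, ymin, xmax, ymax)   # coordinates are normalized to [0, 1]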
|
from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
"""Default user for Openapi Documentor."""
#: First and last name do not cover name patterns around the globe
name = CharField(_("Name of User"), blank=True, max_length=255)
first_name = None # type: ignore
last_name = None # type: ignore
def get_absolute_url(self):
"""Get url for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"username": self.username})
|
import itertools
import dgl
import torch
from rdkit import Chem
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from fgh_gnn.utils import FGROUP_MOLS, get_ring_fragments, ogb_graph_to_mol
class FGroupHetGraphBuilder:
def __init__(self, vocab):
self.vocab = vocab
self.fgroup_vocab = vocab.loc[vocab['type'] == 'fgroup']
self.ring_vocab = vocab.loc[vocab['type'] == 'ring']
self.ring_smiles_set = set(self.ring_vocab['name'].unique())
self.misc_ring_idx = len(vocab) - 1
def build_fgroup_heterograph(self, raw_graph):
atom_feats = torch.from_numpy(raw_graph['node_feat'])
bond_feats = torch.from_numpy(raw_graph['edge_feat'])
a2a_edges = torch.from_numpy(raw_graph['edge_index'])
# build tree
mol = ogb_graph_to_mol(raw_graph)
clusters = self._make_clusters(mol)
cluster_feats = torch.tensor([c.features for c in clusters],
dtype=torch.long)
c2atom_edges, atom2c_edges = self._make_inter_edges(clusters)
c2c_edges, overlap_feats = \
self._make_intracluster_edges(raw_graph, clusters)
data_dict = {
('atom', 'bond', 'atom'): (a2a_edges[0], a2a_edges[1]),
('cluster', 'refine', 'atom'): (c2atom_edges[0], c2atom_edges[1]),
('atom', 'pool', 'cluster'): (atom2c_edges[0], atom2c_edges[1]),
('cluster', 'overlap', 'cluster'): (c2c_edges[0], c2c_edges[1])
}
num_nodes_dict = {
'atom': raw_graph['num_nodes'],
'cluster': len(clusters)
}
g = dgl.heterograph(data_dict=data_dict, num_nodes_dict=num_nodes_dict)
g.nodes['atom'].data['x'] = atom_feats
g.nodes['cluster'].data['x'] = cluster_feats
g.edges['bond'].data['x'] = bond_feats
g.edges['overlap'].data['x'] = overlap_feats
return g
def _make_clusters(self, mol):
clusters = []
# add all functional groups
for row in self.fgroup_vocab.itertuples():
row_idx = row.Index
fgroup_query = FGROUP_MOLS[row.name]
matches = mol.GetSubstructMatches(fgroup_query)
for match_idxs in matches:
clusters.append(Cluster(row_idx, 'fgroup', match_idxs))
# add all rings
for ring_idxs in get_ring_fragments(mol):
ring_smiles = Chem.MolFragmentToSmiles(mol, list(ring_idxs),
isomericSmiles=False,
kekuleSmiles=True)
if ring_smiles in self.ring_smiles_set:
row_idx = self.ring_vocab.index[self.ring_vocab['name']
== ring_smiles]
row_idx = int(row_idx[0])
else:
row_idx = self.misc_ring_idx
clusters.append(Cluster(row_idx, 'ring', ring_idxs))
# add all remaining singular atoms
leftover_atoms = set(range(mol.GetNumAtoms()))
for cluster in clusters:
leftover_atoms.difference_update(cluster.atom_idxs)
for atom_idx in leftover_atoms:
atomic_num = mol.GetAtomWithIdx(atom_idx).GetAtomicNum()
clusters.append(Cluster(atomic_num, 'atom', (atom_idx,)))
return clusters
def _make_inter_edges(self, clusters):
c2atom_edges = [[], []]
atom2c_edges = [[], []]
for cluster_idx, cluster in enumerate(clusters):
for atom_idx in cluster.atom_idxs:
c2atom_edges[0].append(cluster_idx)
c2atom_edges[1].append(atom_idx)
atom2c_edges[0].append(atom_idx)
atom2c_edges[1].append(cluster_idx)
c2atom_edges = torch.tensor(c2atom_edges, dtype=torch.long)
atom2c_edges = torch.tensor(atom2c_edges, dtype=torch.long)
return c2atom_edges, atom2c_edges
def _make_intracluster_edges(self, raw_graph, clusters):
edge_index = raw_graph['edge_index']
edge_dict = {i: set() for i in range(raw_graph['num_nodes'])}
for i, j in zip(edge_index[0], edge_index[1]):
edge_dict[i].add(j)
num_clusters = len(clusters)
adj_matrix = [[0] * num_clusters for _ in range(num_clusters)]
cluster_neighbours = []
for cluster in clusters:
neighbours = set()
for atom_idx in cluster.atom_idxs:
neighbours.add(atom_idx)
neighbours.update(edge_dict[atom_idx])
cluster_neighbours.append(neighbours)
for i, j in itertools.combinations(range(num_clusters), r=2):
ci, cj = clusters[i], clusters[j]
if ci.atom_idxs & cj.atom_idxs:
edge_weight = len(ci.atom_idxs & cj.atom_idxs) + 1
elif cluster_neighbours[i] & cluster_neighbours[j]:
edge_weight = 1
else:
continue
adj_matrix[i][j] = edge_weight
adj_matrix[j][i] = edge_weight
# build spanning tree
adj_matrix = csr_matrix(adj_matrix)
span_tree = minimum_spanning_tree(adj_matrix, overwrite=True)
adj_matrix = torch.from_numpy(span_tree.toarray()).long()
adj_matrix = to_bidirectional(adj_matrix)
# represent as sparse matrix
adj_matrix = adj_matrix.to_sparse().coalesce()
edge_index = adj_matrix.indices()
edge_feats = adj_matrix.values()
return edge_index, edge_feats
class Cluster:
def __init__(self, vocab_id, cluster_type, atom_idxs):
        # sanity check: vocab ids must be plain Python ints
        if not isinstance(vocab_id, int):
            raise ValueError('vocab_id must be an int, got %r' % type(vocab_id))
self.vocab_id = vocab_id
self.cluster_type_idx = ('fgroup', 'ring', 'atom').index(cluster_type)
self.atom_idxs = frozenset(atom_idxs)
self.features = [self.vocab_id, self.cluster_type_idx]
# Helper Method
def to_bidirectional(X):
X_T = X.t()
sym_sum = X + X_T
X_min = torch.min(X, X_T)
return torch.where(X_min > 0, X_min, sym_sum)
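# Quick illustration (values are arbitrary): to_bidirectional symmetrises the
# directed MST adjacency, keeping the smaller positive weight where both
# directions are already set and mirroring the weight otherwise.
if __name__ == '__main__':
    _A = torch.tensor([[0, 3, 0],
                       [0, 0, 2],
                       [0, 0, 0]])
    print(to_bidirectional(_A))
    # tensor([[0, 3, 0],
    #         [3, 0, 2],
    #         [0, 2, 0]])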
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START translate_v3_get_supported_languages_for_target]
from google.cloud import translate
def get_supported_languages_with_target(project_id="YOUR_PROJECT_ID"):
"""Listing supported languages with target language name."""
client = translate.TranslationServiceClient()
parent = client.location_path(project_id, "global")
# Supported language codes: https://cloud.google.com/translate/docs/languages
response = client.get_supported_languages(
display_language_code="is", # target language code
parent=parent
)
    # List the language code and display name of each supported language
for language in response.languages:
print(u"Language Code: {}".format(language.language_code))
print(u"Display Name: {}".format(language.display_name))
# [END translate_v3_get_supported_languages_for_target]
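# Usage sketch: requires Cloud Translation API credentials; the project id below
# is a placeholder.
#
#     get_supported_languages_with_target("my-gcp-project")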
|
from .api.config.application_config import ApplicationConfig
from .console_application import ConsoleApplication
from .config.default_application_config import DefaultApplicationConfig
__version__ = "0.2.4"
|
#!/bin/env python
#==========================================================================
# (c) 2004-2005 Total Phase, Inc.
#--------------------------------------------------------------------------
# Project : Aardvark Sample Code
# File : aamonitor_filtered.py
#--------------------------------------------------------------------------
# Perform I2C monitoring functions with the Aardvark I2C/SPI adapter with
# the ability to filter the data based on slave address.
#--------------------------------------------------------------------------
# Redistribution and use of this file in source and binary forms, with
# or without modification, are permitted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==========================================================================
#==========================================================================
# IMPORTS
#==========================================================================
import sys, time
from aardvark_py import *
#==========================================================================
# CONSTANTS
#==========================================================================
BUFFER_SIZE = 32767
TIMEFORMAT = "%Y-%m-%d %H:%M:%S"
#==========================================================================
# FUNCTIONS
#==========================================================================
def dump (handle, filter_addr, timeout):
# Wait for data on the bus
print "Waiting %d ms for first transaction..." % timeout
print " Filtering on 0x%03x" % filter_addr
result = aa_async_poll(handle, timeout)
if (result == AA_ASYNC_NO_DATA):
print " no data pending."
return
print " data received"
last_data0 = 0
last_data1 = 0
# Loop until aa_async_poll times out
while 1:
# Read the next monitor transaction.
# This function has an internal timeout (see datasheet), though
# since we have already checked for data using aa_async_poll,
# the timeout should never be exercised.
(status, data) = aa_i2c_monitor_read(handle, BUFFER_SIZE)
if (status < 0):
print "error: %s" % aa_status_string(status)
return
# The display flag indicates if the filtered address has been matched
# and the data should be displayed.
display = 0
# The display_buffer is used to hold the start condition because it
# is sent before the address is known, so the output needs to be
# cached to display later.
display_buffer = ""
for i in range(len(data)):
if (data[i] == AA_I2C_MONITOR_CMD_START):
# Generate a timestamp. This time stamp does not accurately
# reflect the actual time that the transaction occurred, but
# is generated to give the user a relative time for the
# transaction.
fmtstamp = time.strftime(TIMEFORMAT, time.localtime(time.time()))
# Cache the start condition
display_buffer = "\n%s : [S] " % fmtstamp
elif (data[i] == AA_I2C_MONITOR_CMD_STOP):
if display:
sys.stdout.write("[P]\n")
# After a stop condition, reset the display flag for
# next message
display = 0
else:
nack = (data[i] & AA_I2C_MONITOR_NACK)
if nack: nack_str = "*"
else: nack_str = ""
# 7-bit addresses
if (last_data0 == AA_I2C_MONITOR_CMD_START and
((data[i] & 0xf8) != 0xf0 or nack)):
# Test to see if 7-bit address matches
if ((data[i] & 0xff) >> 1 == filter_addr):
                        # If the address matches, then set display to 1
display = 1
# Write out the start condition
sys.stdout.write(display_buffer)
# And reset the buffer
display_buffer = ""
# Now process regularly
if (data[i] & 0x01): dir_str = "r"
else: dir_str = "w"
sys.stdout.write("<%02x:%s>%s " %
((data[i] & 0xff) >> 1,
dir_str,
nack_str
))
# 10-bit addresses
# See Philips specification for more details.
elif (last_data1 == AA_I2C_MONITOR_CMD_START and
(last_data0 & 0xf8) == 0xf0):
# Test to see if 10-bit address matches
if (((last_data0 << 7) & 0x300) | (data[i] & 0xff)) == filter_addr:
                        # If the address matches, then set display to 1
display = 1
# Write out the start condition
sys.stdout.write(display_buffer)
# Reset the buffer
display_buffer = ""
if (last_data0 & 0x01): dir_str = "r"
else: dir_str = "w"
sys.stdout.write("<%03x:%s>%s " %
(((last_data0 << 7) & 0x300) | (data[i] & 0xff),
dir_str,
nack_str
))
# Normal data
elif (last_data0 != AA_I2C_MONITOR_CMD_START):
if display:
sys.stdout.write("%02x%s " % (data[i] & 0xff, nack_str))
last_data1 = last_data0
last_data0 = data[i]
sys.stdout.flush()
# print "\nWaiting %d ms for subsequent transaction..." % INTERVAL_TIMEOUT
# Use aa_async_poll to wait for the next transaction
result = aa_async_poll(handle, timeout)
if (result == AA_ASYNC_NO_DATA):
print " No more data pending."
break
#==========================================================================
# MAIN PROGRAM
#==========================================================================
if (len(sys.argv) < 4):
print "usage: aamonitor PORT ADDR TIMEOUT"
print " where:"
print " PORT is the Aardvark adapter port number"
print " ADDR is the slave address as an integer"
print " TIMEOUT is the timeout interval in ms"
sys.exit()
port = int(sys.argv[1])
filter_addr = int(sys.argv[2], 0)
timeout = int(sys.argv[3])
# Open the device
handle = aa_open(port)
if (handle <= 0):
print "Unable to open Aardvark device on port %d" % port
print "Error code = %d" % handle
sys.exit()
# Ensure that the I2C subsystem is enabled
aa_configure(handle, AA_CONFIG_SPI_I2C)
# Disable the I2C bus pullup resistors (2.2k resistors).
# This command is only effective on v2.0 hardware or greater.
# The pullup resistors on the v1.02 hardware are enabled by default.
aa_i2c_pullup(handle, AA_I2C_PULLUP_NONE)
# Disable the Aardvark adapter's power pins.
# This command is only effective on v2.0 hardware or greater.
# The power pins on the v1.02 hardware are not enabled by default.
aa_target_power(handle, AA_TARGET_POWER_NONE)
# Enable the monitor
result = aa_i2c_monitor_enable(handle)
if (result < 0):
print "error: %s\n" % aa_status_string(result)
sys.exit()
print "Enabled I2C monitor."
# Watch the I2C port
dump(handle, filter_addr, timeout)
# Disable the slave and close the device
aa_i2c_monitor_disable(handle)
aa_close(handle)
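# Example invocation (values are illustrative): monitor Aardvark port 0, filter on
# slave address 0x50, and stop after 5000 ms without bus activity:
#
#     python aamonitor_filtered.py 0 0x50 5000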
|
# -*- coding: utf-8 -*-
# Markdown parsers.
#
#
# Author: Moogen Tian <http://blog.galeo.me>
#
# Legal:
#
# This file is published under BSD License.
#
# And the code structure references:
#
# * pagewise (by ainm <ainm at gmx.com>, with personal public license)
#
# * mynt (by Andrew Fricke, the author of Hoep, with BSD license)
#
# please NOTICE that!
#
# Hoep only accepts and returns *unicode* objects in Python 2 and
# *str* objects in Python 3.
from __future__ import unicode_literals
import re
import sys
#
# Error handling.
#
class MDParserException(Exception):
pass
def error(message, *args):
"""
Raise a MDParserException with a given message.
"""
raise MDParserException(message % args)
def warning(message, *args):
"""
Just display a message to standard error.
"""
sys.stderr.write("WARNING: " + message % args)
def halt(message, *args):
"""
Display a message to standard error and stop the program.
"""
sys.stderr.write("FATAL: " + message % args)
sys.exit(1)
#
# Markup support.
#
# Tables with bootstrap
def tablestrap(content, class_=''):
if class_:
class_ = class_.split()
if isinstance(class_, list):
if 'table' not in class_:
class_ = ['table'] + class_
class_ = ' '.join(class_)
if class_:
class_ = 'class="%s"' % class_
return ''.join(['<table ', class_, '>\n',
content, '\n</table>'])
# Pygments.
HAVE_PYGMENTS = True
try:
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
except ImportError:
HAVE_PYGMENTS = False
def require_pygments():
"""
For error reporting when trying to use a markup language
with pygments, but pygments isn't installed.
"""
if not HAVE_PYGMENTS:
error("please, install Pygments <http://pygments.org/>.")
def hl_with_pygments(text, lang, fmt_options={}):
s = ''
formatter = HtmlFormatter(**fmt_options)
try:
lexer = get_lexer_by_name(lang, stripall=True)
except ValueError:
s = '<div class="highlight"><span class="err">'\
'Error: language "%s" is not supported</span></div>' % lang
lexer = get_lexer_by_name('text', stripall=True)
return ''.join([s, highlight(text, lexer, formatter)])
# Available renderers will add themselves to this hash.
# The key is the renderer name, the value is another hash
# with two keys/values, the renderer constructor/options.
MARKUP_RENDERERS = {}
def xlate_exts_flags(exts_flags_opts, parser_exts_flags):
actual_exts = 0
actual_flags = 0
exts = exts_flags_opts['extensions']
flags = exts_flags_opts['render_flags']
parser_exts = parser_exts_flags['extensions']
parser_flags = parser_exts_flags['render_flags']
if ('fenced_code' in exts) or ('tables' in exts):
require_pygments()
for ext in exts:
if ext in parser_exts:
actual_exts |= parser_exts[ext]
else:
warning("ignoring unknown extension: %s", str(ext))
for flag in flags:
if flag in parser_flags:
actual_flags |= parser_flags[flag]
else:
warning("ignoring unknown render flag: %s", str(flag))
return actual_exts, actual_flags
#
# Misaka.
#
HAVE_MISAKA = True
try:
import misaka
from misaka import HtmlRenderer
MISAKA_EXTS_FLAGS = {
'extensions': {
'tables': misaka.EXT_TABLES,
'fenced_code': misaka.EXT_FENCED_CODE,
'footnotes': misaka.EXT_FOOTNOTES,
'autolink': misaka.EXT_AUTOLINK,
'strikethrough': misaka.EXT_STRIKETHROUGH,
'underline': misaka.EXT_UNDERLINE,
'highlight': misaka.EXT_HIGHLIGHT,
'quote': misaka.EXT_QUOTE,
'superscript': misaka.EXT_SUPERSCRIPT,
'math': misaka.EXT_MATH,
'no_intra_emphasis': misaka.EXT_NO_INTRA_EMPHASIS,
'space_headers': misaka.EXT_SPACE_HEADERS,
'math_explicit': misaka.EXT_MATH_EXPLICIT,
'disable_indented_code': misaka.EXT_DISABLE_INDENTED_CODE
},
'render_flags': {
'skip_html': misaka.HTML_SKIP_HTML,
'escape': misaka.HTML_ESCAPE,
'hard_wrap': misaka.HTML_HARD_WRAP,
'use_xhtml': misaka.HTML_USE_XHTML,
}
}
class MisakaRenderer(HtmlRenderer):
def __init__(self, tbl_class='', fmt_options={}, *args, **kwargs):
super(MisakaRenderer, self).__init__(*args, **kwargs)
self.tbl_class = tbl_class
self.fmt_options = fmt_options
if HAVE_PYGMENTS:
def blockcode(self, text, lang):
return hl_with_pygments(text, lang, self.fmt_options)
def table(self, content):
return tablestrap(content, self.tbl_class)
def misaka_renderer(options, tbl_class='', fmt_options={}):
"""
Returns a function that can be used to transform Markdown to HTML
using Misaka, preconfigured with the given extensions/flags.
"""
Renderer = MisakaRenderer
used_exts, used_flags = xlate_exts_flags(options, MISAKA_EXTS_FLAGS)
return misaka.Markdown(Renderer(tbl_class, fmt_options, used_flags), used_exts)
MARKUP_RENDERERS['misaka'] = {
'renderer': misaka_renderer,
'options': ['extensions', 'render_flags'],
}
except ImportError:
HAVE_MISAKA = False
#
# hoep
#
HAVE_HOEP = True
try:
import hoep as h
HOEP_EXTS_FLAGS = {
'extensions': {
'autolink': h.EXT_AUTOLINK,
'disable_indented_code': h.EXT_DISABLE_INDENTED_CODE,
'fenced_code': h.EXT_FENCED_CODE,
'footnotes': h.EXT_FOOTNOTES,
'highlight': h.EXT_HIGHLIGHT,
'lax_spacing': h.EXT_LAX_SPACING,
'no_intra_emphasis': h.EXT_NO_INTRA_EMPHASIS,
'quote': h.EXT_QUOTE,
'space_headers': h.EXT_SPACE_HEADERS,
'strikethrough': h.EXT_STRIKETHROUGH,
'superscript': h.EXT_SUPERSCRIPT,
'tables': h.EXT_TABLES,
'underline': h.EXT_UNDERLINE
},
'render_flags': {
'escape': h.HTML_ESCAPE,
'expand_tabs': h.HTML_EXPAND_TABS,
'hard_wrap': h.HTML_HARD_WRAP,
'safelink': h.HTML_SAFELINK,
'skip_html': h.HTML_SKIP_HTML,
'skip_images': h.HTML_SKIP_IMAGES,
'skip_links': h.HTML_SKIP_LINKS,
'skip_style': h.HTML_SKIP_STYLE,
'smartypants': h.HTML_SMARTYPANTS,
'toc': h.HTML_TOC,
'use_xhtml': h.HTML_USE_XHTML
}
}
class HoepRenderer(h.Hoep):
def __init__(self, extensions=0, render_flags=0, tbl_class='',
fmt_options={}):
super(HoepRenderer, self).__init__(extensions, render_flags)
self._toc_ids = {}
self._toc_patterns = (
(r'<[^<]+?>', ''),
(r'[^a-z0-9_.\s-]', ''),
(r'\s+', '-'),
(r'^[^a-z]+', ''),
(r'^$', 'section')
)
self.tbl_class = tbl_class
self.fmt_options = fmt_options
if HAVE_PYGMENTS:
def block_code(self, text, lang):
"""Highlight code with pygments.
"""
return hl_with_pygments(text, lang, self.fmt_options)
def table(self, header, body):
content = header + body
return tablestrap(content, self.tbl_class)
def header(self, text, level):
if self.render_flags & h.HTML_TOC:
identifier = text.lower()
for pattern, replace in self._toc_patterns:
identifier = re.sub(pattern, replace, identifier)
if identifier in self._toc_ids:
self._toc_ids[identifier] += 1
identifier = '{0}-{1}'.format(identifier, self._toc_ids[identifier])
else:
self._toc_ids[identifier] = 1
return ('<h{0} id="{1}">{2}'
'<a class="headerlink" href="#{1}" title="Link to header title.">¶</a>'
'</h{0}>').format(level, identifier, text)
else:
return '<h{0}>{1}</h{0}>'.format(level, text)
def preprocess(self, markdown):
self._toc_ids.clear()
return markdown
def hoep_renderer(options, **kwargs):
"""
Returns a function that can be used to transform Markdown to HTML
using Hoep, preconfigured with the given extensions/flags.
"""
used_exts, used_flags = xlate_exts_flags(options, HOEP_EXTS_FLAGS)
return HoepRenderer(used_exts, used_flags, **kwargs).render
MARKUP_RENDERERS['hoep'] = {
'renderer': hoep_renderer,
'options': ['extensions', 'render_flags']
}
except ImportError:
HAVE_HOEP = False
class MarkupProvider(object):
def __init__(self, markup, options):
"""
Arguments:
- `markup`: str, 'misaka' | 'hoep'.
- `options`: dict, has the keys: 'extensions' and 'render_flags'.
"""
if markup not in MARKUP_RENDERERS:
error("Unavailable markup renderer: %s", markup)
self.markup = markup
        if ('extensions' not in options) or ('render_flags' not in options):
error("Key error in options, must contain 'extensions' and 'render_flags'.")
self.options = options
def _get_option(self, option, markup_options={}):
"""
Lookup 'option' in 'markup_options' (a dict)
but fall back to default option if unbound.
"""
if markup_options and (option in markup_options):
return markup_options[option]
else:
return self.options[option]
def get_renderer(self, markup_options={}, **kwargs):
"""
Will return a function to render the item content
based on the options specified in it. All unspecified
options will be taken from the base configuration.
"""
options = {}
for option in MARKUP_RENDERERS[self.markup]['options']:
options[option] = self._get_option(option, markup_options)
return MARKUP_RENDERERS[self.markup]['renderer'](options, **kwargs)
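# Usage sketch (assumes the Misaka backend and Pygments are installed; the option
# values below are illustrative, not a recommended configuration).
if __name__ == '__main__':
    provider = MarkupProvider('misaka', {
        'extensions': ['fenced_code', 'tables'],
        'render_flags': ['use_xhtml'],
    })
    render = provider.get_renderer(tbl_class='table-striped')
    print(render('# Hello\n\nSome *Markdown* text.'))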
|
import unittest
from moment import moment
class TestIsSameOrBefore(unittest.TestCase):
def test_default(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-2-2 13:02:09.957000 +0800')
self.assertTrue(a.isSameOrBefore([2021, 5, 1]))
self.assertFalse(a.isSameOrBefore(b))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-2-2 13:02:09.957000 +0800')
self.assertTrue(a.isSameOrBefore('2021-04-22 04:02:09.957000 +0800'))
self.assertFalse(a.isSameOrBefore(b))
def test_year(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-2-2 13:02:09.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'year'))
self.assertTrue(a.isSameOrBefore(b, 'year', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-1-1 0:0:0.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'year'))
self.assertTrue(a.isSameOrBefore(b, 'year', True))
def test_month(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-2 13:02:09.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'month'))
self.assertTrue(a.isSameOrBefore(b, 'month', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-1 0:0:0.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'month'))
self.assertTrue(a.isSameOrBefore(b, 'month', True))
def test_quarter(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-5-2 13:02:09.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'quarter'))
self.assertTrue(a.isSameOrBefore(b, 'quarter', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-1 0:0:0.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'quarter'))
self.assertTrue(a.isSameOrBefore(b, 'quarter', True))
def test_week(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-21 13:02:09.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'week'))
self.assertTrue(a.isSameOrBefore(b, 'week', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-18 0:0:0.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'week'))
self.assertTrue(a.isSameOrBefore(b, 'week', True))
def test_isoWeek(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-21 13:02:09.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'isoWeek'))
self.assertTrue(a.isSameOrBefore(b, 'isoWeek', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-19 0:0:0.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'isoWeek'))
self.assertTrue(a.isSameOrBefore(b, 'isoWeek', True))
def test_day(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 13:02:09.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'day'))
self.assertTrue(a.isSameOrBefore(b, 'day', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 0:0:0.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'day'))
self.assertTrue(a.isSameOrBefore(b, 'day', True))
def test_date(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 13:02:09.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'date'))
self.assertTrue(a.isSameOrBefore(b, 'date', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 0:0:0.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'date'))
self.assertTrue(a.isSameOrBefore(b, 'date', True))
def test_hour(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 4:12:09.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'hour'))
self.assertTrue(a.isSameOrBefore(b, 'hour', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 4:0:0.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'hour'))
self.assertTrue(a.isSameOrBefore(b, 'hour', True))
def test_minute(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 4:2:39.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'minute'))
self.assertTrue(a.isSameOrBefore(b, 'minute', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 4:2:0.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'minute'))
self.assertTrue(a.isSameOrBefore(b, 'minute', True))
def test_second(self):
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 4:2:9.957000 +0800')
self.assertFalse(a.isSameOrBefore(b, 'second'))
self.assertTrue(a.isSameOrBefore(b, 'second', True))
a = moment('2021-04-22 04:02:09.957000 +0800')
b = moment('2021-4-22 4:2:9.0 +0800')
self.assertFalse(a.isSameOrBefore(b, 'second'))
self.assertTrue(a.isSameOrBefore(b, 'second', True))
if __name__ == '__main__':
unittest.main()
|
"""
Version Software: 0.0.0
Version Python: 3.7
"""
|
import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import tensorflow as tf
class Optimizer:
def __init__(self, lr=1e-3):
self._lr = lr
self._opt = None
@property
def name(self):
return str(self)
def minimize(self, x, *args, **kwargs):
return self._opt.minimize(x, *args, **kwargs)
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return str(self)
class MBGD(Optimizer):
def __init__(self, lr=1e-3):
Optimizer.__init__(self, lr)
self._opt = tf.train.GradientDescentOptimizer(self._lr)
class Momentum(Optimizer):
def __init__(self, lr=1e-3, momentum=0.8):
Optimizer.__init__(self, lr)
self._opt = tf.train.MomentumOptimizer(self._lr, momentum)
class NAG(Optimizer):
def __init__(self, lr=1e-3, momentum=0.8):
Optimizer.__init__(self, lr)
self._opt = tf.train.MomentumOptimizer(self._lr, momentum, use_nesterov=True)
class AdaDelta(Optimizer):
def __init__(self, lr=1e-3, rho=0.95, eps=1e-8):
Optimizer.__init__(self, lr)
self._opt = tf.train.AdadeltaOptimizer(self._lr, rho, eps)
class AdaGrad(Optimizer):
def __init__(self, lr=1e-3, init=0.1):
Optimizer.__init__(self, lr)
self._opt = tf.train.AdagradOptimizer(self._lr, init)
class Adam(Optimizer):
def __init__(self, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
Optimizer.__init__(self, lr)
self._opt = tf.train.AdamOptimizer(self._lr, beta1, beta2, eps)
class RMSProp(Optimizer):
def __init__(self, lr=1e-3, decay=0.9, momentum=0.0, eps=1e-10):
Optimizer.__init__(self, lr)
self._opt = tf.train.RMSPropOptimizer(self._lr, decay, momentum, eps)
# Factory
class OptFactory:
available_optimizers = {
"MBGD": MBGD, "Momentum": Momentum, "NAG": NAG,
"AdaDelta": AdaDelta, "AdaGrad": AdaGrad,
"Adam": Adam, "RMSProp": RMSProp
}
def get_optimizer_by_name(self, name, lr, *args, **kwargs):
try:
optimizer = self.available_optimizers[name](lr, *args, **kwargs)
return optimizer
except KeyError:
            raise NotImplementedError("Undefined optimizer: '{}'".format(name))
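# Usage sketch (TensorFlow 1.x graph mode, matching the wrappers above; the toy
# quadratic below is illustrative only).
if __name__ == '__main__':
    w = tf.Variable(5.0)
    loss = tf.square(w - 2.0)
    train_op = OptFactory().get_optimizer_by_name("Adam", 0.1).minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(200):
            sess.run(train_op)
        print(sess.run(w))  # converges towards 2.0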
|
import os.path
import sys
from setuptools import setup
with open("README.md", encoding="utf-8") as fh:
long_description = fh.read()
requirements = ["numpy"]
if sys.version_info[:2] == (3, 6):
requirements.append("dataclasses")
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, "opencv_wrapper", "__version__.py"), "r") as f:
exec(f.read(), about)
setup(
name=about["__title__"],
version=about["__version__"],
author=about["__author__"],
author_email=about["__author_email__"],
description=about["__description__"],
license=about["__license__"],
long_description=long_description,
long_description_content_type="text/markdown",
url=about["__url__"],
packages=["opencv_wrapper"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Image Recognition",
"Typing :: Typed",
],
keywords="OpenCV",
install_requires=requirements,
python_requires=">=3.6",
)
|
# Copyright (c) 2018, WSO2 Inc. (http://wso2.com) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# importing required modules
import sys
from xml.etree import ElementTree as ET
import subprocess
import wget
import logging
import inspect
import os
import shutil
import pymysql
import sqlparse
import glob
import ast
import stat
import re
from pathlib import Path
import urllib.request as urllib2
from xml.dom import minidom
import configure_product as cp
from subprocess import Popen, PIPE
from const import TEST_PLAN_PROPERTY_FILE_NAME, INFRA_PROPERTY_FILE_NAME, LOG_FILE_NAME, DB_META_DATA, \
PRODUCT_STORAGE_DIR_NAME, DEFAULT_DB_USERNAME, LOG_STORAGE, TESTNG_DIST_XML_PATH, TESTNG_SERVER_MGT_DIST, LOG_FILE_PATHS, DIST_POM_PATH, NS, ZIP_FILE_EXTENSION, DATABASE_NAME
git_repo_url = None
git_branch = None
os_type = None
workspace = None
dist_name = None
dist_zip_name = None
product_id = None
log_file_name = None
target_path = None
db_engine = None
db_engine_version = None
latest_product_release_api = None
latest_product_build_artifacts_api = None
sql_driver_location = None
db_host = None
db_port = None
db_username = None
db_password = None
tag_name = None
test_mode = None
wum_product_version = None
use_custom_testng_file = None
database_config = {}
def read_property_files():
global db_engine
global db_engine_version
global git_repo_url
global git_branch
global latest_product_release_api
global latest_product_build_artifacts_api
global sql_driver_location
global db_host
global db_port
global db_username
global db_password
global workspace
global product_id
global database_config
global wum_product_version
global test_mode
global use_custom_testng_file
workspace = os.getcwd()
property_file_paths = []
test_plan_prop_path = Path(workspace + "/" + TEST_PLAN_PROPERTY_FILE_NAME)
infra_prop_path = Path(workspace + "/" + INFRA_PROPERTY_FILE_NAME)
if Path.exists(test_plan_prop_path) and Path.exists(infra_prop_path):
property_file_paths.append(test_plan_prop_path)
property_file_paths.append(infra_prop_path)
for path in property_file_paths:
with open(path, 'r') as filehandle:
for line in filehandle:
if line.startswith("#"):
continue
prop = line.split("=")
key = prop[0]
val = prop[1]
if key == "DBEngine":
db_engine = val.strip()
elif key == "DBEngineVersion":
db_engine_version = val
elif key == "PRODUCT_GIT_URL":
git_repo_url = val.strip().replace('\\', '')
product_id = git_repo_url.split("/")[-1].split('.')[0]
elif key == "PRODUCT_GIT_BRANCH":
git_branch = val.strip()
elif key == "LATEST_PRODUCT_RELEASE_API":
latest_product_release_api = val.strip().replace('\\', '')
elif key == "LATEST_PRODUCT_BUILD_ARTIFACTS_API":
latest_product_build_artifacts_api = val.strip().replace('\\', '')
elif key == "SQL_DRIVERS_LOCATION_UNIX" and not sys.platform.startswith('win'):
sql_driver_location = val.strip()
elif key == "SQL_DRIVERS_LOCATION_WINDOWS" and sys.platform.startswith('win'):
sql_driver_location = val.strip()
elif key == "DatabaseHost":
db_host = val.strip()
elif key == "DatabasePort":
db_port = val.strip()
elif key == "DBUsername":
db_username = val.strip()
elif key == "DBPassword":
db_password = val.strip()
elif key == "TEST_MODE":
test_mode = val.strip()
elif key == "WUM_PRODUCT_VERSION":
wum_product_version = val.strip()
elif key == "USE_CUSTOM_TESTNG":
use_custom_testng_file = val.strip()
else:
raise Exception("Test Plan Property file or Infra Property file is not in the workspace: " + workspace)
def validate_property_readings():
missing_values = ""
if db_engine is None:
missing_values += " -DBEngine- "
if git_repo_url is None:
missing_values += " -PRODUCT_GIT_URL- "
if product_id is None:
missing_values += " -product-id- "
if git_branch is None:
missing_values += " -PRODUCT_GIT_BRANCH- "
if latest_product_release_api is None:
missing_values += " -LATEST_PRODUCT_RELEASE_API- "
if latest_product_build_artifacts_api is None:
missing_values += " -LATEST_PRODUCT_BUILD_ARTIFACTS_API- "
if sql_driver_location is None:
missing_values += " -SQL_DRIVERS_LOCATION_<OS_Type>- "
if db_host is None:
missing_values += " -DatabaseHost- "
if db_port is None:
missing_values += " -DatabasePort- "
if db_password is None:
missing_values += " -DBPassword- "
if test_mode is None:
missing_values += " -TEST_MODE- "
if wum_product_version is None:
missing_values += " -WUM_PRODUCT_VERSION- "
if use_custom_testng_file is None:
missing_values += " -USE_CUSTOM_TESTNG- "
if missing_values != "":
        logger.error('Invalid property file found. Missing values: %s', missing_values)
return False
else:
return True
def get_db_meta_data(argument):
switcher = DB_META_DATA
return switcher.get(argument, False)
def construct_url(prefix):
url = prefix + db_host + ":" + db_port
return url
def function_logger(file_level, console_level=None):
global log_file_name
log_file_name = LOG_FILE_NAME
function_name = inspect.stack()[1][3]
logger = logging.getLogger(function_name)
# By default, logs all messages
logger.setLevel(logging.DEBUG)
if console_level != None:
# StreamHandler logs to console
ch = logging.StreamHandler()
ch.setLevel(console_level)
ch_format = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(ch_format)
logger.addHandler(ch)
# log in to a file
fh = logging.FileHandler("{0}.log".format(function_name))
fh.setLevel(file_level)
fh_format = logging.Formatter('%(asctime)s - %(lineno)d - %(levelname)-8s - %(message)s')
fh.setFormatter(fh_format)
logger.addHandler(fh)
return logger
def download_file(url, destination):
"""Download a file using wget package.
Download the given file in _url_ as the directory+name provided in _destination_
"""
wget.download(url, destination)
def get_db_hostname(url, db_type):
"""Retreive db hostname from jdbc url
"""
if db_type == 'ORACLE':
hostname = url.split(':')[3].replace("@", "")
else:
hostname = url.split(':')[2].replace("//", "")
return hostname
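# Quick illustration of get_db_hostname() on representative JDBC URLs (the values
# are illustrative only):
#
#     get_db_hostname("jdbc:mysql://db.example.com:3306", "MYSQL")        -> "db.example.com"
#     get_db_hostname("jdbc:oracle:thin:@db.example.com:1521", "ORACLE")  -> "db.example.com"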
def run_sqlserver_commands(query):
"""Run SQL_SERVER commands using sqlcmd utility.
"""
subprocess.call(
['sqlcmd', '-S', db_host, '-U', database_config['user'], '-P', database_config['password'], '-Q', query])
def get_mysql_connection(db_name=None):
if db_name is not None:
conn = pymysql.connect(host=get_db_hostname(database_config['url'], 'MYSQL'), user=database_config['user'],
passwd=database_config['password'], db=db_name)
else:
conn = pymysql.connect(host=get_db_hostname(database_config['url'], 'MYSQL'), user=database_config['user'],
passwd=database_config['password'])
return conn
def run_mysql_commands(query):
"""Run mysql commands using mysql client when db name not provided.
"""
conn = get_mysql_connection()
    connector = conn.cursor()
    connector.execute(query)
conn.close()
def get_ora_user_create_query(database):
query = "CREATE USER {0} IDENTIFIED BY {1};".format(
database, database_config["password"])
return query
def get_ora_grant_query(database):
query = "GRANT CONNECT, RESOURCE, DBA TO {0};".format(
database)
return query
def execute_oracle_command(query):
"""Run oracle commands using sqlplus client when db name(user) is not provided.
"""
connect_string = "{0}/{1}@//{2}/{3}".format(database_config["user"], database_config["password"],
db_host, "ORCL")
session = Popen(['sqlplus64', '-S', connect_string], stdin=PIPE, stdout=PIPE, stderr=PIPE)
session.stdin.write(bytes(query, 'utf-8'))
return session.communicate()
def create_oracle_user(database):
"""This method is able to create the user and grant permission to the created user in oracle
"""
    user_creating_query = get_ora_user_create_query(database)
print("User_creating query is: "+user_creating_query)
logger.info(execute_oracle_command(user_creating_query))
permission_granting_query = get_ora_grant_query(database)
return execute_oracle_command(permission_granting_query)
def run_oracle_script(script, database):
"""Run oracle commands using sqlplus client when dbname(user) is provided.
"""
connect_string = "{0}/{1}@//{2}/{3}".format(database, database_config["password"],
db_host, "ORCL")
session = Popen(['sqlplus', '-S', connect_string], stdin=PIPE, stdout=PIPE, stderr=PIPE)
session.stdin.write(bytes(script, 'utf-8'))
return session.communicate()
def run_sqlserver_script_file(db_name, script_path):
"""Run SQL_SERVER script file on a provided database.
"""
subprocess.call(
['sqlcmd', '-S', db_host, '-U', database_config["user"], '-P', database_config["password"], '-d', db_name, '-i',
script_path])
def run_mysql_script_file(db_name, script_path):
"""Run MYSQL db script file on a provided database.
"""
conn = get_mysql_connection(db_name)
connector = conn.cursor()
sql = open(script_path).read()
sql_parts = sqlparse.split(sql)
for sql_part in sql_parts:
if sql_part.strip() == '':
continue
connector.execute(sql_part)
conn.close()
def copy_file(source, target):
"""Copy the source file to the target.
"""
if sys.platform.startswith('win'):
source = cp.winapi_path(source)
target = cp.winapi_path(target)
shutil.copy(source, target)
else:
shutil.copy(source, target)
def get_dist_name():
"""Get the product name by reading distribution pom.
"""
global dist_name
global dist_zip_name
global product_version
dist_pom_path = Path(workspace + "/" + product_id + "/" + DIST_POM_PATH[product_id])
print(dist_pom_path)
if sys.platform.startswith('win'):
dist_pom_path = cp.winapi_path(dist_pom_path)
ET.register_namespace('', NS['d'])
artifact_tree = ET.parse(dist_pom_path)
artifact_root = artifact_tree.getroot()
parent = artifact_root.find('d:parent', NS)
artifact_id = artifact_root.find('d:artifactId', NS).text
print("ArtifactID" + artifact_id)
product_version = parent.find('d:version', NS).text
print("ProdVersion" + product_version)
dist_name = artifact_id + "-" + product_version
dist_zip_name = dist_name + ZIP_FILE_EXTENSION
return dist_name
def get_dist_name_wum():
global dist_name
global product_version
product_version=wum_product_version
os.chdir(PRODUCT_STORAGE_DIR_NAME)
name = glob.glob('*.zip')[0]
dist_name=os.path.splitext(name)[0]
logger.info("dist_name:" + dist_name)
return dist_name
def setup_databases(db_names):
"""Create required databases.
"""
base_path = Path(workspace + "/" + PRODUCT_STORAGE_DIR_NAME + "/" + dist_name + "/" + 'dbscripts')
print("Base path is: "+str(base_path))
engine = db_engine.upper()
print("Engine is: "+engine)
db_meta_data = get_db_meta_data(engine)
print("DB metadata is: "+str(db_meta_data))
if db_meta_data:
databases = db_meta_data["DB_SETUP"][product_id]
print("Databases is: "+str(databases))
if databases:
for db_name in db_names:
db_scripts = databases[db_name]
if len(db_scripts) == 0:
if engine == 'SQLSERVER-SE':
# create database for MsSQL
run_sqlserver_commands('CREATE DATABASE {0}'.format(db_name))
elif engine == 'MYSQL':
# create database for MySQL
run_mysql_commands('CREATE DATABASE IF NOT EXISTS {0};'.format(db_name))
elif engine == 'ORACLE-SE2':
# create database for Oracle
print("DB_Name is: "+db_name)
create_oracle_user(db_name)
else:
if engine == 'SQLSERVER-SE':
# create database for MsSQL
run_sqlserver_commands('CREATE DATABASE {0}'.format(db_name))
for db_script in db_scripts:
path = base_path / db_script
# run db scripts
run_sqlserver_script_file(db_name, str(path))
elif engine == 'MYSQL':
# create database for MySQL
run_mysql_commands('CREATE DATABASE IF NOT EXISTS {0};'.format(db_name))
# run db scripts
for db_script in db_scripts:
path = base_path / db_script
run_mysql_script_file(db_name, str(path))
elif engine == 'ORACLE-SE2':
# create oracle schema
create_oracle_user(db_name)
# run db script
for db_script in db_scripts:
path = base_path / db_script
run_oracle_script('@{0}'.format(str(path)), db_name)
            logger.info('Database setup is done.')
else:
raise Exception("Database setup configuration is not defined in the constant file")
else:
raise Exception("Database meta data is not defined in the constant file")
def construct_db_config():
"""Use properties which are get by reading property files and construct the database config object which will use
when configuring the databases.
"""
db_meta_data = get_db_meta_data(db_engine.upper())
if db_meta_data:
database_config["driver_class_name"] = db_meta_data["driverClassName"]
database_config["password"] = db_password
database_config["sql_driver_location"] = sql_driver_location + "/" + db_meta_data["jarName"]
database_config["url"] = construct_url(db_meta_data["prefix"])
database_config["db_engine"] = db_engine
if db_username is None:
database_config["user"] = DEFAULT_DB_USERNAME
else:
database_config["user"] = db_username
else:
raise BaseException(
"DB config parsing is failed. DB engine name in the property file doesn't match with the constant: " + str(
db_engine.upper()))
def build_module(module_path):
"""Build a given module.
"""
logger.info('Start building a module. Module: ' + str(module_path))
if sys.platform.startswith('win'):
subprocess.call(['mvn', 'clean', 'install', '-B',
'-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn'],
shell=True, cwd=module_path)
else:
subprocess.call(['mvn', 'clean', 'install', '-B',
'-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn'],
cwd=module_path)
logger.info('Module build is completed. Module: ' + str(module_path))
def save_log_files():
log_storage = Path(workspace + "/" + LOG_STORAGE)
if not Path.exists(log_storage):
Path(log_storage).mkdir(parents=True, exist_ok=True)
log_file_paths = LOG_FILE_PATHS[product_id]
if log_file_paths:
for file in log_file_paths:
absolute_file_path = Path(workspace + "/" + product_id + "/" + file)
if Path.exists(absolute_file_path):
copy_file(absolute_file_path, log_storage)
else:
logger.error("File doesn't contain in the given location: " + str(absolute_file_path))
def clone_repo():
"""Clone the product repo
"""
try:
subprocess.call(['git', 'clone', '--branch', git_branch, git_repo_url], cwd=workspace)
logger.info('product repository cloning is done.')
except Exception as e:
logger.error("Error occurred while cloning the product repo: ", exc_info=True)
def checkout_to_tag(name):
"""Checkout to the given tag
"""
try:
git_path = Path(workspace + "/" + product_id)
tag = "tags/" + name
subprocess.call(["git", "fetch", "origin", tag], cwd=git_path)
subprocess.call(["git", "checkout", "-B", tag, name], cwd=git_path)
logger.info('checkout to the branch: ' + tag)
except Exception as e:
logger.error("Error occurred while cloning the product repo and checkout to the latest tag of the branch",
exc_info=True)
def get_latest_tag_name(product):
"""Get the latest tag name from git location
"""
global tag_name
git_path = Path(workspace + "/" + product)
latest_rev = subprocess.Popen(["git", "rev-list", "--tags", "--max-count=1"], stdout=subprocess.PIPE, cwd=git_path)
binary_val_of_tag_name = subprocess.Popen(
["git", "describe", "--tags", latest_rev.stdout.read().strip().decode("utf-8")], stdout=subprocess.PIPE,
cwd=git_path)
tag_name = binary_val_of_tag_name.stdout.read().strip().decode("utf-8")
print(tag_name)
return tag_name
def get_product_file_path():
"""Get the absolute path of the distribution which is located in the storage directory
"""
# product download path and file name constructing
product_download_dir = Path(workspace + "/" + PRODUCT_STORAGE_DIR_NAME)
if not Path.exists(product_download_dir):
Path(product_download_dir).mkdir(parents=True, exist_ok=True)
return product_download_dir / dist_zip_name
def get_relative_path_of_dist_storage(xml_path):
"""Get the relative path of distribution storage
"""
print("xml_path is: "+xml_path)
dom = minidom.parse(urllib2.urlopen(xml_path)) # parse the data
artifact_elements = dom.getElementsByTagName('artifact')
for artifact in artifact_elements:
file_name_elements = artifact.getElementsByTagName("fileName")
for file_name in file_name_elements:
print("file_name.firstChild.nodeValue is: "+file_name.firstChild.nodeValue)
print("dist_zip_name: "+dist_zip_name)
#if file_name.firstChild.nodeValue == dist_zip_name:
if file_name.firstChild.nodeValue == file_name.firstChild.nodeValue:
parent_node = file_name.parentNode
print("disStorage:==" + parent_node.getElementsByTagName("relativePath")[0].firstChild.nodeValue)
return parent_node.getElementsByTagName("relativePath")[0].firstChild.nodeValue
return None
def get_latest_released_dist():
"""Get the latest released distribution
"""
# construct the distribution downloading url
relative_path = get_relative_path_of_dist_storage(latest_product_release_api + "xml")
print("relatine path is "+relative_path)
if relative_path is None:
raise Exception("Error occured while getting relative path")
dist_downl_url = latest_product_release_api.split('/api')[0] + "/artifact/" + relative_path
# download the last released pack from Jenkins
download_file(dist_downl_url, str(get_product_file_path()))
logger.info('downloading the latest released pack from Jenkins is completed.')
def get_latest_stable_artifacts_api():
"""Get the API of the latest stable artifacts
"""
dom = minidom.parse(urllib2.urlopen(latest_product_build_artifacts_api + "xml"))
main_artifact_elements = dom.getElementsByTagName('mainArtifact')
print("Main artifact elements: "+str(main_artifact_elements))
for main_artifact in main_artifact_elements:
canonical_name_elements = main_artifact.getElementsByTagName("canonicalName")
print("Canonical name: "+str(canonical_name_elements))
for canonical_name in canonical_name_elements:
print("canonical_name.firstChild.nodeValue is: "+canonical_name.firstChild.nodeValue)
print("dist_name is: "+dist_name)
if canonical_name.firstChild.nodeValue == dist_name + ".pom":
#if canonical_name.firstChild.nodeValue == dist_name + "-rc4-SNAPSHOT.pom":
parent_node = main_artifact.parentNode
print("printing msg "+parent_node.getElementsByTagName("url")[0].firstChild.nodeValue)
return parent_node.getElementsByTagName("url")[0].firstChild.nodeValue
return None
def get_latest_stable_dist():
"""Download the latest stable distribution
"""
    build_num_artifact = get_latest_stable_artifacts_api()
    print("buildnumArti: " + str(build_num_artifact))
    if build_num_artifact is None:
        raise Exception("Error occurred while getting the latest stable build artifact API path")
    build_num_artifact = re.sub(r'http.//(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})', "https://wso2.org", build_num_artifact)
    #print("buildnumArtiafterReSub:" + build_num_artifact)
relative_path = get_relative_path_of_dist_storage(build_num_artifact + "api/xml")
print("relativePath:" + relative_path)
if relative_path is None:
        raise Exception("Error occurred while getting relative path")
dist_downl_url = build_num_artifact + "artifact/" + relative_path
print("dist_downl_url is: "+dist_downl_url)
download_file(dist_downl_url, str(get_product_file_path()))
logger.info('downloading the latest stable pack from Jenkins is completed.')
def create_output_property_file():
"""Create output property file which is used when generating email
"""
output_property_file = open("output.properties", "w+")
if test_mode == "WUM":
logger.info("PRODUCT GIT URL: " + git_repo_url)
        # Temporary fix; needs to be changed: get the git URL without the username and password
head, sep, tail = git_repo_url.partition('//')
uri=head
head, sep, tail = git_repo_url.partition('@')
urn=tail
git_url=uri+"//"+urn
git_url = git_url + "/tree/" + git_branch
logger.info("GIT URL: " + git_url)
output_property_file.write("GIT_LOCATION=%s\r\n" % git_url)
output_property_file.write("GIT_REVISION=%s\r\n" % git_branch)
else:
git_url = git_repo_url + "/tree/" + git_branch
output_property_file.write("GIT_LOCATION=%s\r\n" % git_url)
output_property_file.write("GIT_REVISION=%s\r\n" % tag_name)
output_property_file.close()
def replace_file(source, destination):
"""Replace source file to the destination
"""
    logger.info('replacing files from: ' + str(source) + ' to: ' + str(destination))
if sys.platform.startswith('win'):
source = cp.winapi_path(source)
destination = cp.winapi_path(destination)
shutil.move(source, destination)
def set_custom_testng():
if use_custom_testng_file == "TRUE":
testng_source = Path(workspace + "/" + "testng.xml")
testng_destination = Path(workspace + "/" + product_id + "/" + TESTNG_DIST_XML_PATH)
testng_server_mgt_source = Path(workspace + "/" + "testng-server-mgt.xml")
testng_server_mgt_destination = Path(workspace + "/" + product_id + "/" + TESTNG_SERVER_MGT_DIST)
# replace testng source
replace_file(testng_source, testng_destination)
# replace testng server mgt source
replace_file(testng_server_mgt_source, testng_server_mgt_destination)
def main():
try:
global logger
global dist_name
logger = function_logger(logging.DEBUG, logging.DEBUG)
        if sys.version_info < (3, 6):
            raise Exception(
                "To run the run-intg-test.py script you must have Python 3.6 or later. Current version: " + str(sys.version_info))
read_proprty_files()
if not validate_property_readings():
raise Exception(
"Property file doesn't have mandatory key-value pair. Please verify the content of the property file "
"and the format")
# construct database configuration
construct_db_config()
# clone the repository
clone_repo()
# set the custom testng.xml or the product testng.xml
set_custom_testng()
if test_mode == "WUM":
dist_name = get_dist_name_wum()
elif test_mode == "RELEASE":
checkout_to_tag(get_latest_tag_name(product_id))
dist_name = get_dist_name()
get_latest_released_dist()
elif test_mode == "SNAPSHOT":
dist_name = get_dist_name()
print("getDistNameMain: "+ dist_name)
get_latest_stable_dist()
db_names = cp.configure_product(dist_name, product_id, database_config, workspace, product_version)
print("DB names is: "+str(db_names))
if db_names is None or not db_names:
raise Exception("Failed the product configuring")
setup_databases(db_names)
intg_module_path = Path(workspace + "/" + product_id + "/" + 'modules/integration')
build_module(intg_module_path)
save_log_files()
        create_output_property_file()
except Exception as e:
        logger.error("Error occurred while running the run-intg-test.py script", exc_info=True)
except BaseException as e:
logger.error("Error occurred while doing the configuration", exc_info=True)
if __name__ == "__main__":
main()
|
from datetime import datetime
import os
import pickle
import argparse
import numpy as np
import torch
import torch.nn.functional as F
from mcmc_unlearner import sgmcmcUnlearner
import utils
import models
class myUnlearner(sgmcmcUnlearner):
def _apply_sample(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
lo = -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n
self.optimizer.zero_grad()
lo.backward()
self.optimizer.step()
def _fun(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
return -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n
def _z_fun(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
return F.cross_entropy(self.model(x), y, reduction='sum')
def get_args():
parser = argparse.ArgumentParser()
utils.add_shared_args(parser)
parser.add_argument('--rm-idx-path', type=str, default=None)
parser.add_argument('--save-freq', type=int, default=-1)
return parser.parse_args()
def get_forget_idx(dataset, kill_num):
kill_val = 0
if 'targets' in vars(dataset).keys():
labels = np.array(dataset.targets)
elif 'labels' in vars(dataset).keys():
labels = np.array(dataset.labels)
else:
raise NotImplementedError
randidx = np.random.permutation( np.where(labels==kill_val)[0] )
return randidx[:kill_num]
def evaluate(model, loader, cpu):
    ''' average loss and accuracy over the loader '''
loss = utils.AverageMeter()
acc = utils.AverageMeter()
n = len(loader.sampler.indices)
model.eval()
for x, y in loader:
if not cpu: x, y = x.cuda(), y.cuda()
with torch.no_grad():
_y = model(x)
lo = - model.log_prior() + F.cross_entropy(_y,y) * n
lo = lo.item()
ac = (_y.argmax(dim=1) == y).sum().item() / len(y)
loss.update(lo, len(y))
acc.update(ac, len(y))
return loss.average(), acc.average()
def forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log):
remain_train_loss, remain_train_acc = evaluate(model, train_loader, args.cpu)
forgetted_train_loss, forgetted_train_acc = evaluate(model, forgetted_train_loader, args.cpu)
test_loss, test_acc = evaluate(model, test_loader, args.cpu)
utils.add_log(log, 'remain_train_loss', remain_train_loss)
utils.add_log(log, 'remain_train_acc', remain_train_acc)
    utils.add_log(log, 'forgetted_train_loss', forgetted_train_loss)
    utils.add_log(log, 'forgetted_train_acc', forgetted_train_acc)
utils.add_log(log, 'test_loss', test_loss)
utils.add_log(log, 'test_acc', test_acc)
logger.info('remaining train loss {:.2e} \t train acc {:.2%}'
.format(remain_train_loss, remain_train_acc))
logger.info('forgetted train loss {:.2e} \t train acc {:.2%}'
.format(forgetted_train_loss, forgetted_train_acc))
logger.info('test loss {:.2e} \t test acc {:.2%}'
.format(test_loss, test_acc))
logger.info('')
def save_checkpoint(save_dir, save_name, log, model, optimizer):
with open('{}/{}-log.pkl'.format(save_dir, save_name), 'wb') as f:
pickle.dump(log, f)
torch.save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, '{}/{}-model.pkl'.format(save_dir, save_name))
def main(args, logger):
''' retrieve lots of data '''
trainset, testset = utils.get_dataset(args.dataset)
if args.rm_idx_path is not None:
with open(args.rm_idx_path, 'rb') as f:
forgetted_idx = pickle.load(f)
else:
forgetted_idx = get_forget_idx(trainset, args.ifs_kill_num)
forgetted_idx_loader = utils.IndexBatchSampler(
batch_size=args.ifs_rm_bs, indices=forgetted_idx)
train_sampler = utils.DataSampler(trainset, args.batch_size)
train_loader = utils.DataLoader(trainset, args.batch_size)
train_loader.remove(forgetted_idx)
forgetted_train_loader = utils.DataLoader(trainset, args.batch_size)
forgetted_train_loader.set_sampler_indices(forgetted_idx)
test_loader = utils.DataLoader(testset, args.batch_size)
''' end of retrieving data '''
model = utils.get_mcmc_bnn_arch(args.arch, args.dataset, args.prior_sig)
if not args.cpu:
model.cuda()
args.lr /= len(trainset)
optimizer = utils.get_optim(model.parameters(), args.optim,
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, sghmc_alpha=args.sghmc_alpha)
model.n = len(train_sampler)
''' restore model / sampler '''
state_dict = torch.load(args.resume_path)
model.load_state_dict(state_dict['model_state_dict'])
optimizer.load_state_dict(state_dict['optimizer_state_dict'])
''' for backward compatibility '''
for group in optimizer.param_groups:
if 'lr_decay' in group:
group['lr'] *= group['lr_decay']
group.pop('lr_decay')
del state_dict
unlearner = myUnlearner(
model = model,
optimizer = optimizer,
params = model.parameters(),
cpu = args.cpu,
iter_T = args.ifs_iter_T,
scaling = args.ifs_scaling,
samp_T = args.ifs_samp_T,)
log = dict()
log['user_time'] = 0
utils.add_log(log, 'forgetted_idx', forgetted_idx)
forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)
removed_nums = 0
freq_counter = 0
for ii in forgetted_idx_loader:
''' create forget-batch '''
xx, yy = [], []
for i in ii:
x, y = trainset[i]
if len(x.shape) == 3: x = x.reshape(1, *x.shape)
xx.append(x)
yy.append(y)
xx, yy = torch.cat(xx), torch.tensor(yy)
''' end '''
scaling = args.ifs_scaling / len(train_sampler)
unlearner.param_dict['scaling'] = scaling
''' start calculation of time '''
start_time = datetime.now()
unlearner.remove([xx,yy], train_sampler)
        if not args.cpu:
            torch.cuda.synchronize()
end_time = datetime.now()
user_time = (end_time - start_time).total_seconds()
''' end calculation of time '''
log['user_time'] += user_time
train_sampler.remove(ii)
''' after removal, update the number of remaining datums '''
unlearner.model.n = len(train_sampler)
removed_nums += len(ii)
freq_counter += len(ii)
''' update mcmc sampler '''
for group in unlearner.optimizer.param_groups:
group['lr'] *= (len(train_sampler) + len(ii)) / len(train_sampler)
logger.info('remaining trainset size {}'.format(len(train_sampler)))
logger.info('user time {:.3f} sec \t'
'cumulated user time {:.3f} mins'
.format(user_time, log['user_time']/60) )
if (args.save_freq > 0) and (freq_counter >= args.save_freq):
freq_counter = 0
save_checkpoint(args.save_dir, '{}-ckpt-{}'.format(args.save_name, removed_nums), log, model, optimizer)
forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)
save_checkpoint(args.save_dir, args.save_name, log, model, optimizer)
return
if __name__ == '__main__':
args = get_args()
logger = utils.generic_init(args)
try:
main(args, logger)
except Exception as e:
logger.exception('Unexpected exception! %s', e)
|
"""Arq-based queue worker lifecycle configuration."""
from __future__ import annotations
import uuid
from typing import Any, Dict
import httpx
import structlog
from safir.dependencies.db_session import db_session_dependency
from safir.logging import configure_logging
from timessquare.config import config
from timessquare.dependencies.redis import redis_dependency
from .functions import (
ping,
pull_request_sync,
repo_added,
repo_push,
repo_removed,
)
async def startup(ctx: Dict[Any, Any]) -> None:
    """Runs during worker start-up to set up the worker context."""
configure_logging(
profile=config.profile,
log_level=config.log_level,
name="timessquare",
)
logger = structlog.get_logger("timessquare")
# The instance key uniquely identifies this worker in logs
instance_key = uuid.uuid4().hex
logger = logger.bind(worker_instance=instance_key)
logger.info("Starting up worker")
http_client = httpx.AsyncClient()
ctx["http_client"] = http_client
ctx["logger"] = logger
logger.info("Start up complete")
# Set up FastAPI dependencies; we can use them "manually" with
# arq to provide resources similarly to FastAPI endpoints
await db_session_dependency.initialize(
config.database_url, config.database_password.get_secret_value()
)
await redis_dependency.initialize(config.redis_url)
async def shutdown(ctx: Dict[Any, Any]) -> None:
    """Runs during worker shut-down to release resources."""
if "logger" in ctx.keys():
logger = ctx["logger"]
else:
logger = structlog.get_logger("timessquare")
logger.info("Running worker shutdown.")
await db_session_dependency.aclose()
await redis_dependency.close()
try:
await ctx["http_client"].aclose()
except Exception as e:
logger.warning("Issue closing the http_client: %s", str(e))
logger.info("Worker shutdown complete.")
class WorkerSettings:
"""Configuration for a Times Square arq worker.
See `arq.worker.Worker` for details on these attributes.
"""
functions = [ping, repo_push, repo_added, repo_removed, pull_request_sync]
redis_settings = config.arq_redis_settings
queue_name = config.queue_name
on_startup = startup
on_shutdown = shutdown
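# Hypothetical usage sketch (not part of this module): application code could
# enqueue the ping task with arq's client API, reusing the Redis settings and
# queue name configured above.
async def enqueue_ping() -> None:
    from arq import create_pool

    pool = await create_pool(config.arq_redis_settings)
    await pool.enqueue_job("ping", _queue_name=config.queue_name)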
|
# Copyright (C) 2018 SignalFx, Inc. All rights reserved.
from bson import json_util as json
from opentracing.ext import tags
import pymongo.monitoring
from six import text_type
import opentracing
class CommandTracing(pymongo.monitoring.CommandListener):
_scopes = {}
def __init__(self, tracer=None, span_tags=None):
try:
global_tracer = opentracing.global_tracer()
except AttributeError:
global_tracer = opentracing.tracer
self._tracer = tracer or global_tracer
self._span_tags = span_tags or {}
def started(self, event):
scope = self._tracer.start_active_span(event.command_name)
self._scopes[event.request_id] = scope
span = scope.span
span.set_tag(tags.DATABASE_TYPE, 'mongodb')
span.set_tag(tags.COMPONENT, 'PyMongo')
span.set_tag(tags.DATABASE_INSTANCE, event.database_name)
for tag, value in self._span_tags.items():
span.set_tag(tag, value)
if not event.command:
return
command_name, collection = next(iter(event.command.items()))
span.set_tag('command.name', command_name)
namespace = text_type('{}.{}').format(event.database_name, collection)
span.set_tag('namespace', namespace)
span.set_tag('command', json.dumps(event.command)[:512])
def succeeded(self, event):
scope = self._scopes.pop(event.request_id, None)
if scope is None:
return
span = scope.span
span.set_tag('event.reply', json.dumps(event.reply)[:512])
span.set_tag('reported_duration', event.duration_micros)
scope.close()
def failed(self, event):
scope = self._scopes.pop(event.request_id, None)
if scope is None:
return
span = scope.span
span.set_tag(tags.ERROR, True)
span.set_tag('event.failure', json.dumps(event.failure))
span.set_tag('reported_duration', event.duration_micros)
scope.close()
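# Usage sketch: CommandTracing can be registered globally via
# pymongo.monitoring.register(), or passed per client through MongoClient's
# event_listeners argument; the tracer argument below is optional.
def instrument_pymongo(tracer=None):
    """Register CommandTracing so subsequently created MongoClients are traced."""
    pymongo.monitoring.register(CommandTracing(tracer=tracer))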
|
# Import the required libraries
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import seaborn as sns
# Read the dataset
df = pd.read_csv("dataset/consumo.csv")
# Convert the comma-decimal columns to numeric
df['Temperatura Maxima (C)'] = df['Temperatura Maxima (C)'].str.replace(',','.').astype(float)
df['Temperatura Minima (C)'] = df['Temperatura Minima (C)'].str.replace(',','.').astype(float)
df['Precipitacao (mm)'] = df['Precipitacao (mm)'].str.replace(',','.').astype(float)
df['Temperatura Media (C)'] = df['Temperatura Media (C)'].str.replace(',','.').astype(float)
# Descriptive analysis
df.describe()
df.head()
df.dtypes
df.info()
df.tail()
df.shape
# Check which values are missing
df.isnull().sum()
# Drop rows where all values are missing
df.dropna(how = "all", inplace = True)
# Copy the data frame into a new variable
df_feature = df.copy()
# Create a new feature
df_feature['variacao'] = (df_feature['Temperatura Maxima (C)']) - (df_feature['Temperatura Minima (C)'])
df_feature
# Plot the new feature
df_feature.plot(x='variacao', y = 'Consumo de cerveja (litros)')
plt.xlabel('variacao', fontsize = 15)
plt.ylabel('Consumo de cerveja (litros)',fontsize = 15)
plt.grid()
# Drop the date column
df_feature = df_feature.drop(columns = 'Data')
# Compute the correlation matrix
df_feature.corr().round(3)
# Plots
plt.figure()
sns.pairplot(df_feature,x_vars=['Temperatura Minima (C)','Temperatura Media (C)','Temperatura Maxima (C)','Precipitacao (mm)','variacao'],
y_vars=['Consumo de cerveja (litros)'],hue='Final de Semana',diag_kind=None)
# Plot weekend vs. beer consumption
plt.figure(2)
sns.swarmplot(x='Final de Semana',y='Consumo de cerveja (litros)',data= df_feature)
plt.grid()
plt.xlabel('Final de semana')
plt.ylabel('Consumo de cerveja [L]')
# Plot weekend vs. variacao (the newly created feature)
plt.figure(3)
sns.swarmplot(x = 'Final de Semana', y = 'variacao', data = df_feature)
plt.grid()
plt.xlabel('Final de semana')
plt.ylabel('variacao')
# Use a linear regression model
modelo = LinearRegression()
# Set the target variable
y = df_feature['Consumo de cerveja (litros)'].values # target
# Independent variables: every column except beer consumption
x = df_feature.drop(columns='Consumo de cerveja (litros)').values # features
xColunas = df_feature.drop(columns='Consumo de cerveja (litros)').columns
# Split into train and test sets
xTrain,xTest,yTrain,yTest = train_test_split(x,y, test_size = 0.3, random_state = 54564541)
# Fit the model
modelo.fit(xTrain,yTrain)
yPred = modelo.predict(xTest)
# Compute the residuals
res = yPred - yTest
# Evaluation
print('Valor de R2: {}'.format(modelo.score(xTest,yTest)))
print('Valor MSE: {}' .format(mean_squared_error(yTest,yPred)))
print('Coeficientes da regressão: {}'.format(modelo.coef_))
print('Intercept da regressão: {} \n'.format(modelo.intercept_))
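# Illustrative follow-up sketch (an addition, using only variables defined above):
# plot the residuals against the predictions to check that they are roughly
# zero-centered with no obvious pattern.
plt.figure(4)
plt.scatter(yPred, res)
plt.axhline(0, color='red')
plt.xlabel('Predicted consumption [L]')
plt.ylabel('Residual [L]')
plt.grid()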
|
"""
Python 2 and 3 code to generate 4 and 5 digit NACA profiles
The NACA airfoils are airfoil shapes for aircraft wings developed
by the National Advisory Committee for Aeronautics (NACA).
The shape of the NACA airfoils is described using a series of
digits following the word "NACA". The parameters in the numerical
code can be entered into equations to precisely generate the
cross-section of the airfoil and calculate its properties.
https://en.wikipedia.org/wiki/NACA_airfoil
Ports of the Matlab code available here:
http://www.mathworks.com/matlabcentral/fileexchange/19915-naca-4-digit-airfoil-generator
http://www.mathworks.com/matlabcentral/fileexchange/23241-naca-5-digit-airfoil-generator
Copyright (C) 2011 by Dirk Gorissen <dgorissen@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from math import cos, sin, tan
from math import atan
from math import pi
from math import pow
from math import sqrt
def linspace(start,stop,np):
"""
Emulate Matlab linspace
"""
return [start+(stop-start)*i/(np-1) for i in range(np)]
def interpolate(xa,ya,queryPoints):
"""
A cubic spline interpolation on a given set of points (x,y)
Recalculates everything on every call which is far from efficient but does the job for now
should eventually be replaced by an external helper class
"""
# PreCompute() from Paint Mono which in turn adapted:
# NUMERICAL RECIPES IN C: THE ART OF SCIENTIFIC COMPUTING
# ISBN 0-521-43108-5, page 113, section 3.3.
# http://paint-mono.googlecode.com/svn/trunk/src/PdnLib/SplineInterpolator.cs
#number of points
n = len(xa)
u, y2 = [0]*n, [0]*n
for i in range(1,n-1):
# This is the decomposition loop of the tridiagonal algorithm.
# y2 and u are used for temporary storage of the decomposed factors.
wx = xa[i + 1] - xa[i - 1]
sig = (xa[i] - xa[i - 1]) / wx
p = sig * y2[i - 1] + 2.0
y2[i] = (sig - 1.0) / p
ddydx = (ya[i + 1] - ya[i]) / (xa[i + 1] - xa[i]) - (ya[i] - ya[i - 1]) / (xa[i] - xa[i - 1])
u[i] = (6.0 * ddydx / wx - sig * u[i - 1]) / p
y2[n - 1] = 0
# This is the backsubstitution loop of the tridiagonal algorithm
#((int i = n - 2; i >= 0; --i):
for i in range(n-2,-1,-1):
y2[i] = y2[i] * y2[i + 1] + u[i]
# interpolate() adapted from Paint Mono which in turn adapted:
# NUMERICAL RECIPES IN C: THE ART OF SCIENTIFIC COMPUTING
# ISBN 0-521-43108-5, page 113, section 3.3.
# http://paint-mono.googlecode.com/svn/trunk/src/PdnLib/SplineInterpolator.cs
    results = [0]*len(queryPoints)
#loop over all query points
for i in range(len(queryPoints)):
# bisection. This is optimal if sequential calls to this
# routine are at random values of x. If sequential calls
# are in order, and closely spaced, one would do better
        # to store previous values of klo and khi and test if they remain appropriate on the next call
klo = 0
khi = n - 1
while (khi - klo > 1):
k = (khi + klo) >> 1
if (xa[k] > queryPoints[i]):
khi = k
else:
klo = k
h = xa[khi] - xa[klo]
a = (xa[khi] - queryPoints[i]) / h
b = (queryPoints[i] - xa[klo]) / h
# Cubic spline polynomial is now evaluated.
results[i] = a * ya[klo] + b * ya[khi] + ((a * a * a - a) * y2[klo] + (b * b * b - b) * y2[khi]) * (h * h) / 6.0
return results
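# Illustrative check (an addition, not used by the generator): interpolating a
# smooth function at a point between the knots should land close to the exact value.
def _spline_demo():
    xs = [0.0, 0.5, 1.0, 1.5, 2.0]
    ys = [x * x for x in xs]
    approx = interpolate(xs, ys, [0.75])[0]
    print("spline estimate at 0.75:", approx, "(exact value is 0.5625)")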
def naca4(number, n, finite_TE = False, half_cosine_spacing = False):
"""
Returns 2*n+1 points in [0 1] for the given 4 digit NACA number string
"""
m = float(number[0])/100.0
p = float(number[1])/10.0
t = float(number[2:])/100.0
a0 = +0.2969
a1 = -0.1260
a2 = -0.3516
a3 = +0.2843
if finite_TE:
a4 = -0.1015 # For finite thick TE
else:
a4 = -0.1036 # For zero thick TE
if half_cosine_spacing:
beta = linspace(0.0,pi,n+1)
x = [(0.5*(1.0-cos(xx))) for xx in beta] # Half cosine based spacing
else:
x = linspace(0.0,1.0,n+1)
yt = [5*t*(a0*sqrt(xx)+a1*xx+a2*pow(xx,2)+a3*pow(xx,3)+a4*pow(xx,4)) for xx in x]
xc1 = [xx for xx in x if xx <= p]
xc2 = [xx for xx in x if xx > p]
if p == 0:
xu = x
yu = yt
xl = x
yl = [-xx for xx in yt]
xc = xc1 + xc2
zc = [0]*len(xc)
else:
yc1 = [m/pow(p,2)*xx*(2*p-xx) for xx in xc1]
yc2 = [m/pow(1-p,2)*(1-2*p+xx)*(1-xx) for xx in xc2]
zc = yc1 + yc2
dyc1_dx = [m/pow(p,2)*(2*p-2*xx) for xx in xc1]
dyc2_dx = [m/pow(1-p,2)*(2*p-2*xx) for xx in xc2]
dyc_dx = dyc1_dx + dyc2_dx
theta = [atan(xx) for xx in dyc_dx]
xu = [xx - yy * sin(zz) for xx,yy,zz in zip(x,yt,theta)]
yu = [xx + yy * cos(zz) for xx,yy,zz in zip(zc,yt,theta)]
xl = [xx + yy * sin(zz) for xx,yy,zz in zip(x,yt,theta)]
yl = [xx - yy * cos(zz) for xx,yy,zz in zip(zc,yt,theta)]
X = xu[::-1] + xl[1:]
Z = yu[::-1] + yl[1:]
return X,Z
def naca5(number, n, finite_TE = False, half_cosine_spacing = False):
"""
Returns 2*n+1 points in [0 1] for the given 5 digit NACA number string
"""
naca1 = int(number[0])
naca23 = int(number[1:3])
naca45 = int(number[3:])
cld = naca1*(3.0/2.0)/10.0
p = 0.5*naca23/100.0
t = naca45/100.0
a0 = +0.2969
a1 = -0.1260
a2 = -0.3516
a3 = +0.2843
if finite_TE:
a4 = -0.1015 # For finite thickness trailing edge
else:
a4 = -0.1036 # For zero thickness trailing edge
if half_cosine_spacing:
beta = linspace(0.0,pi,n+1)
        x = [(0.5*(1.0-cos(xx))) for xx in beta] # Half cosine based spacing
else:
x = linspace(0.0,1.0,n+1)
yt = [5*t*(a0*sqrt(xx)+a1*xx+a2*pow(xx,2)+a3*pow(xx,3)+a4*pow(xx,4)) for xx in x]
P = [0.05,0.1,0.15,0.2,0.25]
M = [0.0580,0.1260,0.2025,0.2900,0.3910]
K = [361.4,51.64,15.957,6.643,3.230]
m = interpolate(P,M,[p])[0]
k1 = interpolate(M,K,[m])[0]
xc1 = [xx for xx in x if xx <= p]
xc2 = [xx for xx in x if xx > p]
xc = xc1 + xc2
if p == 0:
xu = x
yu = yt
xl = x
        yl = [-xx for xx in yt]
zc = [0]*len(xc)
else:
yc1 = [k1/6.0*(pow(xx,3)-3*m*pow(xx,2)+ pow(m,2)*(3-m)*xx) for xx in xc1]
yc2 = [k1/6.0*pow(m,3)*(1-xx) for xx in xc2]
zc = [cld/0.3 * xx for xx in yc1 + yc2]
dyc1_dx = [cld/0.3*(1.0/6.0)*k1*(3*pow(xx,2)-6*m*xx+pow(m,2)*(3-m)) for xx in xc1]
dyc2_dx = [cld/0.3*(1.0/6.0)*k1*pow(m,3)]*len(xc2)
dyc_dx = dyc1_dx + dyc2_dx
theta = [atan(xx) for xx in dyc_dx]
xu = [xx - yy * sin(zz) for xx,yy,zz in zip(x,yt,theta)]
yu = [xx + yy * cos(zz) for xx,yy,zz in zip(zc,yt,theta)]
xl = [xx + yy * sin(zz) for xx,yy,zz in zip(x,yt,theta)]
yl = [xx - yy * cos(zz) for xx,yy,zz in zip(zc,yt,theta)]
X = xu[::-1] + xl[1:]
Z = yu[::-1] + yl[1:]
return X,Z
def naca(number, n, finite_TE = False, half_cosine_spacing = False):
if len(number)==4:
return naca4(number, n, finite_TE, half_cosine_spacing)
elif len(number)==5:
return naca5(number, n, finite_TE, half_cosine_spacing)
else:
        raise ValueError("NACA number must be a 4 or 5 digit string")
class Display(object):
def __init__(self):
import matplotlib.pyplot as plt
self.plt = plt
self.h = []
self.label = []
self.fig, self.ax = self.plt.subplots()
self.plt.axis('equal')
self.plt.xlabel('x')
self.plt.ylabel('y')
self.ax.grid(True)
def plot(self, X, Y,label=''):
h, = self.plt.plot(X, Y, '-', linewidth = 1)
self.h.append(h)
self.label.append(label)
def show(self):
self.plt.axis((-0.1,1.1)+self.plt.axis()[2:])
self.ax.legend(self.h, self.label)
self.plt.show()
def demo(profNaca = ['0009', '2414', '6409'], nPoints = 240, finite_TE = False, half_cosine_spacing = False):
#profNaca = ['0009', '0012', '2414', '2415', '6409' , '0006', '0008', '0010', '0012', '0015']
d = Display()
for i,p in enumerate(profNaca):
X,Y = naca(p, nPoints, finite_TE, half_cosine_spacing)
d.plot(X, Y, p)
d.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
data6 = Breast cancer
"""
from chip_clas_new import chip_clas_new
import statistics
from functions import remove_noise
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
data_name = "Breast cancer"
print(data_name)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
data1 = pd.read_csv(url, sep=',', header=None, skiprows=1)
data = data1.iloc[:,1:].copy() # the first is the id
# converting object data into category dtype
data.iloc[:,5] = data.iloc[:,5].astype('category')
# encoding labels
data.iloc[:,5] = data.iloc[:,5].cat.codes
X = data.iloc[:,:-1]
min_max_scaler = MinMaxScaler(feature_range=(-1, 1)) # Normalizing data between -1 and 1
X = pd.DataFrame(min_max_scaler.fit_transform(X))
y = data.iloc[:,-1].copy() # Class: (2 for benign, 4 for malignant cancer)
y[y == 2] = 1
y[y == 4] = -1
# Filtering data:
X_new, y_new = remove_noise(X, y)
X_train, X_test, y_train, y_test = train_test_split(X_new, y_new, test_size=0.2, random_state=42)
f = open("results_window_size.txt", "a+")
f.write("\n\nDatabase: %s \n" % data_name)
f.write("Size before filter: %d \n" % X.shape[0])
f.write("Dimension: %d \n" % X.shape[1])
f.write("Size after filter: %d \n" % X_new.shape[0])
f.write("Train Size: %d \n" % X_train.shape[0])
window_size = [50, 30, 20, 10, 5, 1]
for split in window_size:
y_hat, y_test, result, runtime, final_split_size, arestas_suporte_size = chip_clas_new(X_train, X_test, y_train, y_test, method = "parallel", split_size = split)
f.write("\nSplit: %d \n" % split)
f.write("AUC: %f \n" % result)
f.write("Runtime: %d \n" % runtime)
f.write("Final_split_size: %d \n" % final_split_size)
f.write("arestas_suporte_size: %d \n" % arestas_suporte_size)
f.write("#######################################################################")
f.close()
|
import fire
import snowflake.connector
import configparser
import secrets
import pathlib
class Bufu():
def connect(self):
cp = configparser.ConfigParser()
path = pathlib.Path('~/.snowsql/config')
cp.read(path.expanduser())
conn = snowflake.connector.connect(
user = cp['connections']['username'],
password = cp['connections']['password'],
account = cp['connections']['accountname'],
database = cp['connections']['database'],
schema = cp['connections']['schema'],
role = cp['connections']['rolename'],
warehouse = cp['connections']['warehouse']
)
return conn
def __init__(self):
self.conn = self.connect()
def show(self, stage=None):
cur = self.conn.cursor(snowflake.connector.DictCursor)
if stage is None:
try:
cur.execute('SHOW STAGES IN SCHEMA')
rs = cur.fetchmany(100)
for row in rs:
print(row['name'])
finally:
self.conn.close()
else:
try:
cur.execute(f'LIST @{stage}')
rs = cur.fetchmany(100)
for row in rs:
print(row['name'])
finally:
self.conn.close()
def put(self, file, stage=None):
path = pathlib.Path(file)
cur = self.conn.cursor()
if stage is None:
stage = f'bufu_{secrets.token_hex(8)}'
cur.execute(f'CREATE STAGE {stage}')
print(f'Stage "{stage}" created.')
try:
cur.execute(f'put {path.resolve().as_uri()} @{stage}')
print(f'File "{path.resolve()}" was uploaded to stage "{stage}".')
finally:
self.conn.close()
def create(self, stage):
try:
cur = self.conn.cursor()
cur.execute(f'CREATE STAGE {stage}')
print(f'Stage "{stage}" created.')
finally:
self.conn.close()
def main():
    b = None
    try:
        b = Bufu()
        fire.Fire({
            'show': b.show,
            'create': b.create,
            'put': b.put
        })
    finally:
        if b is not None:
            b.conn.close()
|
import os
import sys
import math
import numpy as np
import torch
src_dir = os.path.dirname(os.path.realpath(__file__))
while not src_dir.endswith("sfa"):
src_dir = os.path.dirname(src_dir)
if src_dir not in sys.path:
sys.path.append(src_dir)
from config import kitti_config as cnf
def angle_in_limit(angle):
# To limit the angle in -pi/2 - pi/2
limit_degree = 5
while angle >= np.pi / 2:
angle -= np.pi
while angle < -np.pi / 2:
angle += np.pi
if abs(angle + np.pi / 2) < limit_degree / 180 * np.pi:
angle = np.pi / 2
return angle
# Camera coordinate system to lidar coordinate system
def camera_to_lidar(x, y, z, V2C=None, R0=None, P2=None):
p = np.array([x, y, z, 1]) #
if V2C is None or R0 is None:
p = np.matmul(cnf.R0_inv, p)
p = np.matmul(cnf.Tr_velo_to_cam_inv, p)
else:
        # build the coordinate transformation matrix
R0_i = np.zeros((4, 4))
R0_i[:3, :3] = R0
R0_i[3, 3] = 1
        p = np.matmul(np.linalg.inv(R0_i), p)  # np.linalg.inv() computes the matrix inverse
p = np.matmul(inverse_rigid_trans(V2C), p)
p = p[0:3]
return tuple(p)
# Lidar coordinate system to camera coordinate system
def lidar_to_camera(x, y, z, V2C=None, R0=None, P2=None):
    p = np.array([x, y, z, 1])  # first convert the point (x, y, z) to homogeneous coordinates
if V2C is None or R0 is None:
        p = np.matmul(cnf.Tr_velo_to_cam, p)  # transform from the lidar frame to the camera frame
        p = np.matmul(cnf.R0, p)  # apply the R0 rectification so the point is referenced to camera 0
else:
p = np.matmul(V2C, p)
p = np.matmul(R0, p)
p = p[0:3]
return tuple(p)
def camera_to_lidar_point(points):
# (N, 3) -> (N, 3)
N = points.shape[0]
points = np.hstack([points, np.ones((N, 1))]).T # (N,4) -> (4,N)
points = np.matmul(cnf.R0_inv, points)
points = np.matmul(cnf.Tr_velo_to_cam_inv, points).T # (4, N) -> (N, 4)
points = points[:, 0:3]
return points.reshape(-1, 3)
#
def lidar_to_camera_point(points, V2C=None, R0=None):
# (N, 3) -> (N, 3)
N = points.shape[0]
    points = np.hstack([points, np.ones((N, 1))]).T  # append a column of ones (N, 1) and transpose
if V2C is None or R0 is None:
points = np.matmul(cnf.Tr_velo_to_cam, points)
points = np.matmul(cnf.R0, points).T
else:
points = np.matmul(V2C, points)
points = np.matmul(R0, points).T
points = points[:, 0:3]
return points.reshape(-1, 3)
# Convert x, y, z from camera to lidar coordinates and output the full bbox info (x, y, z, h, w, l, rz/y)
def camera_to_lidar_box(boxes, V2C=None, R0=None, P2=None):
# (N, 7) -> (N, 7) x,y,z,h,w,l,r
ret = []
for box in boxes:
x, y, z, h, w, l, ry = box
        # convert camera-frame x, y, z to lidar-frame x, y, z and derive rz from ry
(x, y, z), h, w, l, rz = camera_to_lidar(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -ry - np.pi / 2
# rz = angle_in_limit(rz)
ret.append([x, y, z, h, w, l, rz])
return np.array(ret).reshape(-1, 7)
# Convert x, y, z from lidar to camera coordinates and output the full bbox info (x, y, z, h, w, l, ry)
def lidar_to_camera_box(boxes, V2C=None, R0=None, P2=None):
# (N, 7) -> (N, 7) x,y,z,h,w,l,r
    # in test mode the loaded predictions also contain an extra score field
ret = []
for box in boxes:
# x, y, z, h, w, l, rz, score = box
x, y, z, h, w, l, rz = box
        # convert lidar-frame x, y, z to camera-frame x, y, z
# (x, y, z), h, w, l, ry, score = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -rz - np.pi / 2, score
(x, y, z), h, w, l, ry = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -rz - np.pi / 2
# ry = angle_in_limit(ry)
# ret.append([x, y, z, h, w, l, ry, score])
ret.append([x, y, z, h, w, l, ry])
# return np.array(ret).reshape(-1, 8)
return np.array(ret).reshape(-1, 7)
def center_to_corner_box2d(boxes_center, coordinate='lidar'):
# (N, 5) -> (N, 4, 2)
N = boxes_center.shape[0]
boxes3d_center = np.zeros((N, 7))
boxes3d_center[:, [0, 1, 4, 5, 6]] = boxes_center
boxes3d_corner = center_to_corner_box3d(boxes3d_center, coordinate=coordinate)
return boxes3d_corner[:, 0:4, 0:2]
# Convert the center-based 3D bbox representation to the eight corner points
def center_to_corner_box3d(boxes_center, coordinate='lidar'):
# (N, 7) -> (N, 8, 3)
N = boxes_center.shape[0]
    ret = np.zeros((N, 8, 3), dtype=np.float32)  # holds the eight corner coordinates of each sample's 3D bbox
    if coordinate == 'camera':
        boxes_center = camera_to_lidar_box(boxes_center)  # camera-frame input is converted to lidar coordinates first
    # loop over samples
for i in range(N):
box = boxes_center[i]
translation = box[0:3] # x,y,z
size = box[3:6] # h,w,l
rotation = [0, 0, box[-1]] # [0, 0, rz]
h, w, l = size[0], size[1], size[2]
        # the eight corners of the 3D bbox
trackletBox = np.array([ # in velodyne coordinates around zero point and without orientation yet
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2], \
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], \
[0, 0, 0, 0, h, h, h, h]])
# re-create 3D bounding box in velodyne coordinate system
        yaw = rotation[2]  # yaw angle around the z-axis
rotMat = np.array([
[np.cos(yaw), -np.sin(yaw), 0.0],
[np.sin(yaw), np.cos(yaw), 0.0],
[0.0, 0.0, 1.0]])
        # rotate the box by the yaw angle, then add the (x, y, z) center to every corner to get the final eight corner coordinates
        cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(translation, (8, 1)).T  # tile the translation vector 8 times and transpose
box3d = cornerPosInVelo.transpose()
ret[i] = box3d
    if coordinate == 'camera':  # convert back from lidar to camera coordinates if requested
for idx in range(len(ret)):
ret[idx] = lidar_to_camera_point(ret[idx])
return ret
CORNER2CENTER_AVG = True
# Convert the eight-corner 3D bbox representation back to a center-based representation
def corner_to_center_box3d(boxes_corner, coordinate='camera'):
# (N, 8, 3) -> (N, 7) x,y,z,h,w,l,ry/z
    if coordinate == 'lidar':  # lidar-frame input is converted to camera coordinates first
for idx in range(len(boxes_corner)):
boxes_corner[idx] = lidar_to_camera_point(boxes_corner[idx])
ret = []
for roi in boxes_corner:
if CORNER2CENTER_AVG: # average version
            roi = np.array(roi)
            # in camera coordinates the y-axis is the height direction
            h = abs(np.sum(roi[:4, 1] - roi[4:, 1]) / 4)  # first four corners have y near 0, last four near h; average the four differences
            # Euclidean distance between adjacent corners, w = sqrt(x^2 + z^2), averaged over the four edges
            # [0, 2] selects the x and z coordinates
w = np.sum(
np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
) / 4
            # Euclidean distance between the other adjacent corner pairs, l = sqrt(x^2 + z^2), averaged over the four edges
l = np.sum(
np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
) / 4
            x = np.sum(roi[:, 0], axis=0) / 8  # average x over the eight corners
            y = np.sum(roi[0:4, 1], axis=0) / 4  # average y over the four bottom corners
            z = np.sum(roi[:, 2], axis=0) / 8  # average z over the eight corners
            # average the heading angle over the box edges
ry = np.sum(
math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
) / 8
if w > l:
w, l = l, w
ry = ry - np.pi / 2
elif l > w:
l, w = w, l
ry = ry - np.pi / 2
ret.append([x, y, z, h, w, l, ry])
else: # max version
            h = max(abs(roi[:4, 1] - roi[4:, 1]))  # first four corners have y near 0, last four near h; take the maximum difference
w = np.max(
np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
)
l = np.max(
np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
)
x = np.sum(roi[:, 0], axis=0) / 8
y = np.sum(roi[0:4, 1], axis=0) / 4
z = np.sum(roi[:, 2], axis=0) / 8
ry = np.sum(
math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
) / 8
if w > l:
w, l = l, w
ry = angle_in_limit(ry + np.pi / 2)
ret.append([x, y, z, h, w, l, ry])
if coordinate == 'lidar':
ret = camera_to_lidar_box(np.array(ret))
return np.array(ret)
def point_transform(points, tx, ty, tz, rx=0, ry=0, rz=0):
# Input:
# points: (N, 3)
# rx/y/z: in radians
# Output:
# points: (N, 3)
N = points.shape[0]
points = np.hstack([points, np.ones((N, 1))])
    # translate the point cloud
mat1 = np.eye(4)
mat1[3, 0:3] = tx, ty, tz
points = np.matmul(points, mat1)
    # rotate the point cloud
    # 4x4 rotation matrix around the x-axis
if rx != 0:
mat = np.zeros((4, 4))
mat[0, 0] = 1
mat[3, 3] = 1
mat[1, 1] = np.cos(rx)
mat[1, 2] = -np.sin(rx)
mat[2, 1] = np.sin(rx)
mat[2, 2] = np.cos(rx)
points = np.matmul(points, mat)
    # 4x4 rotation matrix around the y-axis
if ry != 0:
mat = np.zeros((4, 4))
mat[1, 1] = 1
mat[3, 3] = 1
mat[0, 0] = np.cos(ry)
mat[0, 2] = np.sin(ry)
mat[2, 0] = -np.sin(ry)
mat[2, 2] = np.cos(ry)
points = np.matmul(points, mat)
    # 4x4 rotation matrix around the z-axis
if rz != 0:
mat = np.zeros((4, 4))
mat[2, 2] = 1
mat[3, 3] = 1
mat[0, 0] = np.cos(rz)
mat[0, 1] = -np.sin(rz)
mat[1, 0] = np.sin(rz)
mat[1, 1] = np.cos(rz)
points = np.matmul(points, mat)
return points[:, 0:3]
# Return the transformed labels: lidar labels when working in lidar coordinates, camera labels otherwise
def box_transform(boxes, tx, ty, tz, r=0, coordinate='lidar'):
# Input:
# boxes: (N, 7) x y z h w l rz/y
# Output:
# boxes: (N, 7) x y z h w l rz/y
    # Convert each label's center (plus h, w, l) to the eight 3D bbox corners (this must happen in lidar coordinates); lidar-frame labels are returned in lidar coordinates, camera-frame labels are converted back to camera coordinates
boxes_corner = center_to_corner_box3d(boxes, coordinate=coordinate) # (N, 8, 3)
for idx in range(len(boxes_corner)):
if coordinate == 'lidar':
            boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, rz=r)  # in lidar coordinates the heading rotates around the z-axis
else:
            boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, ry=r)  # in camera coordinates the heading rotates around the y-axis
return corner_to_center_box3d(boxes_corner, coordinate=coordinate)
# Rigid-body coordinate transform
def inverse_rigid_trans(Tr):
''' Inverse a rigid body transform matrix (3x4 as [R|t])
[R'|-R't; 0|1]
'''
inv_Tr = np.zeros_like(Tr) # 3x4
inv_Tr[0:3, 0:3] = np.transpose(Tr[0:3, 0:3])
inv_Tr[0:3, 3] = np.dot(-np.transpose(Tr[0:3, 0:3]), Tr[0:3, 3])
return inv_Tr
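# Quick self-check sketch (an addition, not used by the pipeline above):
# composing a rigid transform (rotation about z plus translation) with its
# inverse should map the points back onto themselves.
def _check_inverse_rigid_trans():
    yaw = 0.3
    Tr = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, 1.0],
                   [np.sin(yaw),  np.cos(yaw), 0.0, 2.0],
                   [0.0,          0.0,         1.0, 3.0]])
    pts = np.array([[1.0, 2.0, 3.0, 1.0], [4.0, 5.0, 6.0, 1.0]]).T  # (4, N) homogeneous points
    transformed = np.matmul(Tr, pts)                                # (3, N)
    back = np.matmul(inverse_rigid_trans(Tr), np.vstack([transformed, np.ones((1, pts.shape[1]))]))
    assert np.allclose(back, pts[0:3, :])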
# Chain several augmentation transforms together
class Compose(object):
def __init__(self, transforms, p=1.0):
self.transforms = transforms
self.p = p
def __call__(self, lidar, labels):
if np.random.random() <= self.p:
for t in self.transforms:
lidar, labels = t(lidar, labels)
return lidar, labels
# Apply one randomly chosen augmentation transform
class OneOf(object):
def __init__(self, transforms, p=1.0):
self.transforms = transforms
self.p = p
def __call__(self, lidar, labels):
if np.random.random() <= self.p:
choice = np.random.randint(low=0, high=len(self.transforms))
lidar, labels = self.transforms[choice](lidar, labels)
return lidar, labels
class Random_Rotation(object):
def __init__(self, limit_angle=np.pi / 4, p=0.5):
self.limit_angle = limit_angle
self.p = p
def __call__(self, lidar, labels):
"""
:param labels: # (N', 7) x, y, z, h, w, l, r
:return:
"""
if np.random.random() <= self.p:
            # sample a random angle between -limit_angle and limit_angle
angle = np.random.uniform(-self.limit_angle, self.limit_angle)
            # rotate the point cloud around the z-axis
lidar[:, 0:3] = point_transform(lidar[:, 0:3], 0, 0, 0, rz=angle)
            # rotate the corresponding labels as well
labels = box_transform(labels, 0, 0, 0, r=angle, coordinate='lidar')
return lidar, labels
class Random_Scaling(object):
def __init__(self, scaling_range=(0.95, 1.05), p=0.5):
self.scaling_range = scaling_range
self.p = p
def __call__(self, lidar, labels):
"""
:param labels: # (N', 7) x, y, z, h, w, l, r
:return:
"""
if np.random.random() <= self.p:
            # random scaling factor
            factor = np.random.uniform(self.scaling_range[0], self.scaling_range[1])
            # scale both the lidar points and the labels
lidar[:, 0:3] = lidar[:, 0:3] * factor
labels[:, 0:6] = labels[:, 0:6] * factor
return lidar, labels
class Cutout(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
Refer from: https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
"""
def __init__(self, n_holes, ratio, fill_value=0., p=1.0):
self.n_holes = n_holes
self.ratio = ratio
assert 0. <= fill_value <= 1., "the fill value is in a range of 0 to 1"
self.fill_value = fill_value
self.p = p
def __call__(self, img, targets):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
if np.random.random() <= self.p:
h = img.size(1)
w = img.size(2)
h_cutout = int(self.ratio * h)
w_cutout = int(self.ratio * w)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - h_cutout // 2, 0, h)
y2 = np.clip(y + h_cutout // 2, 0, h)
x1 = np.clip(x - w_cutout // 2, 0, w)
x2 = np.clip(x + w_cutout // 2, 0, w)
img[:, y1: y2, x1: x2] = self.fill_value # Zero out the selected area
# Remove targets that are in the selected area
keep_target = []
for target_idx, target in enumerate(targets):
_, _, target_x, target_y, target_w, target_l, _, _ = target
if (x1 <= target_x * w <= x2) and (y1 <= target_y * h <= y2):
continue
keep_target.append(target_idx)
targets = targets[keep_target]
return img, targets
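# Illustrative composition sketch (an addition; the probabilities and parameter
# values below are placeholders): the classes above are designed to be chained,
# e.g. randomly applying either a rotation or a scaling to (lidar, labels) pairs.
def build_lidar_augmentation():
    return Compose([
        OneOf([
            Random_Rotation(limit_angle=np.pi / 4, p=1.0),
            Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),
        ], p=0.66),
    ], p=1.0)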
|
from typing import Dict, List, Optional, Tuple, Union
from typing_extensions import TypedDict
import numpy as np
import torch
from torch import Tensor
from tqdm import tqdm
from transformers import PreTrainedTokenizer
from neuro_comma.augmentation import AUGMENTATIONS
from neuro_comma.pretrained import TOKEN_IDX
class BaseDataset(torch.utils.data.Dataset):
def __init__(self,
files: Union[str, List[str]],
tokenizer: PreTrainedTokenizer,
targets: Dict[str, int],
sequence_len: int,
token_style: str,
*args,
**kwargs) -> None:
self.tokenizer = tokenizer
self.targets = targets
self.seq_len = sequence_len
self.token_style = token_style
if isinstance(files, list):
self.data = []
for file in files:
self.data += self._parse_data(file, *args, **kwargs)
else:
self.data = self._parse_data(files, *args, **kwargs)
def _parse_data(self, file_path: str, *args, **kwargs) -> List[List[List[int]]]:
"""Parse file to train data
Args:
file_path (`str`): text file path that contains tokens and punctuations separated by tab in lines
Returns:
            list[Batch]: batches of length `sequence_len`; the punctuation mask (`y_mask`) is used to ignore special indices such as padding and intermediate sub-word tokens during evaluation
"""
with open(file_path, 'r', encoding='utf-8') as file:
x, y = [], []
for i, line in enumerate(file):
if (line.strip()):
line = line.strip()
token = line.rsplit('\t', 1)
if len(token) == 2:
x.append(token[0])
target = self.targets[token[1]]
y.append(target)
else:
continue
data = self.parse_tokens(x, self.tokenizer, self.seq_len, self.token_style, y, *args, **kwargs)
return data
@classmethod
def parse_tokens(cls,
tokens: Union[List[str], Tuple[str]],
tokenizer: PreTrainedTokenizer,
seq_len: int,
token_style: str,
targets: Optional[List[int]] = None,
*args,
**kwargs) -> List[List[List[int]]]:
"""
Convert tokenized data for model prediction
Args:
            tokens (`Union[list[str], tuple[str]]`): split tokens
tokenizer (`PreTrainedTokenizer`): tokenizer which split tokens to subtokens
seq_len (`int`): sequence length
token_style (`str`): token_style from pretrained.TOKEN_IDX
Returns:
            (`list[BatchWithoutTarget]`): list of batches
```txt
tokens : [token token ##token PAD ]
x : [321 1233 23121 101 ]
y : [tar 0 tar 0 ]
y_mask : [1 0 1 0 ]
attn_mask : [1 1 1 0 ]
```
"""
data_items = []
# loop until end of the entire text
idx = 0
debug = kwargs.get('debug')
if debug:
pbar = tqdm(total=len(tokens))
while idx < len(tokens):
x = [TOKEN_IDX[token_style]['START_SEQ']]
w_id = [-1] # word indexes
y = [0]
y_mask = [1] if targets else [0]
# loop until we have required sequence length
# -1 because we will have a special end of sequence token at the end
while len(x) < seq_len - 1 and idx < len(tokens):
word_pieces = tokenizer.tokenize(tokens[idx])
# if taking these tokens exceeds sequence length we finish
# current sequence with padding
# then start next sequence from this token
if len(word_pieces) + len(x) >= seq_len:
break
for i in range(len(word_pieces) - 1):
x.append(tokenizer.convert_tokens_to_ids(word_pieces[i]))
w_id.append(idx)
y.append(0)
y_mask.append(0)
if len(word_pieces) > 0:
x.append(tokenizer.convert_tokens_to_ids(word_pieces[-1]))
else:
x.append(TOKEN_IDX[token_style]['UNK'])
w_id.append(idx)
if targets:
y.append(targets[idx])
else:
y.append(0)
y_mask.append(1)
idx += 1
if debug:
pbar.update(1)
x.append(TOKEN_IDX[token_style]['END_SEQ'])
w_id.append(-1)
y.append(0)
if targets:
y_mask.append(1)
else:
y_mask.append(0)
# Fill with pad tokens
if len(x) < seq_len:
x = x + [TOKEN_IDX[token_style]['PAD'] for _ in range(seq_len - len(x))]
w_id = w_id + [-100 for _ in range(seq_len - len(w_id))]
y = y + [0 for _ in range(seq_len - len(y))]
y_mask = y_mask + [0 for _ in range(seq_len - len(y_mask))]
attn_mask = [1 if token != TOKEN_IDX[token_style]['PAD'] else 0 for token in x]
data_items.append([x, w_id, attn_mask, y, y_mask])
if debug:
pbar.close()
return data_items
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
x = self.data[index][0]
attn_mask = self.data[index][2]
y = self.data[index][3]
y_mask = self.data[index][4]
x = torch.tensor(x) # type: ignore
attn_mask = torch.tensor(attn_mask) # type: ignore
y = torch.tensor(y) # type: ignore
y_mask = torch.tensor(y_mask) # type: ignore
return x, y, attn_mask, y_mask # type: ignore
class RepunctDataset(BaseDataset):
def __init__(self,
files: Union[str, List[str]],
tokenizer: PreTrainedTokenizer,
targets: Dict[str, int],
sequence_len: int,
token_style: str,
is_train=False,
augment_rate=0.,
augment_type='substitute',
*args,
**kwargs) -> None:
"""Preprocess data for restore punctuation
Args:
files (`Union[str, list[str]]`): single file or list of text files containing tokens and punctuations separated by tab in lines
tokenizer (`PreTrainedTokenizer`): tokenizer that will be used to further tokenize word for BERT like models
targets (`dict[str, int]`): dict with targets
sequence_len (`int`): length of each sequence
token_style (`str`): For getting index of special tokens in pretrained.TOKEN_IDX
is_train (`bool, optional`): if false do not apply augmentation. Defaults to False.
augment_rate (`float, optional`): percent of data which should be augmented. Defaults to 0.0.
augment_type (`str, optional`): augmentation type. Defaults to 'substitute'.
"""
super().__init__(files, tokenizer, targets, sequence_len, token_style, *args, **kwargs)
self.is_train = is_train
self.augment_type = augment_type
self.augment_rate = augment_rate
def _augment(self, x, y, y_mask):
x_aug = []
y_aug = []
y_mask_aug = []
for i in range(len(x)):
r = np.random.rand()
if r < self.augment_rate:
AUGMENTATIONS[self.augment_type](x, y, y_mask, x_aug, y_aug, y_mask_aug, i, self.token_style)
else:
x_aug.append(x[i])
y_aug.append(y[i])
y_mask_aug.append(y_mask[i])
if len(x_aug) > self.seq_len:
# len increased due to insert
x_aug = x_aug[:self.seq_len]
y_aug = y_aug[:self.seq_len]
y_mask_aug = y_mask_aug[:self.seq_len]
elif len(x_aug) < self.seq_len:
# len decreased due to delete
x_aug = x_aug + [TOKEN_IDX[self.token_style]['PAD'] for _ in range(self.seq_len - len(x_aug))]
y_aug = y_aug + [0 for _ in range(self.seq_len - len(y_aug))]
y_mask_aug = y_mask_aug + [0 for _ in range(self.seq_len - len(y_mask_aug))]
        attn_mask = [1 if token != TOKEN_IDX[self.token_style]['PAD'] else 0 for token in x_aug]
return x_aug, y_aug, attn_mask, y_mask_aug
def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
x = self.data[index][0]
attn_mask = self.data[index][2]
y = self.data[index][3]
y_mask = self.data[index][4]
if self.is_train and self.augment_rate > 0:
x, y, attn_mask, y_mask = self._augment(x, y, y_mask)
x = torch.tensor(x) # type: ignore
attn_mask = torch.tensor(attn_mask) # type: ignore
y = torch.tensor(y) # type: ignore
y_mask = torch.tensor(y_mask) # type: ignore
return x, y, attn_mask, y_mask # type: ignore
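# Hypothetical usage sketch (the file path, target map, tokenizer name and
# token_style value below are placeholders; the real values come from the
# neuro_comma training configuration):
def _example_loader():
    from transformers import AutoTokenizer
    from torch.utils.data import DataLoader

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    targets = {"O": 0, "COMMA": 1, "PERIOD": 2}
    dataset = RepunctDataset("train.tsv", tokenizer, targets,
                             sequence_len=256, token_style="bert",
                             is_train=True, augment_rate=0.15)
    return DataLoader(dataset, batch_size=8, shuffle=True)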
|
i = 0
while True:
try:
nome = input('Por favor digite o seu nome: ')
ind = int(input('Digite um indice do nome digitado: '))
print(nome[ind])
break
except ValueError:
print('Oops! Nome invalido. Tente novamente...')
except IndexError:
print('Oops! Indice invalido. Tente novamente...')
finally:
print('Tentativa {}' .format(i))
i = i + 1
|
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write Vorbis comment data.
Vorbis comments are freeform key/value pairs; keys are
case-insensitive ASCII and values are Unicode strings. A key may have
multiple values.
The specification is at http://www.xiph.org/vorbis/doc/v-comment.html.
"""
import sys
from io import BytesIO
import mutagen
from mutagen._util import DictMixin, cdata, MutagenError, reraise
def is_valid_key(key):
"""Return true if a string is a valid Vorbis comment key.
Valid Vorbis comment keys are printable ASCII between 0x20 (space)
and 0x7D ('}'), excluding '='.
Takes str/unicode in Python 2, unicode in Python 3
"""
if isinstance(key, bytes):
raise TypeError("needs to be str not bytes")
for c in key:
if c < " " or c > "}" or c == "=":
return False
else:
return bool(key)
istag = is_valid_key
class error(MutagenError):
pass
class VorbisUnsetFrameError(error):
pass
class VorbisEncodingError(error):
pass
class VComment(mutagen.Tags, list):
"""A Vorbis comment parser, accessor, and renderer.
All comment ordering is preserved. A VComment is a list of
key/value pairs, and so any Python list method can be used on it.
Vorbis comments are always wrapped in something like an Ogg Vorbis
bitstream or a FLAC metadata block, so this loads string data or a
file-like object, not a filename.
Attributes:
vendor (text): the stream 'vendor' (i.e. writer); default 'Mutagen'
"""
vendor = u"Mutagen " + mutagen.version_string
def __init__(self, data=None, *args, **kwargs):
self._size = 0
# Collect the args to pass to load, this lets child classes
# override just load and get equivalent magic for the
# constructor.
if data is not None:
if isinstance(data, bytes):
data = BytesIO(data)
elif not hasattr(data, 'read'):
raise TypeError("VComment requires bytes or a file-like")
start = data.tell()
self.load(data, *args, **kwargs)
self._size = data.tell() - start
def load(self, fileobj, errors='replace', framing=True):
"""Parse a Vorbis comment from a file-like object.
Arguments:
errors (str): 'strict', 'replace', or 'ignore'.
This affects Unicode decoding and how other malformed content
is interpreted.
framing (bool): if true, fail if a framing bit is not present
Framing bits are required by the Vorbis comment specification,
but are not used in FLAC Vorbis comment blocks.
"""
try:
vendor_length = cdata.uint_le(fileobj.read(4))
self.vendor = fileobj.read(vendor_length).decode('utf-8', errors)
count = cdata.uint_le(fileobj.read(4))
for i in range(count):
length = cdata.uint_le(fileobj.read(4))
try:
string = fileobj.read(length).decode('utf-8', errors)
except (OverflowError, MemoryError):
raise error("cannot read %d bytes, too large" % length)
try:
tag, value = string.split('=', 1)
except ValueError as err:
if errors == "ignore":
continue
elif errors == "replace":
tag, value = u"unknown%d" % i, string
else:
reraise(VorbisEncodingError, err, sys.exc_info()[2])
try:
tag = tag.encode('ascii', errors)
except UnicodeEncodeError:
raise VorbisEncodingError("invalid tag name %r" % tag)
else:
tag = tag.decode("ascii")
if is_valid_key(tag):
self.append((tag, value))
if framing and not bytearray(fileobj.read(1))[0] & 0x01:
raise VorbisUnsetFrameError("framing bit was unset")
except (cdata.error, TypeError):
raise error("file is not a valid Vorbis comment")
def validate(self):
"""Validate keys and values.
Check to make sure every key used is a valid Vorbis key, and
that every value used is a valid Unicode or UTF-8 string. If
any invalid keys or values are found, a ValueError is raised.
In Python 3 all keys and values have to be a string.
"""
if not isinstance(self.vendor, str):
raise ValueError("vendor needs to be str")
for key, value in self:
try:
if not is_valid_key(key):
raise ValueError("%r is not a valid key" % key)
except TypeError:
raise ValueError("%r is not a valid key" % key)
if not isinstance(value, str):
err = "%r needs to be str for key %r" % (value, key)
raise ValueError(err)
return True
def clear(self):
"""Clear all keys from the comment."""
for i in list(self):
self.remove(i)
def write(self, framing=True):
"""Return a string representation of the data.
Validation is always performed, so calling this function on
invalid data may raise a ValueError.
Arguments:
framing (bool): if true, append a framing bit (see load)
"""
self.validate()
def _encode(value):
if not isinstance(value, bytes):
return value.encode('utf-8')
return value
f = BytesIO()
vendor = _encode(self.vendor)
f.write(cdata.to_uint_le(len(vendor)))
f.write(vendor)
f.write(cdata.to_uint_le(len(self)))
for tag, value in self:
tag = _encode(tag)
value = _encode(value)
comment = tag + b"=" + value
f.write(cdata.to_uint_le(len(comment)))
f.write(comment)
if framing:
f.write(b"\x01")
return f.getvalue()
def pprint(self):
def _decode(value):
if not isinstance(value, str):
return value.decode('utf-8', 'replace')
return value
tags = [u"%s=%s" % (_decode(k), _decode(v)) for k, v in self]
return u"\n".join(tags)
class VCommentDict(VComment, DictMixin):
"""A VComment that looks like a dictionary.
This object differs from a dictionary in two ways. First,
len(comment) will still return the number of values, not the
number of keys. Secondly, iterating through the object will
iterate over (key, value) pairs, not keys. Since a key may have
multiple values, the same value may appear multiple times while
iterating.
Since Vorbis comment keys are case-insensitive, all keys are
normalized to lowercase ASCII.
"""
def __getitem__(self, key):
"""A list of values for the key.
This is a copy, so comment['title'].append('a title') will not
work.
"""
if isinstance(key, slice):
return VComment.__getitem__(self, key)
if not is_valid_key(key):
raise ValueError
key = key.lower()
values = [value for (k, value) in self if k.lower() == key]
if not values:
raise KeyError(key)
else:
return values
def __delitem__(self, key):
"""Delete all values associated with the key."""
if isinstance(key, slice):
return VComment.__delitem__(self, key)
if not is_valid_key(key):
raise ValueError
key = key.lower()
to_delete = [x for x in self if x[0].lower() == key]
if not to_delete:
raise KeyError(key)
else:
for item in to_delete:
self.remove(item)
def __contains__(self, key):
"""Return true if the key has any values."""
if not is_valid_key(key):
raise ValueError
key = key.lower()
for k, value in self:
if k.lower() == key:
return True
else:
return False
def __setitem__(self, key, values):
"""Set a key's value or values.
Setting a value overwrites all old ones. The value may be a
list of Unicode or UTF-8 strings, or a single Unicode or UTF-8
string.
"""
if isinstance(key, slice):
return VComment.__setitem__(self, key, values)
if not is_valid_key(key):
raise ValueError
if not isinstance(values, list):
values = [values]
try:
            del self[key]
except KeyError:
pass
for value in values:
self.append((key, value))
def keys(self):
"""Return all keys in the comment."""
return list(set([k.lower() for k, v in self]))
def as_dict(self):
"""Return a copy of the comment data in a real dict."""
return dict([(key, self[key]) for key in self.keys()])
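# Usage sketch (an addition): build a comment block in memory with the classes
# above and parse it back; no file I/O or external data is involved.
def _vcomment_roundtrip_example():
    vc = VCommentDict()
    vc["title"] = u"An Example Title"
    vc["artist"] = [u"First Artist", u"Second Artist"]
    raw = vc.write()              # bytes, including the trailing framing bit
    return VCommentDict(raw).as_dict()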
|
# OpenFlow implementation of a hub via a controller
#
# Each switch is loaded with a single default rule
# (table miss) whose action is to send the entire packet
# to the controller. The controller replies with a
# packet out whose action is flood.
#
# NOTE: OpenVSwitch ignores the OFPCML_NO_BUFFER option
# in table-miss rules (priority 0); therefore we
# install a rule with priority 1.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
class PolimiHub(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [
parser.OFPActionOutput(
ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER
)
]
inst = [
parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS,
actions
)
]
mod = parser.OFPFlowMod(
datapath=datapath,
priority=1,
match=match,
instructions=inst
)
datapath.send_msg(mod)
# Register a handler for the Packet In event
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# Given how we wrote the rules into the switch,
# packets must not be buffered at the switch
assert msg.buffer_id == ofproto.OFP_NO_BUFFER
# Retrieve the switch ingress port
# from the packet metadata
in_port = msg.match['in_port']
actions = [
parser.OFPActionOutput(
ofproto.OFPP_FLOOD
)
]
out = parser.OFPPacketOut(
datapath=datapath,
buffer_id=msg.buffer_id,
in_port=in_port,
actions=actions,
data=msg.data
)
datapath.send_msg(out)
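# Usage note (an assumption, not part of the original file): a Ryu app like
# this one is typically started with the `ryu-manager` launcher, e.g.
#
#   ryu-manager polimi_hub.py
#
# where `polimi_hub.py` is a hypothetical filename for this module; the
# switches (e.g. OpenVSwitch instances) must then be pointed at the controller.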
|
from grouper.fe.forms import ServiceAccountCreateForm
from grouper.fe.settings import settings
from grouper.fe.util import GrouperHandler
from grouper.models.group import Group
from grouper.service_account import (
BadMachineSet,
can_create_service_account,
create_service_account,
DuplicateServiceAccount,
)
class ServiceAccountCreate(GrouperHandler):
def get(self, group_id=None, name=None):
group = Group.get(self.session, group_id, name)
if not group:
return self.notfound()
if not can_create_service_account(self.session, self.current_user, group):
return self.forbidden()
form = ServiceAccountCreateForm()
return self.render("service-account-create.html", form=form, group=group)
def post(self, group_id=None, name=None):
group = Group.get(self.session, group_id, name)
if not group:
return self.notfound()
if "@" not in self.request.arguments["name"][0]:
self.request.arguments["name"][0] += "@" + settings.service_account_email_domain
if not can_create_service_account(self.session, self.current_user, group):
return self.forbidden()
form = ServiceAccountCreateForm(self.request.arguments)
if not form.validate():
return self.render(
"service-account-create.html",
form=form,
group=group,
alerts=self.get_form_alerts(form.errors),
)
if form.data["name"].split("@")[-1] != settings.service_account_email_domain:
form.name.errors.append(
"All service accounts must have a username ending in {}".format(
settings.service_account_email_domain
)
)
return self.render(
"service-account-create.html",
form=form,
group=group,
alerts=self.get_form_alerts(form.errors),
)
try:
create_service_account(
self.session,
self.current_user,
form.data["name"],
form.data["description"],
form.data["machine_set"],
group,
)
except DuplicateServiceAccount:
form.name.errors.append("A user with name {} already exists".format(form.data["name"]))
except BadMachineSet as e:
form.machine_set.errors.append(str(e))
if form.name.errors or form.machine_set.errors:
return self.render(
"service-account-create.html",
form=form,
group=group,
alerts=self.get_form_alerts(form.errors),
)
url = "/groups/{}/service/{}?refresh=yes".format(group.name, form.data["name"])
return self.redirect(url)
|
from generate_params import *
n_params = 50
for _ in range(n_params):
random_params_cont_bath(beta=beta,
U_range=[1., 8.],
eps_range=[-1., 1.],
D_range=[2. , 8.],
filename=name("params_cont_bath", beta, 0,
parent="data_cont_bath/"))
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
@toolbar_pool.register
class SeoCheckToolbar(CMSToolbar):
def populate(self):
seo_check_menu = self.toolbar.get_or_create_menu(
'seo_check',
'SEO'
)
url = reverse('cmsplugin_seocheck:check_modal')
seo_check_menu.add_modal_item(name='SEO-Check', url=url)
|
from dataclasses import dataclass
import pele_platform.Adaptive.simulation as si
import pele_platform.Utilities.Parameters.parameters as pv
@dataclass
class GpcrLauncher:
args: pv.ParametersBuilder
def run_gpcr_simulation(self) -> pv.ParametersBuilder:
# Set parameters for GPCR and launch simulation
self._set_parameters()
simulation_parameters = si.run_adaptive(self.args)
return simulation_parameters
def _set_parameters(self) -> None:
# Set box and initial ligand position
self.orthosteric_site = self.args.orthosteric_site
self.initial_site = self.args.initial_site
self.args.center_of_interface = self.initial_site
self.args.randomize = True
|
#
# @lc app=leetcode id=172 lang=python3
#
# [172] Factorial Trailing Zeroes
#
# @lc code=start
class Solution:
def trailingZeroes(self, n):
# Each trailing zero needs a factor pair of 2 and 5; 2s are plentiful, so count the factors of 5 (including 25, 125, ...).
if n < 5:
return 0
ans = 0
base = 5
while n >= base:
ans += n//base
base *= 5
return ans
if __name__ == '__main__':
a = Solution()
b = a.trailingZeroes(200)
print(b)
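# Worked check for the call above (illustration only): for n = 200,
#   200//5 = 40, 200//25 = 8, 200//125 = 1, 200//625 = 0 -> loop stops,
# so trailingZeroes(200) == 40 + 8 + 1 == 49.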
# @lc code=end
|
#from .Opt import BayesianOptimization
from .GaussianProcess import GP
from .GaussianProcess.Kernel import RBF
from .Opt import BayesianOptimization
from .Acquisition import Acquistion
|
import search
import string
from math import cos, pi
# A sample map problem
# sumner_map = search.UndirectedGraph(dict(
# Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
# Cottontown=dict(Portland=18),
# Fairfield=dict(Mitchellville=21, Portland=17),
# Mitchellville=dict(Portland=7, Fairfield=21),
# ))
#
# sumner_puzzle = search.GraphProblem('Cottontown', 'Mitchellville', sumner_map)
#
# sumner_puzzle.label = 'Sumner'
# sumner_puzzle.description = '''
# An abbreviated map of Sumner County, TN.
# This map is unique, to the best of my knowledge.
# '''
#=========================================================================
#=========================================================================
norfolk_map = search.UndirectedGraph(dict(
Norfolk=dict(Suffolk=50,Chesapeake=15,VirginiaBeach=35),
Suffolk=dict(Norfolk=50,Chesapeake=35,Hampton=60,Moyock=150,Sunbury=120),
Chesapeake=dict(Suffolk=35,Norfolk=15,VirginiaBeach=40,Moyock=120),
VirginiaBeach=dict(Norfolk=35,Chesapeake=40),
Hampton=dict(Norfolk=30,Suffolk=60,NewportNews=15),
NewportNews=dict(Hampton=15,Jamestown=35,Williamsburg=30,Yorktown=15),
Jamestown=dict(NewportNews=35,Williamsburg=15),
Williamsburg=dict(Jamestown=15,NewportNews=30,Yorktown=20),
Yorktown=dict(Williamsburg=20,NewportNews=15),
Sunbury=dict(Suffolk=120, Moyock=45),
Moyock=dict(Suffolk=150,Chesapeake=120),
))
norfolk_puzzle = search.GraphProblem('Jamestown', 'Yorktown', norfolk_map)
norfolk_puzzle.label = 'Norfolk'
norfolk_puzzle.description = 'This is a map of the Norfolk, VA area. ' \
'This map is unique to the best of my ' \
'knowledge.'
#=========================================================================
#=========================================================================
romania_map = search.UndirectedGraph(dict(
A=dict(Z=75,S=140,T=118),
Z=dict(O=71,A=75),
S=dict(O=151,R=80,F=99),
T=dict(A=118,L=111),
O=dict(Z=71,S=151),
L=dict(T=111,M=70),
M=dict(L=70,D=75),
D=dict(M=75,C=120),
R=dict(S=80,C=146,P=97),
C=dict(R=146,P=138,D=120),
F=dict(S=99,B=211),
P=dict(R=97,C=138,B=101),
B=dict(G=90,P=101,F=211),
))
romania_puzzle = search.GraphProblem('A', 'B', romania_map)
romania_puzzle.label = 'Romania'
romania_puzzle.description = '''
The simplified map of Romania, per
Russell & Norvig, 3rd Ed., p. 68.
'''
# A trivial Problem definition
class LightSwitch(search.Problem):
def actions(self, state):
return ['up', 'down']
def result(self, state, action):
if action == 'up':
return 'on'
else:
return 'off'
def goal_test(self, state):
return state == 'on'
def h(self, node):
state = node.state
if self.goal_test(state):
return 0
else:
return 1
#swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
#===========================================================================================
#===========================================================================================
# class TrueOrFalse(search.Problem):
# def actions(self, state):
# return ['true', 'false']
#
# def result(self, state, action):
# if action == 'true':
# return 'true'
# else:
# return 'false'
#
# def goal_test(self, state):
# return state == 'true'
#
# def h(self, node):
# state = node.state
# if self.goal_test(state):
# return 0
# else:
# return 1
#
# #swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
# trueorfalse_puzzle = TrueOrFalse('false')
# trueorfalse_puzzle.label = 'True or False'
cheese_map = search.UndirectedGraph(dict(
A1=dict(A2=10,A3=20,B1=10,B2=20,B3=30,C1=20,C2=30,C3=40),
A2=dict(A1=10,A3=10,B1=20,B2=10,B3=20,C1=30,C2=20,C3=30),
A3=dict(A1=20,A2=10,B1=30,B2=20,B3=10,C1=40,C2=30,C3=20),
B1=dict(A1=10,A2=20,A3=30,B2=10,B3=10,C1=10,C2=20,C3=30),
B2=dict(A2=10,A3=20,B1=10,A1=20,B3=10,C1=20,C2=10,C3=20),
B3=dict(A2=20,A3=10,B1=20,B2=10,A1=30,C1=30,C2=20,C3=10),
C1=dict(A2=20,A3=40,B1=10,B2=20,B3=30,A1=20,C2=10,C3=20),
C2=dict(A2=10,A3=20,B1=20,B2=10,B3=20,C1=10,A1=30,C3=10),
C3=dict(A2=30,A3=20,B1=30,B2=20,B3=10,C1=20,C2=10,A1=40),
))
import random
def guess_letter():
return random.choice('ABC')
def guess_number():
return random.choice('123')
a = guess_letter()
b = guess_number()
print(a + b)
cheese_puzzle = search.GraphProblem('A1', a+b , cheese_map)
cheese_puzzle.label = 'Cheese Puzzle'
#===========================================================================================
#===========================================================================================
mySearches = [
# swiss_puzzle,
# sumner_puzzle,
romania_puzzle,
switch_puzzle,
norfolk_puzzle,
#trueorfalse_puzzle,
cheese_puzzle,
]
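# A minimal usage sketch (an assumption: `search` is the AIMA-style module,
# which exposes graph-search routines such as `astar_search`):
#
#   for puzzle in mySearches:
#       goal_node = search.astar_search(puzzle)
#       print(puzzle.label, goal_node.solution() if goal_node else None)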
|
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hashing for st.memo and st.singleton."""
import collections
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import threading
import unittest.mock
import weakref
from typing import Any, Pattern, Optional, Dict, List
from streamlit import type_util
from streamlit import util
from streamlit.logger import get_logger
from streamlit.uploaded_file_manager import UploadedFile
from .cache_errors import (
CacheType,
UnhashableTypeError,
)
_LOGGER = get_logger(__name__)
# If a dataframe has more than this many rows, we consider it large and hash a sample.
_PANDAS_ROWS_LARGE = 100000
_PANDAS_SAMPLE_SIZE = 10000
# Similar to dataframes, we also sample large numpy arrays.
_NP_SIZE_LARGE = 1000000
_NP_SAMPLE_SIZE = 100000
# Arbitrary item to denote where we found a cycle in a hashed object.
# This allows us to hash self-referencing lists, dictionaries, etc.
_CYCLE_PLACEHOLDER = b"streamlit-57R34ML17-hesamagicalponyflyingthroughthesky-CYCLE"
def update_hash(val: Any, hasher, cache_type: CacheType) -> None:
"""Updates a hashlib hasher with the hash of val.
This is the main entrypoint to hashing.py.
"""
ch = _CacheFuncHasher(cache_type)
ch.update(hasher, val)
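# A minimal usage sketch (an assumption, not part of the original module;
# the CacheType member name below is illustrative):
#
#   import hashlib
#   hasher = hashlib.new("md5")
#   update_hash({"a": 1, "b": [1, 2, 3]}, hasher=hasher, cache_type=CacheType.MEMO)
#   digest = hasher.hexdigest()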
class _HashStack:
"""Stack of what has been hashed, for debug and circular reference detection.
This internally keeps 1 stack per thread.
Internally, this stores the ID of pushed objects rather than the objects
themselves because otherwise the "in" operator inside __contains__ would
fail for objects that don't return a boolean for "==" operator. For
example, arr == 10 where arr is a NumPy array returns another NumPy array.
This causes the "in" to crash since it expects a boolean.
"""
def __init__(self):
self._stack: collections.OrderedDict[int, List[Any]] = collections.OrderedDict()
def __repr__(self) -> str:
return util.repr_(self)
def push(self, val: Any):
self._stack[id(val)] = val
def pop(self):
self._stack.popitem()
def __contains__(self, val: Any):
return id(val) in self._stack
class _HashStacks:
"""Stacks of what has been hashed, with at most 1 stack per thread."""
def __init__(self):
self._stacks: weakref.WeakKeyDictionary[
threading.Thread, _HashStack
] = weakref.WeakKeyDictionary()
def __repr__(self) -> str:
return util.repr_(self)
@property
def current(self) -> _HashStack:
current_thread = threading.current_thread()
stack = self._stacks.get(current_thread, None)
if stack is None:
stack = _HashStack()
self._stacks[current_thread] = stack
return stack
hash_stacks = _HashStacks()
def _int_to_bytes(i: int) -> bytes:
num_bytes = (i.bit_length() + 8) // 8
return i.to_bytes(num_bytes, "little", signed=True)
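# Worked example (illustration only): for i = 255, i.bit_length() is 8, so
# num_bytes is (8 + 8) // 8 == 2 and the result is b"\xff\x00"; the extra
# byte leaves room for the sign bit of negative inputs.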
def _key(obj: Optional[Any]) -> Any:
"""Return key for memoization."""
if obj is None:
return None
def is_simple(obj):
return (
isinstance(obj, bytes)
or isinstance(obj, bytearray)
or isinstance(obj, str)
or isinstance(obj, float)
or isinstance(obj, int)
or isinstance(obj, bool)
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple):
if all(map(is_simple, obj)):
return obj
if isinstance(obj, list):
if all(map(is_simple, obj)):
return ("__l", tuple(obj))
if (
type_util.is_type(obj, "pandas.core.frame.DataFrame")
or type_util.is_type(obj, "numpy.ndarray")
or inspect.isbuiltin(obj)
or inspect.isroutine(obj)
or inspect.iscode(obj)
):
return id(obj)
return NoResult
class _CacheFuncHasher:
"""A hasher that can hash objects with cycles."""
def __init__(self, cache_type: CacheType):
self._hashes: Dict[Any, bytes] = {}
# The number of the bytes in the hash.
self.size = 0
self.cache_type = cache_type
def __repr__(self) -> str:
return util.repr_(self)
def to_bytes(self, obj: Any) -> bytes:
"""Add memoization to _to_bytes and protect against cycles in data structures."""
tname = type(obj).__qualname__.encode()
key = (tname, _key(obj))
# Memoize if possible.
if key[1] is not NoResult:
if key in self._hashes:
return self._hashes[key]
# Break recursive cycles.
if obj in hash_stacks.current:
return _CYCLE_PLACEHOLDER
hash_stacks.current.push(obj)
try:
# Hash the input
b = b"%s:%s" % (tname, self._to_bytes(obj))
# Hmmm... It's possible that the size calculation is wrong. When we
# call to_bytes inside _to_bytes things get double-counted.
self.size += sys.getsizeof(b)
if key[1] is not NoResult:
self._hashes[key] = b
finally:
# In case an UnhashableTypeError (or other) error is thrown, clean up the
# stack so we don't get false positives in future hashing calls
hash_stacks.current.pop()
return b
def update(self, hasher, obj: Any) -> None:
"""Update the provided hasher with the hash of an object."""
b = self.to_bytes(obj)
hasher.update(b)
def _to_bytes(self, obj: Any) -> bytes:
"""Hash objects to bytes, including code with dependencies.
Python's built in `hash` does not produce consistent results across
runs.
"""
if isinstance(obj, unittest.mock.Mock):
# Mock objects can appear to be infinitely
# deep, so we don't try to hash them at all.
return self.to_bytes(id(obj))
elif isinstance(obj, bytes) or isinstance(obj, bytearray):
return obj
elif isinstance(obj, str):
return obj.encode()
elif isinstance(obj, float):
return self.to_bytes(hash(obj))
elif isinstance(obj, int):
return _int_to_bytes(obj)
elif isinstance(obj, (list, tuple)):
h = hashlib.new("md5")
for item in obj:
self.update(h, item)
return h.digest()
elif isinstance(obj, dict):
h = hashlib.new("md5")
for item in obj.items():
self.update(h, item)
return h.digest()
elif obj is None:
return b"0"
elif obj is True:
return b"1"
elif obj is False:
return b"0"
elif type_util.is_type(obj, "pandas.core.frame.DataFrame") or type_util.is_type(
obj, "pandas.core.series.Series"
):
import pandas as pd
if len(obj) >= _PANDAS_ROWS_LARGE:
obj = obj.sample(n=_PANDAS_SAMPLE_SIZE, random_state=0)
try:
return b"%s" % pd.util.hash_pandas_object(obj).sum()
except TypeError:
# Use pickle if pandas cannot hash the object for example if
# it contains unhashable objects.
return b"%s" % pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
elif type_util.is_type(obj, "numpy.ndarray"):
h = hashlib.new("md5")
self.update(h, obj.shape)
if obj.size >= _NP_SIZE_LARGE:
import numpy as np
state = np.random.RandomState(0)
obj = state.choice(obj.flat, size=_NP_SAMPLE_SIZE)
self.update(h, obj.tobytes())
return h.digest()
elif inspect.isbuiltin(obj):
return bytes(obj.__name__.encode())
elif type_util.is_type(obj, "builtins.mappingproxy") or type_util.is_type(
obj, "builtins.dict_items"
):
return self.to_bytes(dict(obj))
elif type_util.is_type(obj, "builtins.getset_descriptor"):
return bytes(obj.__qualname__.encode())
elif isinstance(obj, UploadedFile):
# UploadedFile is a BytesIO (thus IOBase) but has a name.
# It does not have a timestamp so this must come before
# temporary files
h = hashlib.new("md5")
self.update(h, obj.name)
self.update(h, obj.tell())
self.update(h, obj.getvalue())
return h.digest()
elif hasattr(obj, "name") and (
isinstance(obj, io.IOBase)
# Handle temporary files used during testing
or isinstance(obj, tempfile._TemporaryFileWrapper)
):
# Hash files as name + last modification date + offset.
# NB: we're using hasattr("name") to differentiate between
# on-disk and in-memory StringIO/BytesIO file representations.
# That means that this condition must come *before* the next
# condition, which just checks for StringIO/BytesIO.
h = hashlib.new("md5")
obj_name = getattr(obj, "name", "wonthappen") # Just to appease MyPy.
self.update(h, obj_name)
self.update(h, os.path.getmtime(obj_name))
self.update(h, obj.tell())
return h.digest()
elif isinstance(obj, Pattern):
return self.to_bytes([obj.pattern, obj.flags])
elif isinstance(obj, io.StringIO) or isinstance(obj, io.BytesIO):
# Hash in-memory StringIO/BytesIO by their full contents
# and seek position.
h = hashlib.new("md5")
self.update(h, obj.tell())
self.update(h, obj.getvalue())
return h.digest()
elif type_util.is_type(obj, "numpy.ufunc"):
# For numpy.remainder, this returns remainder.
return bytes(obj.__name__.encode())
elif inspect.ismodule(obj):
# TODO: Figure out how to best show this kind of warning to the
# user. In the meantime, show nothing. This scenario is too common,
# so the current warning is quite annoying...
# st.warning(('Streamlit does not support hashing modules. '
# 'We did not hash `%s`.') % obj.__name__)
# TODO: Hash more than just the name for internal modules.
return self.to_bytes(obj.__name__)
elif inspect.isclass(obj):
# TODO: Figure out how to best show this kind of warning to the
# user. In the meantime, show nothing. This scenario is too common,
# (e.g. in every "except" statement) so the current warning is
# quite annoying...
# st.warning(('Streamlit does not support hashing classes. '
# 'We did not hash `%s`.') % obj.__name__)
# TODO: Hash more than just the name of classes.
return self.to_bytes(obj.__name__)
elif isinstance(obj, functools.partial):
# The return value of functools.partial is not a plain function:
# it's a callable object that remembers the original function plus
# the values you pickled into it. So here we need to special-case it.
h = hashlib.new("md5")
self.update(h, obj.args)
self.update(h, obj.func)
self.update(h, obj.keywords)
return h.digest()
else:
# As a last resort, hash the output of the object's __reduce__ method
h = hashlib.new("md5")
try:
reduce_data = obj.__reduce__()
except BaseException as e:
raise UnhashableTypeError() from e
for item in reduce_data:
self.update(h, item)
return h.digest()
class NoResult:
"""Placeholder class for return values when None is meaningful."""
pass
|
print('a b'.split('.s'))
|
"""BipHelp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
router = routers.DefaultRouter()
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls')),
]
|
from django.db import models
class Manufacturer(models.Model):
name = models.CharField(max_length=255)
class Product(models.Model):
name = models.CharField(max_length=255)
price = models.DecimalField(max_digits=10, decimal_places=2)  # max_digits/decimal_places are required by Django; the values here are illustrative
description = models.TextField()
release_date = models.DateField()
manufacturer = models.ForeignKey(Manufacturer, on_delete=models.CASCADE)
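# A minimal usage sketch (an assumption: run inside a configured Django
# project, e.g. from `python manage.py shell`; names and values are
# illustrative only):
#
#   import datetime
#   from decimal import Decimal
#
#   acme = Manufacturer.objects.create(name="Acme")
#   Product.objects.create(
#       name="Widget",
#       price=Decimal("9.99"),
#       description="A sample widget",
#       release_date=datetime.date(2020, 1, 1),
#       manufacturer=acme,
#   )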
|
"""
python test.py --model pointMLP --msg 20220209053148-404
"""
import argparse
import os
import datetime
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
import models as models
from utils import progress_bar, IOStream
from data import ModelNet40
import sklearn.metrics as metrics
from helper import cal_loss
import numpy as np
import torch.nn.functional as F
model_names = sorted(
name for name in models.__dict__ if callable(models.__dict__[name])
)
def parse_args():
"""Parameters"""
parser = argparse.ArgumentParser("training")
parser.add_argument(
"-c",
"--checkpoint",
type=str,
metavar="PATH",
help="path to save checkpoint (default: checkpoint)",
)
parser.add_argument("--msg", type=str, help="message after checkpoint")
parser.add_argument(
"--batch_size", type=int, default=16, help="batch size in training"
)
parser.add_argument(
"--model", default="pointMLP", help="model name [default: pointnet_cls]"
)
parser.add_argument(
"--num_classes",
default=40,
type=int,
choices=[10, 40],
help="training on ModelNet10/40",
)
parser.add_argument("--num_points", type=int, default=1024, help="Point Number")
return parser.parse_args()
def main():
args = parse_args()
print(f"args: {args}")
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
print(f"==> Using device: {device}")
if args.msg is None:
message = str(datetime.datetime.now().strftime("-%Y%m%d%H%M%S"))
else:
message = "-" + args.msg
args.checkpoint = "checkpoints/" + args.model + message
print("==> Preparing data..")
test_loader = DataLoader(
ModelNet40(partition="test", num_points=args.num_points),
num_workers=4,
batch_size=args.batch_size,
shuffle=False,
drop_last=False,
)
# Model
print("==> Building model..")
net = models.__dict__[args.model]()
criterion = cal_loss
net = net.to(device)
checkpoint_path = os.path.join(args.checkpoint, "best_checkpoint.pth")
checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"))
# criterion = criterion.to(device)
if device == "cuda":
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
net.load_state_dict(checkpoint["net"])
test_out = validate(net, test_loader, criterion, device)
print(f"Vanilla out: {test_out}")
def validate(net, testloader, criterion, device):
net.eval()
test_loss = 0
correct = 0
total = 0
test_true = []
test_pred = []
time_cost = datetime.datetime.now()
with torch.no_grad():
for batch_idx, (data, label) in enumerate(testloader):
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
logits = net(data)
loss = criterion(logits, label)
test_loss += loss.item()
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
total += label.size(0)
correct += preds.eq(label).sum().item()
progress_bar(
batch_idx,
len(testloader),
"Loss: %.3f | Acc: %.3f%% (%d/%d)"
% (
test_loss / (batch_idx + 1),
100.0 * correct / total,
correct,
total,
),
)
time_cost = int((datetime.datetime.now() - time_cost).total_seconds())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
return {
"loss": float("%.3f" % (test_loss / (batch_idx + 1))),
"acc": float("%.3f" % (100.0 * metrics.accuracy_score(test_true, test_pred))),
"acc_avg": float(
"%.3f" % (100.0 * metrics.balanced_accuracy_score(test_true, test_pred))
),
"time": time_cost,
}
if __name__ == "__main__":
main()
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test -reindex with CheckBlockIndex
#
from test_framework.test_framework import FilbitTestFramework
from test_framework.util import *
class ReindexTest(FilbitTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir))
def run_test(self):
self.nodes[0].generate(3)
stop_node(self.nodes[0], 0)
wait_filbitds()
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug", "-reindex", "-checkblockindex=1"])
assert_equal(self.nodes[0].getblockcount(), 3)
print "Success"
if __name__ == '__main__':
ReindexTest().main()
|
import winreg
import sys
def check_hide() -> bool:
"""check if hidguardian is used and controller is hidden
"""
if sys.platform.startswith('win32'):
try:
access_reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
access_key = winreg.OpenKey(access_reg, r'SYSTEM\CurrentControlSet\Services\HidGuardian\Parameters', 0, winreg.KEY_READ)
affected_devices = winreg.QueryValueEx(access_key, 'AffectedDevices')[0]
if "054C" in affected_devices and "0CE6" in affected_devices:
return True
return False
except OSError as e:
print(e)
return False
|
from .helpers import *
class TestFilterRuleId:
def test_filter_rule_id_exact_match(self):
arguments = [
"--filter-rule-id", "12",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_filter_rule_id_prefix_match(self):
arguments = [
"--filter-rule-id", "^12",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_filter_rule_id_suffix_match(self):
arguments = [
"--filter-rule-id", ".*22$",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_filter_rule_id_no_match(self):
arguments = [
"--filter-rule-id", "11",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12"
"""
expected = rule_string
context = create_context(arguments, rule_string)
assert expected == get_output(context)
class TestLineNumbers:
def test_line_numbers_identical(self):
arguments = [
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12"
SecRule ARGS "@rx bar" "id:13"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12,tag:'foo'"
SecRule ARGS "@rx bar" "id:13,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_line_numbers_shifted_down(self):
arguments = [
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12"
SecRule ARGS "@rx bar" \\
"id:13"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12,\\
tag:'foo'"
SecRule ARGS "@rx bar" \\
"id:13,\\
tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_line_numbers_shifted_up(self):
arguments = [
"--remove-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12,\\
tag:foo"
SecRule ARGS "@rx bar" \\
"id:13,\\
tag:foo"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12"
SecRule ARGS "@rx bar" \\
"id:13"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
class TestTargetFile:
def test_target_file(self, tmp_path):
import os
from rule_ctl import write_output
file_path = str(tmp_path / 'foo.conf')
arguments = [
"--append-tag", "foo",
"--target-file", file_path
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12,\\
tag:'foo'"
"""
context = create_context(arguments, rule_string)
write_output(context)
assert os.path.exists(file_path)
with open(file_path, 'r') as h:
assert expected.rstrip() == h.read()
def test_target_file_uses_config_as_default(self, tmp_path):
import os
from rule_ctl import write_output
file_path = str(tmp_path / 'foo.conf')
arguments = [
"--append-tag", "foo",
"--config", file_path
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12,\\
tag:'foo'"
"""
context = create_context(arguments, rule_string)
write_output(context)
assert os.path.exists(file_path)
with open(file_path, 'r') as h:
assert expected.rstrip() == h.read()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:jingtongyu
# datetime:2020/6/7 10:14 下午
# software: PyCharm
from flask import current_app
from . import db
from .base import BaseModel
from sqlalchemy.exc import SQLAlchemyError
from werkzeug.security import generate_password_hash, check_password_hash
import time
class AdvicesModel(db.Model, BaseModel):
__tablename__ = 'advices'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(25), nullable=False)
username = db.Column(db.String(25), nullable=False)
advice = db.Column(db.String(500), nullable=False)
def __init__(self, email, username, advice):
self.email = email
self.username = username
self.advice = advice
def __str__(self):
return "Advices(id='%s')" % self.id
def paginate(self, page, per_page):
return self.query.paginate(page=page, per_page=per_page, error_out=False)
def filter_by_email(self, email):
return self.query.filter(self.email.like("%" + email + "%")).all()
def filter_by_username(self, username):
return self.query.filter(self.username.like("%" + username + "%")).all()
def get(self, _id):
return self.query.filter_by(id=_id).first()
def add(self, role):
db.session.add(role)
return session_commit()
def update(self):
return session_commit()
def delete(self, ids):
# self.query.filter_by(id=id).delete()
self.query.filter(self.id.in_(ids)).delete(synchronize_session=False)
return session_commit()
def session_commit():
try:
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
reason = str(e)
current_app.logger.info(e)
return reason
|
<warning descr="Python versions 3.5, 3.6, 3.7, 3.8, 3.9, 3.10 do not have type long. Use int instead.">long("abc")</warning>
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description: Address Cache Table
#
# This test verifies that address cache entry associated with a SED child
# addresses is removed from new parent node ensuring we would not have a
# routing loop.
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
r1 = wpan.Node()
r2 = wpan.Node()
r3 = wpan.Node()
c = wpan.Node()
c3 = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
#
# r3 ---- r1 ---- r2
# | |
# | |
# c3 c
#
# c is initially attached to r2 but it switches parent during test to r1 and then r3
# c3 is just added to make sure r3 become router quickly (not involved in test)
PREFIX = "fd00:1234::"
POLL_INTERVAL = 400
r1.form("addr-cache")
r1.add_prefix(PREFIX, stable=True, on_mesh=True, slaac=True, preferred=True)
r1.whitelist_node(r2)
r2.whitelist_node(r1)
r2.join_node(r1, wpan.JOIN_TYPE_ROUTER)
c.set(wpan.WPAN_POLL_INTERVAL, str(POLL_INTERVAL))
c.whitelist_node(r2)
r2.whitelist_node(c)
c.join_node(r2, wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
r3.whitelist_node(r1)
r1.whitelist_node(r3)
r3.join_node(r1, wpan.JOIN_TYPE_ROUTER)
c3.whitelist_node(r3)
r3.whitelist_node(c3)
c3.join_node(r3, wpan.JOIN_TYPE_END_DEVICE)
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
#
ROUTER_TABLE_WAIT_TIME = 30 / speedup + 5
INVALID_ROUTER_ID = 63
verify(r1.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_LEADER)
verify(r2.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
verify(r3.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
verify(c.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_SLEEPY_END_DEVICE)
verify(c3.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_END_DEVICE)
r1_address = r1.find_ip6_address_with_prefix(PREFIX)
r2_address = r2.find_ip6_address_with_prefix(PREFIX)
c_address = c.find_ip6_address_with_prefix(PREFIX)
# Send a single UDP message from r1 to c
#
# This adds an address cache entry on r1 for c pointing to r2 (the current parent of c).
sender = r1.prepare_tx(r1_address, c_address, "Hi from r1 to c")
recver = c.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful and recver.was_successful)
# Force c to switch its parent from r2 to r1
#
# r3 ---- r1 ---- r2
# | |
# | |
# c3 c
CHILD_SUPERVISION_CHECK_TIMEOUT = 2
PARENT_SUPERVISION_INTERVAL = 1
REATTACH_WAIT_TIME = CHILD_SUPERVISION_CHECK_TIMEOUT / speedup + 6
c.set(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, str(CHILD_SUPERVISION_CHECK_TIMEOUT))
r2.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
r1.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
r3.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
r2.un_whitelist_node(c)
r1.whitelist_node(c)
c.whitelist_node(r1)
# Wait for c to detach from r2 and attach to r1.
def check_c_is_removed_from_r2_child_table():
child_table = wpan.parse_list(r2.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 0)
wpan.verify_within(check_c_is_removed_from_r2_child_table, REATTACH_WAIT_TIME)
# check that c is now a child of r1
child_table = wpan.parse_list(r1.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 1)
# Send a single UDP message from r2 to c
#
# This adds an address cache entry on r2 for c pointing to r1 (the current parent of c).
sender = r2.prepare_tx(r2_address, c_address, "Hi from r2 to c")
recver = c.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful and recver.was_successful)
# Force c to switch its parent from r1 to r3
#
# r3 ---- r1 ---- r2
# | \
# | \
# c3 c
r1.un_whitelist_node(c)
r3.whitelist_node(c)
c.whitelist_node(r3)
# Wait for c to detach from r1 and attach to r3.
def check_c_is_removed_from_r1_child_table():
child_table = wpan.parse_list(r1.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 0)
wpan.verify_within(check_c_is_removed_from_r1_child_table, REATTACH_WAIT_TIME)
# check that c is now a child of r3 (r3 should have two child, c and c3)
child_table = wpan.parse_list(r3.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 2)
# Send a single UDP message from r1 to c
#
# If the r1 address cache entry is not cleared when c attached to r1,
# r1 will still have an entry pointing to r2, and r2 will have an entry
# pointing to r1, thus creating a loop (the msg will not be delivered to r3)
sender = r1.prepare_tx(r1_address, c_address, "Hi from r1 to c")
recver = c.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful and recver.was_successful)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
|
import _plotly_utils.basevalidators
class LightpositionValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="lightposition", parent_name="volume", **kwargs):
super(LightpositionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Lightposition"),
data_docs=kwargs.pop(
"data_docs",
"""
x
Numeric vector, representing the X coordinate
for each vertex.
y
Numeric vector, representing the Y coordinate
for each vertex.
z
Numeric vector, representing the Z coordinate
for each vertex.
""",
),
**kwargs
)
|
import numpy as np
import skimage.draw as skdraw  # skdraw is used throughout this module, so the import must be active
def add_bbox(img,bbox,color=[0,0,0],fill=False,alpha=1):
x1,y1,x2,y2 = bbox
# Clockwise starting from top left
r = [y1,y1,y2,y2]
c = [x1,x2,x2,x1]
if fill:
coords = skdraw.polygon(r,c,shape=img.shape[0:2])
skdraw.set_color(img,coords,color,alpha=alpha)
return
peri_coords = skdraw.polygon_perimeter(r,c,shape=img.shape[0:2])
skdraw.set_color(img,peri_coords,color,alpha=alpha)
def compute_area(bbox,invalid=None):
x1,y1,x2,y2 = bbox
if (x2 <= x1) or (y2 <= y1):
area = invalid
else:
area = (x2 - x1 + 1) * (y2 - y1 + 1)
return area
def compute_iou(bbox1,bbox2,verbose=False):
x1,y1,x2,y2 = bbox1
x1_,y1_,x2_,y2_ = bbox2
x1_in = max(x1,x1_)
y1_in = max(y1,y1_)
x2_in = min(x2,x2_)
y2_in = min(y2,y2_)
intersection = compute_area(bbox=[x1_in,y1_in,x2_in,y2_in],invalid=0.0)
area1 = compute_area(bbox1)
area2 = compute_area(bbox2)
union = area1 + area2 - intersection
iou = intersection / (union + 1e-6)
if verbose:
return iou, intersection, union
return iou
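# Worked example (illustration only): for bbox1 = [0, 0, 9, 9] and
# bbox2 = [5, 5, 14, 14] the intersection box is [5, 5, 9, 9] with area
# 5 * 5 = 25, each input box has area 10 * 10 = 100 (the +1 convention
# counts pixels inclusively), so the union is 175 and compute_iou returns
# roughly 25 / 175 ~= 0.143.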
def compute_area_batch(bbox):
x1,y1,x2,y2 = [bbox[:,i] for i in range(4)]
area = np.zeros(x1.shape[0])
valid_mask = np.logical_and(x2 > x1, y2 > y1)
area_ = (x2 - x1 + 1) * (y2 - y1 + 1)
area[valid_mask] = area_[valid_mask]
return area
def compute_iou_batch(bbox1,bbox2,verbose=False):
x1,y1,x2,y2 = [bbox1[:,i] for i in range(4)]
x1_,y1_,x2_,y2_ = [bbox2[:,i] for i in range(4)]
x1_in = np.maximum(x1,x1_)
y1_in = np.maximum(y1,y1_)
x2_in = np.minimum(x2,x2_)
y2_in = np.minimum(y2,y2_)
intersection_bbox = np.stack((x1_in,y1_in,x2_in,y2_in),1)
intersection = compute_area_batch(bbox=intersection_bbox)
area1 = compute_area_batch(bbox1)
area2 = compute_area_batch(bbox2)
union = area1 + area2 - intersection
iou = intersection / (union + 1e-6)
if verbose:
return iou, intersection, union
return iou
def vis_bbox(bbox,img,color=(0,0,0),modify=False):
im_h,im_w = img.shape[0:2]
x1,y1,x2,y2 = bbox
x1 = max(0,min(x1,im_w-1))
x2 = max(x1,min(x2,im_w-1))
y1 = max(0,min(y1,im_h-1))
y2 = max(y1,min(y2,im_h-1))
r = [y1,y1,y2,y2]
c = [x1,x2,x2,x1]
if modify:
img_ = img
else:
img_ = np.copy(img)
rr,cc = skdraw.polygon(r,c,img.shape[:2])
skdraw.set_color(img_,(rr,cc),color,alpha=0.2)
rr,cc = skdraw.polygon_perimeter(r,c,img.shape[:2])
for k in range(3):
img_[rr,cc,k] = color[k]
return img_
def vis_bboxes(bboxes,img,color=(0,0,0),modify=False):
if modify:
img_ = img
else:
img_ = np.copy(img)
for bbox in bboxes:
img_ = vis_bbox(bbox,img_,color,True)
return img_
def join_bboxes_by_line(bbox1,bbox2,img,color=(255,0,255),modify=False):
im_h,im_w = img.shape[0:2]
x1,y1,x2,y2 = bbox1
x1_,y1_,x2_,y2_ = bbox2
c0 = 0.5*(x1+x2)
r0 = 0.5*(y1+y2)
c1 = 0.5*(x1_+x2_)
r1 = 0.5*(y1_+y2_)
r0,c0,r1,c1 = [int(x) for x in [r0,c0,r1,c1]]
c0 = max(0,min(c0,im_w-1))
c1 = max(0,min(c1,im_w-1))
r0 = max(0,min(r0,im_h-1))
r1 = max(0,min(r1,im_h-1))
rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)
if modify:
img_ = img
else:
img_ = np.copy(img)
for k in range(3):
img_[rr,cc,k] = val*color[k]
rr,cc = skdraw.circle(r0,c0,4,img_.shape[:2])
for k in range(3):
img_[rr,cc,k] = color[k]
rr,cc = skdraw.circle(r1,c1,4,img_.shape[:2])
for k in range(3):
img_[rr,cc,k] = color[k]
return img_
def vis_sub_obj_bboxes(
sub_bboxes,
obj_bboxes,
img,
sub_color=(0,0,255),
obj_color=(255,0,0),
modify=False):
img_ = vis_bboxes(sub_bboxes,img,sub_color,modify)
img_ = vis_bboxes(obj_bboxes,img_,obj_color,modify=True)
for sub_bbox,obj_bbox in zip(sub_bboxes,obj_bboxes):
img_ = join_bboxes_by_line(sub_bbox,obj_bbox,img_,modify=True)
return img_
def vis_human_keypts(
img,
keypts,
radius=2,
pt_color=(0,255,255),
line_color=(0,255,255),
modify=False):
LINKS = [
(0,1),
(1,2),
(2,3),
(3,4),
(1,5),
(5,6),
(6,7),
(0,15),
(15,17),
(0,14),
(14,16),
(1,8),
(8,9),
(9,10),
(1,11),
(11,12),
(12,13),
(8,11)
]
if modify:
img_ = img
else:
img_ = np.copy(img)
h,w = img.shape[:2]
for i,j in LINKS:
c0,r0,conf0 = keypts[i]
c1,r1,conf1 = keypts[j]
r0,r1 = [max(0,min(h-1,int(v))) for v in [r0,r1]]
c0,c1 = [max(0,min(w-1,int(v))) for v in [c0,c1]]
if conf0 > 0 and conf1 > 0:
rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)
for k in range(3):
img_[rr,cc,k] = val*line_color[k]
num_keypts = keypts.shape[0]
for i in range(num_keypts):
c,r,conf = keypts[i]
if conf==0.0:
continue
rr,cc = skdraw.circle(r,c,radius,img_.shape[:2])
for k in range(3):
img_[rr,cc,k] = pt_color[k]
return img_
|
from neo4j import GraphDatabase
import json
import os
def create_beers(tx):
""" Load from the results of cloudburst site scraper """
with open('data/beers.json', 'r') as f:
beer_hops = json.load(f)
beers = beer_hops['beers']
query = """
UNWIND $beers as beer
MERGE (b:Beer {name : beer.beer_name,
abv : beer.abv,
style : beer.beer_style,
description : beer.description
})
RETURN count(b) as c
"""
records = tx.run(query, beers=beers)
print(
'Merged {} Beer nodes'
.format(records.single()['c']))
def create_hops(tx):
""" Hops are loaded into the DB from multiple sources
First is a hand-curated hop list to get better coverage
of the cloudburst beer descriptions. Contains names only.
We also load from Yakima Chief, which makes nodes with
additional data on aroma profiles and a useful description
of the hop.
"""
with open('data/hopnames.txt') as f:
hoplist = f.read().splitlines()
hoplist = [h.title() for h in hoplist if len(h) > 0]
with open('data/yakimachiefhopdata.json', 'r') as f:
ych = json.load(f)
# This query is fast but definitely not idempotent
query_params = []
for i, hop in enumerate(ych['hops']):
query_params.append([i, hop])
query = """
UNWIND $query_params as params
MERGE (h:Hop { idx : params[0]})
SET h += params[1]
SET h.data_source = 'Yakima Chief'
SET h.data_file = 'yakimachiefhopdata.json'
"""
tx.run(query, query_params=query_params)
query = """
with $hoplist as hoplist
UNWIND hoplist as name
OPTIONAL MATCH (h:Hop {name:name})
with h,name where h is NULL
MERGE (new:Hop {name : name})
SET new.data_source = 'Curated List'
SET new.data_file = 'hopnames.txt'
"""
tx.run(query, hoplist=hoplist)
query = """
match (n:Hop)
return count(n) as c
"""
records = tx.run(query)
print("Merged {} Hop nodes".format(records.single()['c']))
def create_beer_contains_hop_edges(tx):
query = """
match (b:Beer)
match (h:Hop)
where b.description contains h.name
merge (b)-[e:CONTAINS]-(h)
return count(e) as c
"""
records = tx.run(query)
print(
'Merged {} (:Beer)-[:CONTAINS]-(:Hop) relationships'
.format(records.single()['c']))
def create_styles(tx):
query = """
match (b:Beer)
with distinct b.style as styles
MERGE (s:Style {style : styles})
with s
match (b:Beer) where b.style = s.style
MERGE (b)-[e:STYLE]->(s)
return count(e) as c
"""
records = tx.run(query)
print(
"Merged {} (:Beer)-[:STYLE]-(:Style) relationships"
.format(records.single()['c']))
def create_hop_aromas(tx):
query = """
match (h:Hop)
UNWIND h.aroma_profile as aromas
with distinct aromas as aroma
MERGE (a:Aroma {aroma : aroma})
with a
match (h:Hop) where a.aroma in h.aroma_profile
MERGE (h)-[e:HAS_AROMA]-(a)
return count(e) as c
"""
records = tx.run(query)
print(
"Merged {} (:Aroma)-[:RECOMMENDED]-(:Aroma) relationships"
.format(records.single()['c']))
def style_abv_stats(tx):
query = """
match (s:Style)-[:STYLE]-(b:Beer)
with s, avg(b.abv) as abv_mean, stDevP(b.abv) as abv_std
set s.abv_mean = abv_mean
set s.abv_std = abv_std
"""
tx.run(query)
print("Computed style mean/std abv.")
query = """
match (b:Beer)-[:STYLE]-(s:Style)
set b.style_abv_z_score = (b.abv - s.abv_mean) / s.abv_std
"""
tx.run(query)
print("Computed beer style_abv_z_score")
if __name__ == '__main__':
uri = "neo4j://localhost:7687"
try:
pw = os.environ['NEO4J_PW']
except KeyError as e:
msg = "No environment variable `NEO4J_PW` found. " \
"Export NEO4J_PW='yourpassword' " \
"in the current shell environment or in your shell config file."
raise KeyError(msg) from e
driver = GraphDatabase.driver(uri, auth=("neo4j", pw))
with driver.session() as session:
swt = session.write_transaction
swt(create_beers)
swt(create_hops)
swt(create_beer_contains_hop_edges)
swt(create_hop_aromas)
swt(create_styles)
swt(style_abv_stats)
driver.close()
|
from pprint import pprint
d = {
'A' : 10,
'B' : 50,
'C' : 40,
}
print(sorted(d)) # ['A', 'B', 'C']
print(sorted(d, key=d.get)) # ['A', 'C', 'B']
print(sorted(d, key=d.get, reverse=True)) # ['B', 'C', 'A']
l = [
{
'id' : 'A',
'keyword' : 10,
},
{
'id' : 'B',
'keyword' : 50,
},
{
'id' : 'C',
'keyword' : 40,
},
]
pprint(sorted(l, key=lambda x:x['keyword'], reverse=True))
'''
[{'id': 'B', 'keyword': 50},
{'id': 'C', 'keyword': 40},
{'id': 'A', 'keyword': 10}]
'''
|
#
# Strelka - Small Variant Caller
# Copyright (c) 2009-2018 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
"""
This consolidates build-time config data such as git status
and build date. This is in contrast to cmake configuration-time
config data like relative paths and library/header availability.
"""
workflowVersion="2.9.2"
buildTime="2018-03-02T22:08:15.960987Z"
|
import os
import argparse
import warnings
import numpy as np
from scipy.io import wavfile
from hmmlearn import hmm
from python_speech_features import mfcc
# Function to parse input arguments
def build_arg_parser():
parser = argparse.ArgumentParser(description='Trains the HMM classifier')
parser.add_argument("--input-folder", dest="input_folder", required=True,
help="Input folder containing the audio files in subfolders")
return parser
# Class to handle all HMM related processing
class HMMTrainer(object):
def __init__(self, model_name='GaussianHMM', n_components=4, cov_type='diag', n_iter=1000):
self.model_name = model_name
self.n_components = n_components
self.cov_type = cov_type
self.n_iter = n_iter
self.models = []
if self.model_name == 'GaussianHMM':
self.model = hmm.GaussianHMM(n_components=self.n_components,
covariance_type=self.cov_type, n_iter=self.n_iter)
else:
raise TypeError('Invalid model type')
# X is a 2D numpy array where each row is 13D
def train(self, X):
np.seterr(all='ignore')
self.models.append(self.model.fit(X))
# Run the model on input data
def get_score(self, input_data):
return self.model.score(input_data)
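# A minimal usage sketch (illustration only; a real input would be an MFCC
# feature matrix of shape (n_frames, 13) as extracted below):
#
#   X_demo = np.random.randn(100, 13)
#   trainer = HMMTrainer(n_components=4)
#   trainer.train(X_demo)
#   print(trainer.get_score(X_demo))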
if __name__=='__main__':
args = build_arg_parser().parse_args()
input_folder = args.input_folder
hmm_models = []
# Parse the input directory
for dirname in os.listdir(input_folder):
# Get the name of the subfolder
subfolder = os.path.join(input_folder, dirname)
if not os.path.isdir(subfolder):
continue
# Extract the label
label = subfolder[subfolder.rfind('/') + 1:]
# Initialize variables
X = np.array([])
y_words = []
warnings.filterwarnings("ignore")
# Iterate through the audio files (leaving 1 file for testing in each class)
for filename in [x for x in os.listdir(subfolder) if x.endswith('.wav')][:-1]:
# Read the input file
filepath = os.path.join(subfolder, filename)
sampling_freq, audio = wavfile.read(filepath)
# Extract MFCC features
mfcc_features = mfcc(audio, sampling_freq)
# Append to the variable X
if len(X) == 0:
X = mfcc_features
else:
X = np.append(X, mfcc_features, axis=0)
# Append the label
y_words.append(label)
#print('X.shape =', X.shape)
# Train and save HMM model
hmm_trainer = HMMTrainer()
hmm_trainer.train(X)
hmm_models.append((hmm_trainer, label))
hmm_trainer = None
# Test files
input_files = [
'data/pineapple/pineapple15.wav',
'data/orange/orange15.wav',
'data/apple/apple15.wav',
'data/kiwi/kiwi15.wav'
]
# Classify input data
for input_file in input_files:
# Read input file
sampling_freq, audio = wavfile.read(input_file)
# Extract MFCC features
mfcc_features = mfcc(audio, sampling_freq)
# Define variables
max_score = float("-inf")
output_label = None
# Iterate through all HMM models and pick
# the one with the highest score
for item in hmm_models:
hmm_model, label = item
score = hmm_model.get_score(mfcc_features)
if score > max_score:
max_score = score
output_label = label
# Print the output
print( "\nTrue:", input_file[input_file.find('/')+1:input_file.rfind('/')])
print("Predicted:", output_label)
warnings.filterwarnings("ignore")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# 66 0F d3 /r
# psrlq mm1, mm2/m64
Buffer = bytes.fromhex('660fd39011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'psrlq')
assert_equal(myDisasm.repr(), 'psrlq xmm2, xmmword ptr [rax+44332211h]')
# VEX.NDS.128.66.0F.WIG d3 /r
# vpsrlq xmm1, xmm2, xmm3/m128
Buffer = bytes.fromhex('c40101d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq xmm9, xmm15, xmmword ptr [r14]')
# VEX.NDS.256.66.0F.WIG d3 /r
# vpsrlq ymm1, ymm2, ymm3/m256
Buffer = bytes.fromhex('c40105d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq ymm9, ymm15, ymmword ptr [r14]')
# EVEX.NDS.128.66.0F.WIG d3 /r
# vpsrlq xmm1 {k1}{z}, xmm2, xmm3/m128
Buffer = bytes.fromhex('62010506d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x6)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq xmm25, xmm31, xmmword ptr [r14]')
# EVEX.NDS.256.66.0F.WIG d3 /r
# vpsrlq ymm1 {k1}{z}, ymm2, ymm3/m256
Buffer = bytes.fromhex('62010520d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x20)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq ymm25, ymm31, ymmword ptr [r14]')
# EVEX.NDS.512.66.0F.WIG d3 /r
# vpsrlq zmm1 {k1}{z}, zmm2, zmm3/m512
Buffer = bytes.fromhex('62010540d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x40)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq zmm25, zmm31, zmmword ptr [r14]')
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T N A N O
#
# Copyright (c) 2020+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
# MyTypeSpecimen.py
#
# This MyTypeSpecimen.py shows an example how to import
# existing libraries that contain knowledge about documents,
# pages and the elements on the pages.
#
from random import random
#
# From the library we import the classes (=object factories)
# that we need for creating the type specimen.
# Classes can be recognised by their initial capital name.
from pagebotnano_005.document import Document
from pagebotnano_005.elements import Rect, Text, TextBox
from pagebotnano_005.toolbox.loremipsum import loremipsum
class TypeSpecimen(Document):
# Class names start with a capital. See a class as a factory
# of type specimen objects (name spelled with an initial lower case.)
# In this case we inherit from what is already defined in Document.
# Similar how a Volkswagen factory would inherit the functions already
# defined in a generic car factory. Inheriting is one of the most
# powerful aspects of Python programming, so an object can perform
# complex tasks, without the need to add these functions again for
# every new project.
pass # For now it will do nothing, but that will change.
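# A minimal sketch (illustrative only, not an existing PageBotNano API) of
# what the inheritance above enables: a subclass can add its own behavior
# without touching Document. The "describe" method name is an assumption.
class DescribedTypeSpecimen(TypeSpecimen):
    def describe(self):
        # Report how many pages this specimen currently holds.
        return '%s with %d page(s)' % (
            self.__class__.__name__, len(getattr(self, 'pages', [])))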
# Now we create a new type specimen by executing the class.
# Compare that to letting a car factory produce a car. We only need
# one factory ("TypeSpecimen", name starting with a capital), which
# can then produce an unlimited number of typeSpecimen objects (name
# starting with a lower case).
typeSpecimen = TypeSpecimen() # Execute the class/factory by adding "()"
fontName = 'Georgia'
titleSize = 64
headSize = 24
bodyFontSize = 16
leading = 1.4 # Multiplier for the fontSize to get the line height.
padding = 80 # Padding of the page (what CSS would call the page "margin").
def makeCoverPage(doc, title):
global Rect, Text, TextBox
global fontName, titleSize, headSize, bodyFontSize, leading, padding
page = doc.newPage()
# Fill the page with a random dark color (< 50% for (r, g, b))
fillColor = random()*0.5, random()*0.5, random()*0.5
rectangleElement = Rect(0, 0, page.w, page.h, fill=fillColor)
page.addElement(rectangleElement) # Add the rectangle element to the page.
# Make a FormattedString for the text box
fs = Text.FS(title,
font=fontName, fontSize=titleSize, lineHeight=titleSize*1.1, fill=1)
# Make a Text element with an (x, y) position and add it to the page.
textElement = Text(fs, x=padding, y=page.h-1.5*padding)
page.addElement(textElement) # Add the text element to the page.
# Add square with light color (> 50% for (r, g, b)) and lighter frame.
rx = ry = padding # Position from bottom-left
rw = rh = page.w - 2*padding # Make a square, so w = h
fillColor = 0.5+random()*0.5, 0.5+random()*0.5, 0.5+random()*0.5
strokeColor = 0.75+random()*0.25, 0.75+random()*0.25, 0.75+random()*0.25
rectangleElement = Rect(rx, ry, rw, rh, fill=fillColor,
stroke=strokeColor, strokeWidth=5)
page.addElement(rectangleElement) # Add the rectangle element to the page.
def makeBodyPages(doc, bodyText):
"""Create a number of new pages in the document, as long as there is overflow.
If no new page size is given, it will take over the size of the document.
"""
fs = Text.FS(bodyText, font=fontName, fontSize=bodyFontSize, lineHeight=bodyFontSize*leading)
while True:
page = doc.newPage()
# Add text element with page number
pn = TextBox.FS(str(page.pn), align='center', font=fontName, fontSize=bodyFontSize)
page.addElement(Text(pn, page.w/2, padding/2))
e = TextBox(fs, x=padding, y=padding, w=page.w-2*padding, h=page.h-2*padding, fill=1)
page.addElement(e)
fs = e.getOverflow(fs)
if not fs:
break
txt = loremipsum(doShuffle=True)
makeCoverPage(typeSpecimen, 'Type specimen\n'+fontName)
makeBodyPages(typeSpecimen, txt)
# Build the document, all pages and their contained elements.
typeSpecimen.build()
# Create the "_export" folder if it does not exist yet.
# This GitHub repository is configured to ignore the _export folder,
# so exported files are not uploaded.
# Export the specimen as PDF and as PNG.
typeSpecimen.export('_export/MyTypeSpecimen.pdf')
typeSpecimen.export('_export/MyTypeSpecimen.png')
print('Done')
|
class hypSyntaxError(Exception):
pass
class hypFileError(Exception):
pass
class hypObjectError(Exception):
pass
|
# The Computer Language Benchmarks Game
# http://benchmarksgame.alioth.debian.org/
#
# originally by Kevin Carson
# modified by Tupteq, Fredrik Johansson, and Daniel Nanz
# modified by Maciej Fijalkowski
# 2to3
import pyjion
import timeit
import gc
def combinations(l):
result = []
for x in range(len(l) - 1):
ls = l[x+1:]
for y in ls:
result.append((l[x],y))
return result
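# Illustrative check only (added for clarity, not part of the original
# benchmark): the helper above yields the same unordered pairs as
# itertools.combinations(l, 2).
if __debug__:
    import itertools
    assert combinations([1, 2, 3]) == list(itertools.combinations([1, 2, 3], 2))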
PI = 3.14159265358979323
SOLAR_MASS = 4 * PI * PI
DAYS_PER_YEAR = 365.24
BODIES = {
'sun': ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0], SOLAR_MASS),
'jupiter': ([4.84143144246472090e+00,
-1.16032004402742839e+00,
-1.03622044471123109e-01],
[1.66007664274403694e-03 * DAYS_PER_YEAR,
7.69901118419740425e-03 * DAYS_PER_YEAR,
-6.90460016972063023e-05 * DAYS_PER_YEAR],
9.54791938424326609e-04 * SOLAR_MASS),
'saturn': ([8.34336671824457987e+00,
4.12479856412430479e+00,
-4.03523417114321381e-01],
[-2.76742510726862411e-03 * DAYS_PER_YEAR,
4.99852801234917238e-03 * DAYS_PER_YEAR,
2.30417297573763929e-05 * DAYS_PER_YEAR],
2.85885980666130812e-04 * SOLAR_MASS),
'uranus': ([1.28943695621391310e+01,
-1.51111514016986312e+01,
-2.23307578892655734e-01],
[2.96460137564761618e-03 * DAYS_PER_YEAR,
2.37847173959480950e-03 * DAYS_PER_YEAR,
-2.96589568540237556e-05 * DAYS_PER_YEAR],
4.36624404335156298e-05 * SOLAR_MASS),
'neptune': ([1.53796971148509165e+01,
-2.59193146099879641e+01,
1.79258772950371181e-01],
[2.68067772490389322e-03 * DAYS_PER_YEAR,
1.62824170038242295e-03 * DAYS_PER_YEAR,
-9.51592254519715870e-05 * DAYS_PER_YEAR],
5.15138902046611451e-05 * SOLAR_MASS) }
SYSTEM = list(BODIES.values())
PAIRS = combinations(SYSTEM)
def advance(dt, n, bodies=SYSTEM, pairs=PAIRS):
    # n steps of size dt: update velocities from pairwise gravity, then positions.
for i in range(n):
for (([x1, y1, z1], v1, m1),
([x2, y2, z2], v2, m2)) in pairs:
dx = x1 - x2
dy = y1 - y2
dz = z1 - z2
mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5))
b1m = m1 * mag
b2m = m2 * mag
v1[0] -= dx * b2m
v1[1] -= dy * b2m
v1[2] -= dz * b2m
v2[0] += dx * b1m
v2[1] += dy * b1m
v2[2] += dz * b1m
for (r, [vx, vy, vz], m) in bodies:
r[0] += dt * vx
r[1] += dt * vy
r[2] += dt * vz
def report_energy(bodies=SYSTEM, pairs=PAIRS, e=0.0):
for (((x1, y1, z1), v1, m1),
((x2, y2, z2), v2, m2)) in pairs:
dx = x1 - x2
dy = y1 - y2
dz = z1 - z2
e -= (m1 * m2) / ((dx * dx + dy * dy + dz * dz) ** 0.5)
for (r, [vx, vy, vz], m) in bodies:
e += m * (vx * vx + vy * vy + vz * vz) / 2.
print("%.9f" % e)
def offset_momentum(ref, bodies=SYSTEM, px=0.0, py=0.0, pz=0.0):
    # Give the reference body a velocity that cancels the system's total momentum.
for (r, [vx, vy, vz], m) in bodies:
px -= vx * m
py -= vy * m
pz -= vz * m
(r, v, m) = ref
v[0] = px / m
v[1] = py / m
v[2] = pz / m
def main(n=50000, ref='sun'):
offset_momentum(BODIES[ref])
report_energy()
advance(0.01, n)
report_energy()
if __name__ == "__main__":
print("N-body took {0} without Pyjion".format(timeit.repeat(main, repeat=5, number=1)))
pyjion.enable()
pyjion.set_optimization_level(1)
print("N-body took {0} with Pyjion".format(timeit.repeat(main, repeat=5, number=1)))
pyjion.disable()
print(pyjion.info(offset_momentum))
print(pyjion.info(advance))
print(pyjion.info(report_energy))
gc.collect()
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the Swift backend store"""
import copy
import fixtures
import hashlib
import httplib
import mock
import tempfile
import uuid
from oslo.config import cfg
from oslotest import moxstubout
import six
import stubout
import swiftclient
from glance_store._drivers.swift import store as swift
from glance_store._drivers.swift import utils as sutils
from glance_store import backend
from glance_store import BackendException
from glance_store.common import auth
from glance_store import exceptions
from glance_store.location import get_location_from_uri
from glance_store.openstack.common import context
from glance_store.openstack.common import units
from glance_store.tests import base
CONF = cfg.CONF
FAKE_UUID = lambda: str(uuid.uuid4())
Store = swift.Store
FIVE_KB = 5 * units.Ki
FIVE_GB = 5 * units.Gi
MAX_SWIFT_OBJECT_SIZE = FIVE_GB
SWIFT_PUT_OBJECT_CALLS = 0
SWIFT_CONF = {'swift_store_auth_address': 'localhost:8080',
              'swift_store_container': 'glance',
              'swift_store_user': 'user',
              'swift_store_key': 'key',
              'swift_store_retry_get_count': 1,
              'default_swift_reference': 'ref1'
              }
# We stub out as little as possible to ensure that the code paths
# between swift and swiftclient are tested
# thoroughly
def stub_out_swiftclient(stubs, swift_store_auth_version):
fixture_containers = ['glance']
fixture_container_headers = {}
fixture_headers = {
'glance/%s' % FAKE_UUID: {
'content-length': FIVE_KB,
'etag': 'c2e5db72bd7fd153f53ede5da5a06de3'
}
}
fixture_objects = {'glance/%s' % FAKE_UUID:
six.StringIO("*" * FIVE_KB)}
def fake_head_container(url, token, container, **kwargs):
if container not in fixture_containers:
msg = "No container %s found" % container
raise swiftclient.ClientException(msg,
http_status=httplib.NOT_FOUND)
return fixture_container_headers
def fake_put_container(url, token, container, **kwargs):
fixture_containers.append(container)
def fake_post_container(url, token, container, headers, http_conn=None):
for key, value in six.iteritems(headers):
fixture_container_headers[key] = value
def fake_put_object(url, token, container, name, contents, **kwargs):
# PUT returns the ETag header for the newly-added object
# Large object manifest...
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS += 1
CHUNKSIZE = 64 * units.Ki
fixture_key = "%s/%s" % (container, name)
if fixture_key not in fixture_headers:
if kwargs.get('headers'):
etag = kwargs['headers']['ETag']
fixture_headers[fixture_key] = {'manifest': True,
'etag': etag}
return etag
if hasattr(contents, 'read'):
fixture_object = six.StringIO()
chunk = contents.read(CHUNKSIZE)
checksum = hashlib.md5()
while chunk:
fixture_object.write(chunk)
checksum.update(chunk)
chunk = contents.read(CHUNKSIZE)
etag = checksum.hexdigest()
else:
fixture_object = six.StringIO(contents)
etag = hashlib.md5(fixture_object.getvalue()).hexdigest()
read_len = fixture_object.len
if read_len > MAX_SWIFT_OBJECT_SIZE:
msg = ('Image size:%d exceeds Swift max:%d' %
(read_len, MAX_SWIFT_OBJECT_SIZE))
raise swiftclient.ClientException(
msg, http_status=httplib.REQUEST_ENTITY_TOO_LARGE)
fixture_objects[fixture_key] = fixture_object
fixture_headers[fixture_key] = {
'content-length': read_len,
'etag': etag}
return etag
else:
msg = ("Object PUT failed - Object with key %s already exists"
% fixture_key)
raise swiftclient.ClientException(msg,
http_status=httplib.CONFLICT)
def fake_get_object(url, token, container, name, **kwargs):
# GET returns the tuple (list of headers, file object)
fixture_key = "%s/%s" % (container, name)
if fixture_key not in fixture_headers:
msg = "Object GET failed"
raise swiftclient.ClientException(msg,
http_status=httplib.NOT_FOUND)
byte_range = None
headers = kwargs.get('headers', dict())
if headers is not None:
headers = dict((k.lower(), v) for k, v in six.iteritems(headers))
if 'range' in headers:
byte_range = headers.get('range')
fixture = fixture_headers[fixture_key]
if 'manifest' in fixture:
# Large object manifest... we return a file containing
# all objects with prefix of this fixture key
chunk_keys = sorted([k for k in fixture_headers.keys()
if k.startswith(fixture_key) and
k != fixture_key])
result = six.StringIO()
for key in chunk_keys:
result.write(fixture_objects[key].getvalue())
else:
result = fixture_objects[fixture_key]
if byte_range is not None:
start = int(byte_range.split('=')[1].strip('-'))
result = six.StringIO(result.getvalue()[start:])
fixture_headers[fixture_key]['content-length'] = len(
result.getvalue())
return fixture_headers[fixture_key], result
def fake_head_object(url, token, container, name, **kwargs):
# HEAD returns the list of headers for an object
try:
fixture_key = "%s/%s" % (container, name)
return fixture_headers[fixture_key]
except KeyError:
msg = "Object HEAD failed - Object does not exist"
raise swiftclient.ClientException(msg,
http_status=httplib.NOT_FOUND)
def fake_delete_object(url, token, container, name, **kwargs):
# DELETE returns nothing
fixture_key = "%s/%s" % (container, name)
if fixture_key not in fixture_headers:
msg = "Object DELETE failed - Object does not exist"
raise swiftclient.ClientException(msg,
http_status=httplib.NOT_FOUND)
else:
del fixture_headers[fixture_key]
del fixture_objects[fixture_key]
def fake_http_connection(*args, **kwargs):
return None
def fake_get_auth(url, user, key, snet, auth_version, **kwargs):
if url is None:
return None, None
if 'http' in url and '://' not in url:
raise ValueError('Invalid url %s' % url)
# Check the auth version against the configured value
if swift_store_auth_version != auth_version:
msg = 'AUTHENTICATION failed (version mismatch)'
raise swiftclient.ClientException(msg)
return None, None
stubs.Set(swiftclient.client,
'head_container', fake_head_container)
stubs.Set(swiftclient.client,
'put_container', fake_put_container)
stubs.Set(swiftclient.client,
'post_container', fake_post_container)
stubs.Set(swiftclient.client,
'put_object', fake_put_object)
stubs.Set(swiftclient.client,
'delete_object', fake_delete_object)
stubs.Set(swiftclient.client,
'head_object', fake_head_object)
stubs.Set(swiftclient.client,
'get_object', fake_get_object)
stubs.Set(swiftclient.client,
'get_auth', fake_get_auth)
stubs.Set(swiftclient.client,
'http_connection', fake_http_connection)
class SwiftTests(object):
@property
def swift_store_user(self):
return 'tenant:user1'
def test_get_size(self):
"""
Test that we can get the size of an object in the swift store
"""
uri = "swift://%s:key@auth_address/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
image_size = self.store.get_size(loc)
self.assertEqual(image_size, 5120)
def test_validate_location_for_invalid_uri(self):
"""
Test that validate location raises when the location contains
any account reference.
"""
uri = "swift+config://store_1/glance/%s"
self.assertRaises(exceptions.BadStoreUri,
self.store.validate_location,
uri)
def test_validate_location_for_valid_uri(self):
"""
Test that validate location verifies that the location does not
contain any account reference
"""
uri = "swift://user:key@auth_address/glance/%s"
try:
self.assertIsNone(self.store.validate_location(uri))
except Exception:
self.fail('Location uri validation failed')
def test_get_size_with_multi_tenant_on(self):
"""Test that single tenant uris work with multi tenant on."""
uri = ("swift://%s:key@auth_address/glance/%s" %
(self.swift_store_user, FAKE_UUID))
self.config(swift_store_multi_tenant=True)
#NOTE(markwash): ensure the image is found
size = backend.get_size_from_backend(uri, context={})
self.assertEqual(size, 5120)
def test_get(self):
"""Test a "normal" retrieval of an image in chunks"""
uri = "swift://%s:key@auth_address/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
(image_swift, image_size) = self.store.get(loc)
self.assertEqual(image_size, 5120)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_swift:
data += chunk
self.assertEqual(expected_data, data)
def test_get_with_retry(self):
"""
Test a retrieval where Swift does not get the full image in a single
request.
"""
uri = "swift://%s:key@auth_address/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
ctxt = context.RequestContext()
(image_swift, image_size) = self.store.get(loc, context=ctxt)
resp_full = ''.join([chunk for chunk in image_swift.wrapped])
resp_half = resp_full[:len(resp_full) / 2]
image_swift.wrapped = swift.swift_retry_iter(resp_half, image_size,
self.store,
loc.store_location,
ctxt)
self.assertEqual(image_size, 5120)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_swift:
data += chunk
self.assertEqual(expected_data, data)
def test_get_with_http_auth(self):
"""
Test a retrieval from Swift with an HTTP authurl. This is
specified either via a Location header with swift+http:// or using
http:// in the swift_store_auth_address config value
"""
loc = get_location_from_uri("swift+http://%s:key@auth_address/"
"glance/%s" %
(self.swift_store_user, FAKE_UUID))
ctxt = context.RequestContext()
(image_swift, image_size) = self.store.get(loc, context=ctxt)
self.assertEqual(image_size, 5120)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_swift:
data += chunk
self.assertEqual(expected_data, data)
def test_get_non_existing(self):
"""
        Test that trying to retrieve a swift object that doesn't exist
raises an error
"""
loc = get_location_from_uri("swift://%s:key@authurl/glance/noexist" % (
self.swift_store_user))
self.assertRaises(exceptions.NotFound,
self.store.get,
loc)
def test_add(self):
"""Test that we can add an image via the swift backend"""
sutils.is_multiple_swift_store_accounts_enabled = \
mock.Mock(return_value=False)
reload(swift)
self.store = Store(self.conf)
self.store.configure()
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
expected_image_id = str(uuid.uuid4())
loc = "swift+https://tenant%%3Auser1:key@localhost:8080/glance/%s"
expected_location = loc % (expected_image_id)
image_swift = six.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
location, size, checksum, _ = self.store.add(expected_image_id,
image_swift,
expected_swift_size)
self.assertEqual(expected_location, location)
self.assertEqual(expected_swift_size, size)
self.assertEqual(expected_checksum, checksum)
# Expecting a single object to be created on Swift i.e. no chunking.
self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = ''.join([chunk for chunk in new_image_swift])
new_image_swift_size = len(new_image_swift)
self.assertEqual(expected_swift_contents, new_image_contents)
self.assertEqual(expected_swift_size, new_image_swift_size)
def test_add_multi_store(self):
conf = copy.deepcopy(SWIFT_CONF)
conf['default_swift_reference'] = 'store_2'
self.config(**conf)
reload(swift)
self.store = Store(self.conf)
self.store.configure()
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_image_id = str(uuid.uuid4())
image_swift = six.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
loc = 'swift+config://store_2/glance/%s'
expected_location = loc % (expected_image_id)
location, size, checksum, arg = self.store.add(expected_image_id,
image_swift,
expected_swift_size)
self.assertEqual(expected_location, location)
def test_add_auth_url_variations(self):
"""
Test that we can add an image via the swift backend with
a variety of different auth_address values
"""
sutils.is_multiple_swift_store_accounts_enabled = \
mock.Mock(return_value=True)
conf = copy.deepcopy(SWIFT_CONF)
self.config(**conf)
variations = {
'store_4': 'swift+config://store_4/glance/%s',
'store_5': 'swift+config://store_5/glance/%s',
'store_6': 'swift+config://store_6/glance/%s'
}
for variation, expected_location in variations.items():
image_id = str(uuid.uuid4())
expected_location = expected_location % image_id
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = \
hashlib.md5(expected_swift_contents).hexdigest()
image_swift = six.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
conf['default_swift_reference'] = variation
self.config(**conf)
reload(swift)
self.store = Store(self.conf)
self.store.configure()
location, size, checksum, _ = self.store.add(image_id, image_swift,
expected_swift_size)
self.assertEqual(expected_location, location)
self.assertEqual(expected_swift_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = ''.join([chunk for chunk in new_image_swift])
new_image_swift_size = len(new_image_swift)
self.assertEqual(expected_swift_contents, new_image_contents)
self.assertEqual(expected_swift_size, new_image_swift_size)
def test_add_no_container_no_create(self):
"""
Tests that adding an image with a non-existing container
raises an appropriate exception
"""
conf = copy.deepcopy(SWIFT_CONF)
conf['swift_store_user'] = 'tenant:user'
conf['swift_store_create_container_on_put'] = False
conf['swift_store_container'] = 'noexist'
self.config(**conf)
reload(swift)
self.store = Store(self.conf)
self.store.configure()
image_swift = six.StringIO("nevergonnamakeit")
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
# We check the exception text to ensure the container
# missing text is found in it, otherwise, we would have
# simply used self.assertRaises here
exception_caught = False
try:
self.store.add(str(uuid.uuid4()), image_swift, 0)
except BackendException as e:
exception_caught = True
self.assertIn("container noexist does not exist "
"in Swift", unicode(e))
self.assertTrue(exception_caught)
self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 0)
def test_add_no_container_and_create(self):
"""
Tests that adding an image with a non-existing container
creates the container automatically if flag is set
"""
sutils.is_multiple_swift_store_accounts_enabled = \
mock.Mock(return_value=True)
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
expected_image_id = str(uuid.uuid4())
loc = 'swift+config://ref1/noexist/%s'
expected_location = loc % (expected_image_id)
image_swift = six.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
conf = copy.deepcopy(SWIFT_CONF)
conf['swift_store_user'] = 'tenant:user'
conf['swift_store_create_container_on_put'] = True
conf['swift_store_container'] = 'noexist'
self.config(**conf)
reload(swift)
self.store = Store(self.conf)
self.store.configure()
location, size, checksum, _ = self.store.add(expected_image_id,
image_swift,
expected_swift_size)
self.assertEqual(expected_location, location)
self.assertEqual(expected_swift_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = ''.join([chunk for chunk in new_image_swift])
new_image_swift_size = len(new_image_swift)
self.assertEqual(expected_swift_contents, new_image_contents)
self.assertEqual(expected_swift_size, new_image_swift_size)
def test_add_large_object(self):
"""
        Tests adding a very large image. We simulate the large
object by setting store.large_object_size to a small number
and then verify that there have been a number of calls to
put_object()...
"""
sutils.is_multiple_swift_store_accounts_enabled = \
mock.Mock(return_value=True)
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
expected_image_id = str(uuid.uuid4())
loc = 'swift+config://ref1/glance/%s'
expected_location = loc % (expected_image_id)
image_swift = six.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
self.store = Store(self.conf)
self.store.configure()
orig_max_size = self.store.large_object_size
orig_temp_size = self.store.large_object_chunk_size
try:
self.store.large_object_size = 1024
self.store.large_object_chunk_size = 1024
location, size, checksum, _ = self.store.add(expected_image_id,
image_swift,
expected_swift_size)
finally:
self.store.large_object_chunk_size = orig_temp_size
self.store.large_object_size = orig_max_size
self.assertEqual(expected_location, location)
self.assertEqual(expected_swift_size, size)
self.assertEqual(expected_checksum, checksum)
# Expecting 6 objects to be created on Swift -- 5 chunks and 1
# manifest.
self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 6)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = ''.join([chunk for chunk in new_image_swift])
new_image_swift_size = len(new_image_contents)
self.assertEqual(expected_swift_contents, new_image_contents)
self.assertEqual(expected_swift_size, new_image_swift_size)
def test_add_large_object_zero_size(self):
"""
Tests that adding an image to Swift which has both an unknown size and
exceeds Swift's maximum limit of 5GB is correctly uploaded.
We avoid the overhead of creating a 5GB object for this test by
temporarily setting MAX_SWIFT_OBJECT_SIZE to 1KB, and then adding
an object of 5KB.
Bug lp:891738
"""
# Set up a 'large' image of 5KB
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
expected_image_id = str(uuid.uuid4())
loc = 'swift+config://ref1/glance/%s'
expected_location = loc % (expected_image_id)
image_swift = six.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
# Temporarily set Swift MAX_SWIFT_OBJECT_SIZE to 1KB and add our image,
# explicitly setting the image_length to 0
self.store = Store(self.conf)
self.store.configure()
orig_max_size = self.store.large_object_size
orig_temp_size = self.store.large_object_chunk_size
global MAX_SWIFT_OBJECT_SIZE
orig_max_swift_object_size = MAX_SWIFT_OBJECT_SIZE
try:
MAX_SWIFT_OBJECT_SIZE = 1024
self.store.large_object_size = 1024
self.store.large_object_chunk_size = 1024
location, size, checksum, _ = self.store.add(expected_image_id,
image_swift, 0)
finally:
self.store.large_object_chunk_size = orig_temp_size
self.store.large_object_size = orig_max_size
MAX_SWIFT_OBJECT_SIZE = orig_max_swift_object_size
self.assertEqual(expected_location, location)
self.assertEqual(expected_swift_size, size)
self.assertEqual(expected_checksum, checksum)
# Expecting 7 calls to put_object -- 5 chunks, a zero chunk which is
# then deleted, and the manifest. Note the difference with above
# where the image_size is specified in advance (there's no zero chunk
# in that case).
self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 7)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = ''.join([chunk for chunk in new_image_swift])
new_image_swift_size = len(new_image_contents)
self.assertEqual(expected_swift_contents, new_image_contents)
self.assertEqual(expected_swift_size, new_image_swift_size)
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
image_swift = six.StringIO("nevergonnamakeit")
self.assertRaises(exceptions.Duplicate,
self.store.add,
FAKE_UUID, image_swift, 0)
def _option_required(self, key):
conf = self.getConfig()
conf[key] = None
try:
self.config(**conf)
self.store = Store(self.conf)
return self.store.add == self.store.add_disabled
except Exception:
return False
def test_no_store_credentials(self):
"""
        Tests that options without valid credentials disable the add method
"""
swift.SWIFT_STORE_REF_PARAMS = {'ref1': {'auth_address':
'authurl.com', 'user': '',
'key': ''}}
self.store = Store(self.conf)
self.store.configure()
self.assertEqual(self.store.add, self.store.add_disabled)
def test_no_auth_address(self):
"""
        Tests that options without an auth address disable the add method
"""
swift.SWIFT_STORE_REF_PARAMS = {'ref1': {'auth_address':
'', 'user': 'user1',
'key': 'key1'}}
self.store = Store(self.conf)
self.store.configure()
self.assertEqual(self.store.add, self.store.add_disabled)
def test_delete(self):
"""
Test we can delete an existing image in the swift store
"""
uri = "swift://%s:key@authurl/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
self.store.delete(loc)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_delete_with_reference_params(self):
"""
        Test we can delete an existing image addressed via a swift+config reference
"""
uri = "swift+config://ref1/glance/%s" % (FAKE_UUID)
loc = get_location_from_uri(uri)
self.store.delete(loc)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
        Test that trying to delete a swift object that doesn't exist
raises an error
"""
loc = get_location_from_uri("swift://%s:key@authurl/glance/noexist" % (
self.swift_store_user))
self.assertRaises(exceptions.NotFound, self.store.delete, loc)
def test_read_acl_public(self):
"""
Test that we can set a public read acl.
"""
self.config(swift_store_multi_tenant=True)
store = Store(self.conf)
store.configure()
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
ctxt = context.RequestContext()
store.set_acls(loc, public=True, context=ctxt)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Read'],
".r:*,.rlistings")
def test_read_acl_tenants(self):
"""
Test that we can set read acl for tenants.
"""
self.config(swift_store_multi_tenant=True)
store = Store(self.conf)
store.configure()
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
read_tenants = ['matt', 'mark']
ctxt = context.RequestContext()
store.set_acls(loc, read_tenants=read_tenants, context=ctxt)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Read'],
'matt:*,mark:*')
def test_write_acls(self):
"""
Test that we can set write acl for tenants.
"""
self.config(swift_store_multi_tenant=True)
store = Store(self.conf)
store.configure()
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
read_tenants = ['frank', 'jim']
ctxt = context.RequestContext()
store.set_acls(loc, write_tenants=read_tenants, context=ctxt)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Write'],
'frank:*,jim:*')
class TestStoreAuthV1(base.StoreBaseTest, SwiftTests):
_CONF = cfg.CONF
def getConfig(self):
conf = SWIFT_CONF.copy()
conf['swift_store_auth_version'] = '1'
conf['swift_store_user'] = 'tenant:user1'
return conf
def setUp(self):
"""Establish a clean test environment"""
super(TestStoreAuthV1, self).setUp()
conf = self.getConfig()
conf_file = 'glance-swift.conf'
self.swift_config_file = self.copy_data_file(conf_file, self.test_dir)
conf.update({'swift_store_config_file': self.swift_config_file})
self.stubs = stubout.StubOutForTesting()
stub_out_swiftclient(self.stubs, conf['swift_store_auth_version'])
self.store = Store(self.conf)
self.config(**conf)
self.store.configure()
self.addCleanup(self.stubs.UnsetAll)
self.register_store_schemes(self.store)
swift.SWIFT_STORE_REF_PARAMS = sutils.SwiftParams().params
self.addCleanup(self.conf.reset)
class TestStoreAuthV2(TestStoreAuthV1):
def getConfig(self):
conf = super(TestStoreAuthV2, self).getConfig()
conf['swift_store_auth_version'] = '2'
conf['swift_store_user'] = 'tenant:user1'
return conf
def test_v2_with_no_tenant(self):
uri = "swift://failme:key@auth_address/glance/%s" % (FAKE_UUID)
loc = get_location_from_uri(uri)
self.assertRaises(exceptions.BadStoreUri,
self.store.get,
loc)
def test_v2_multi_tenant_location(self):
conf = self.getConfig()
conf['swift_store_multi_tenant'] = True
uri = "swift://auth_address/glance/%s" % (FAKE_UUID)
loc = get_location_from_uri(uri)
self.assertEqual('swift', loc.store_name)
class FakeConnection(object):
def __init__(self, authurl, user, key, retries=5, preauthurl=None,
preauthtoken=None, snet=False, starting_backoff=1,
tenant_name=None, os_options=None, auth_version="1",
insecure=False, ssl_compression=True):
if os_options is None:
os_options = {}
self.authurl = authurl
self.user = user
self.key = key
self.preauthurl = preauthurl
self.preauthtoken = preauthtoken
self.snet = snet
self.tenant_name = tenant_name
self.os_options = os_options
self.auth_version = auth_version
self.insecure = insecure
class TestSingleTenantStoreConnections(base.StoreBaseTest):
_CONF = cfg.CONF
def setUp(self):
super(TestSingleTenantStoreConnections, self).setUp()
moxfixture = self.useFixture(moxstubout.MoxStubout())
self.stubs = moxfixture.stubs
self.stubs.Set(swiftclient, 'Connection', FakeConnection)
self.store = swift.SingleTenantStore(self.conf)
self.store.configure()
specs = {'scheme': 'swift',
'auth_or_store_url': 'example.com/v2/',
'user': 'tenant:user1',
'key': 'key1',
'container': 'cont',
'obj': 'object'}
self.location = swift.StoreLocation(specs)
self.addCleanup(self.conf.reset)
def test_basic_connection(self):
connection = self.store.get_connection(self.location)
self.assertEqual(connection.authurl, 'https://example.com/v2/')
self.assertEqual(connection.auth_version, '2')
self.assertEqual(connection.user, 'user1')
self.assertEqual(connection.tenant_name, 'tenant')
self.assertFalse(connection.snet)
self.assertEqual(connection.key, 'key1')
self.assertIsNone(connection.preauthurl)
self.assertIsNone(connection.preauthtoken)
self.assertFalse(connection.insecure)
self.assertEqual(connection.os_options,
{'service_type': 'object-store',
'endpoint_type': 'publicURL'})
def test_connection_with_no_trailing_slash(self):
self.location.auth_or_store_url = 'example.com/v2'
connection = self.store.get_connection(self.location)
self.assertEqual(connection.authurl, 'https://example.com/v2/')
def test_connection_insecure(self):
self.config(swift_store_auth_insecure=True)
self.store.configure()
connection = self.store.get_connection(self.location)
self.assertTrue(connection.insecure)
def test_connection_with_auth_v1(self):
self.config(swift_store_auth_version='1')
self.store.configure()
self.location.user = 'auth_v1_user'
connection = self.store.get_connection(self.location)
self.assertEqual(connection.auth_version, '1')
self.assertEqual(connection.user, 'auth_v1_user')
self.assertIsNone(connection.tenant_name)
def test_connection_invalid_user(self):
self.store.configure()
self.location.user = 'invalid:format:user'
self.assertRaises(exceptions.BadStoreUri,
self.store.get_connection, self.location)
def test_connection_missing_user(self):
self.store.configure()
self.location.user = None
self.assertRaises(exceptions.BadStoreUri,
self.store.get_connection, self.location)
def test_connection_with_region(self):
self.config(swift_store_region='Sahara')
self.store.configure()
connection = self.store.get_connection(self.location)
self.assertEqual(connection.os_options,
{'region_name': 'Sahara',
'service_type': 'object-store',
'endpoint_type': 'publicURL'})
def test_connection_with_service_type(self):
self.config(swift_store_service_type='shoe-store')
self.store.configure()
connection = self.store.get_connection(self.location)
self.assertEqual(connection.os_options,
{'service_type': 'shoe-store',
'endpoint_type': 'publicURL'})
def test_connection_with_endpoint_type(self):
self.config(swift_store_endpoint_type='internalURL')
self.store.configure()
connection = self.store.get_connection(self.location)
self.assertEqual(connection.os_options,
{'service_type': 'object-store',
'endpoint_type': 'internalURL'})
def test_connection_with_snet(self):
self.config(swift_enable_snet=True)
self.store.configure()
connection = self.store.get_connection(self.location)
self.assertTrue(connection.snet)
def test_bad_location_uri(self):
self.store.configure()
self.location.uri = 'http://bad_uri://'
self.assertRaises(exceptions.BadStoreUri,
self.location.parse_uri,
self.location.uri)
def test_bad_location_uri_invalid_credentials(self):
self.store.configure()
self.location.uri = 'swift://bad_creds@uri/cont/obj'
self.assertRaises(exceptions.BadStoreUri,
self.location.parse_uri,
self.location.uri)
def test_bad_location_uri_invalid_object_path(self):
self.store.configure()
self.location.uri = 'swift://user:key@uri/cont'
self.assertRaises(exceptions.BadStoreUri,
self.location.parse_uri,
self.location.uri)
class TestMultiTenantStoreConnections(base.StoreBaseTest):
def setUp(self):
super(TestMultiTenantStoreConnections, self).setUp()
moxfixture = self.useFixture(moxstubout.MoxStubout())
self.stubs = moxfixture.stubs
self.stubs.Set(swiftclient, 'Connection', FakeConnection)
self.context = context.RequestContext(
user='tenant:user1', tenant='tenant', auth_token='0123')
self.store = swift.MultiTenantStore(self.conf)
specs = {'scheme': 'swift',
'auth_or_store_url': 'example.com',
'container': 'cont',
'obj': 'object'}
self.location = swift.StoreLocation(specs)
self.addCleanup(self.conf.reset)
def test_basic_connection(self):
self.store.configure()
connection = self.store.get_connection(self.location,
context=self.context)
self.assertIsNone(connection.authurl)
self.assertEqual(connection.auth_version, '2')
self.assertEqual(connection.user, 'tenant:user1')
self.assertEqual(connection.tenant_name, 'tenant')
self.assertIsNone(connection.key)
self.assertFalse(connection.snet)
self.assertEqual(connection.preauthurl, 'https://example.com')
self.assertEqual(connection.preauthtoken, '0123')
self.assertEqual(connection.os_options, {})
def test_connection_with_snet(self):
self.config(swift_enable_snet=True)
self.store.configure()
connection = self.store.get_connection(self.location,
context=self.context)
self.assertTrue(connection.snet)
class FakeGetEndpoint(object):
def __init__(self, response):
self.response = response
def __call__(self, service_catalog, service_type=None,
endpoint_region=None, endpoint_type=None):
self.service_type = service_type
self.endpoint_region = endpoint_region
self.endpoint_type = endpoint_type
return self.response
class TestCreatingLocations(base.StoreBaseTest):
_CONF = cfg.CONF
def setUp(self):
super(TestCreatingLocations, self).setUp()
moxfixture = self.useFixture(moxstubout.MoxStubout())
self.stubs = moxfixture.stubs
conf = copy.deepcopy(SWIFT_CONF)
self.store = Store(self.conf)
self.config(**conf)
reload(swift)
self.addCleanup(self.conf.reset)
def test_single_tenant_location(self):
conf = copy.deepcopy(SWIFT_CONF)
conf['swift_store_container'] = 'container'
conf_file = "glance-swift.conf"
self.swift_config_file = self.copy_data_file(conf_file, self.test_dir)
conf.update({'swift_store_config_file': self.swift_config_file})
conf['default_swift_reference'] = 'ref1'
self.config(**conf)
reload(swift)
store = swift.SingleTenantStore(self.conf)
store.configure()
location = store.create_location('image-id')
self.assertEqual(location.scheme, 'swift+https')
self.assertEqual(location.swift_url, 'https://example.com')
self.assertEqual(location.container, 'container')
self.assertEqual(location.obj, 'image-id')
self.assertEqual(location.user, 'tenant:user1')
self.assertEqual(location.key, 'key1')
def test_single_tenant_location_http(self):
conf_file = "glance-swift.conf"
test_dir = self.useFixture(fixtures.TempDir()).path
self.swift_config_file = self.copy_data_file(conf_file, test_dir)
self.config(swift_store_container='container',
default_swift_reference='ref2',
swift_store_config_file=self.swift_config_file)
swift.SWIFT_STORE_REF_PARAMS = sutils.SwiftParams().params
store = swift.SingleTenantStore(self.conf)
store.configure()
location = store.create_location('image-id')
self.assertEqual(location.scheme, 'swift+http')
self.assertEqual(location.swift_url, 'http://example.com')
def test_multi_tenant_location(self):
self.config(swift_store_container='container')
fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
ctxt = context.RequestContext(
user='user', tenant='tenant', auth_token='123',
service_catalog={})
store = swift.MultiTenantStore(self.conf)
store.configure()
location = store.create_location('image-id', context=ctxt)
self.assertEqual(location.scheme, 'swift+https')
self.assertEqual(location.swift_url, 'https://some_endpoint')
self.assertEqual(location.container, 'container_image-id')
self.assertEqual(location.obj, 'image-id')
self.assertIsNone(location.user)
self.assertIsNone(location.key)
self.assertEqual(fake_get_endpoint.service_type, 'object-store')
def test_multi_tenant_location_http(self):
fake_get_endpoint = FakeGetEndpoint('http://some_endpoint')
self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
ctxt = context.RequestContext(
user='user', tenant='tenant', auth_token='123',
service_catalog={})
store = swift.MultiTenantStore(self.conf)
store.configure()
location = store.create_location('image-id', context=ctxt)
self.assertEqual(location.scheme, 'swift+http')
self.assertEqual(location.swift_url, 'http://some_endpoint')
def test_multi_tenant_location_with_region(self):
self.config(swift_store_region='WestCarolina')
fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
ctxt = context.RequestContext(
user='user', tenant='tenant', auth_token='123',
service_catalog={})
store = swift.MultiTenantStore(self.conf)
store.configure()
store._get_endpoint(ctxt)
self.assertEqual(fake_get_endpoint.endpoint_region, 'WestCarolina')
def test_multi_tenant_location_custom_service_type(self):
self.config(swift_store_service_type='toy-store')
fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
ctxt = context.RequestContext(
user='user', tenant='tenant', auth_token='123',
service_catalog={})
store = swift.MultiTenantStore(self.conf)
store.configure()
store._get_endpoint(ctxt)
self.assertEqual(fake_get_endpoint.service_type, 'toy-store')
def test_multi_tenant_location_custom_endpoint_type(self):
self.config(swift_store_endpoint_type='InternalURL')
fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
ctxt = context.RequestContext(
user='user', tenant='tenant', auth_token='123',
service_catalog={})
store = swift.MultiTenantStore(self.conf)
store.configure()
store._get_endpoint(ctxt)
self.assertEqual(fake_get_endpoint.endpoint_type, 'InternalURL')
class TestChunkReader(base.StoreBaseTest):
_CONF = cfg.CONF
def setUp(self):
super(TestChunkReader, self).setUp()
conf = copy.deepcopy(SWIFT_CONF)
store = Store(self.conf)
self.config(**conf)
def test_read_all_data(self):
"""
Replicate what goes on in the Swift driver with the
repeated creation of the ChunkReader object
"""
CHUNKSIZE = 100
checksum = hashlib.md5()
data_file = tempfile.NamedTemporaryFile()
data_file.write('*' * units.Ki)
data_file.flush()
infile = open(data_file.name, 'rb')
bytes_read = 0
while True:
cr = swift.ChunkReader(infile, checksum, CHUNKSIZE)
chunk = cr.read(CHUNKSIZE)
bytes_read += len(chunk)
if not chunk:
break
self.assertEqual(1024, bytes_read)
data_file.close()
|
import logging
from django.conf import settings as django_settings
from django.contrib.auth import authenticate, get_user_model, login, logout
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.generic import TemplateView
from django.views.generic.base import RedirectView
try:
from django.utils.http import url_has_allowed_host_and_scheme as safe_url
except ImportError: # pragma: no cover
from django.utils.http import is_safe_url as safe_url
from django.views.decorators.csrf import csrf_protect
from . import settings
from .forms import (
LoginForm, SignupForm, SignupFormEmailOnly, SignupFormFull,
SignupFormWithUsername
)
from .helpers import create_magiclink, get_or_create_user
from .models import MagicLink, MagicLinkError
from .utils import get_url_path
User = get_user_model()
log = logging.getLogger(__name__)
@method_decorator(csrf_protect, name='dispatch')
class Login(TemplateView):
template_name = settings.LOGIN_TEMPLATE_NAME
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context['login_form'] = LoginForm()
context['require_signup'] = settings.REQUIRE_SIGNUP
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
logout(request)
context = self.get_context_data(**kwargs)
context['require_signup'] = settings.REQUIRE_SIGNUP
form = LoginForm(request.POST)
if not form.is_valid():
context['login_form'] = form
return self.render_to_response(context)
email = form.cleaned_data['email']
if not settings.REQUIRE_SIGNUP:
get_or_create_user(email)
redirect_url = self.login_redirect_url(request.GET.get('next', ''))
try:
magiclink = create_magiclink(
email, request, redirect_url=redirect_url
)
except MagicLinkError as e:
form.add_error('email', str(e))
context['login_form'] = form
return self.render_to_response(context)
magiclink.send(request)
sent_url = get_url_path(settings.LOGIN_SENT_REDIRECT)
response = HttpResponseRedirect(sent_url)
if settings.REQUIRE_SAME_BROWSER:
cookie_name = f'magiclink{magiclink.pk}'
response.set_cookie(cookie_name, magiclink.cookie_value)
log.info(f'Cookie {cookie_name} set for {email}')
return response
def login_redirect_url(self, next_url) -> str:
redirect_url = ''
        allowed_hosts = django_settings.ALLOWED_HOSTS
        if '*' in allowed_hosts:
            # A wildcard would allow any redirect target, so only trust the
            # host serving this request.
            allowed_hosts = [self.request.get_host()]
url_is_safe = safe_url(
url=next_url,
allowed_hosts=allowed_hosts,
require_https=self.request.is_secure(),
)
if url_is_safe:
redirect_url = next_url
return redirect_url
class LoginSent(TemplateView):
template_name = settings.LOGIN_SENT_TEMPLATE_NAME
@method_decorator(never_cache, name='dispatch')
class LoginVerify(TemplateView):
template_name = settings.LOGIN_FAILED_TEMPLATE_NAME
def get(self, request, *args, **kwargs):
token = request.GET.get('token')
email = request.GET.get('email')
user = authenticate(request, token=token, email=email)
if not user:
if settings.LOGIN_FAILED_REDIRECT:
redirect_url = get_url_path(settings.LOGIN_FAILED_REDIRECT)
return HttpResponseRedirect(redirect_url)
if not settings.LOGIN_FAILED_TEMPLATE_NAME:
raise Http404()
context = self.get_context_data(**kwargs)
# The below settings are left in for backward compatibility
context['ONE_TOKEN_PER_USER'] = settings.ONE_TOKEN_PER_USER
context['REQUIRE_SAME_BROWSER'] = settings.REQUIRE_SAME_BROWSER
context['REQUIRE_SAME_IP'] = settings.REQUIRE_SAME_IP
context['ALLOW_SUPERUSER_LOGIN'] = settings.ALLOW_SUPERUSER_LOGIN # NOQA: E501
context['ALLOW_STAFF_LOGIN'] = settings.ALLOW_STAFF_LOGIN
try:
magiclink = MagicLink.objects.get(token=token)
except MagicLink.DoesNotExist:
error = 'A magic link with that token could not be found'
context['login_error'] = error
return self.render_to_response(context)
try:
magiclink.validate(request, email)
except MagicLinkError as error:
context['login_error'] = str(error)
return self.render_to_response(context)
login(request, user)
log.info(f'Login successful for {email}')
response = self.login_complete_action()
if settings.REQUIRE_SAME_BROWSER:
magiclink = MagicLink.objects.get(token=token)
cookie_name = f'magiclink{magiclink.pk}'
response.delete_cookie(cookie_name, magiclink.cookie_value)
return response
def login_complete_action(self) -> HttpResponse:
token = self.request.GET.get('token')
magiclink = MagicLink.objects.get(token=token)
return HttpResponseRedirect(magiclink.redirect_url)
@method_decorator(csrf_protect, name='dispatch')
class Signup(TemplateView):
template_name = settings.SIGNUP_TEMPLATE_NAME
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context['SignupForm'] = SignupForm()
context['SignupFormEmailOnly'] = SignupFormEmailOnly()
context['SignupFormWithUsername'] = SignupFormWithUsername()
context['SignupFormFull'] = SignupFormFull()
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
logout(request)
context = self.get_context_data(**kwargs)
form_name = request.POST.get('form_name')
        from_list = [
            'SignupForm', 'SignupFormEmailOnly', 'SignupFormWithUsername',
            'SignupFormFull',
        ]
forms = __import__('magiclink.forms', fromlist=from_list)
try:
SignupForm = getattr(forms, form_name)
except AttributeError:
return HttpResponseRedirect(self.request.path_info)
form = SignupForm(request.POST)
if not form.is_valid():
context[form_name] = form
return self.render_to_response(context)
email = form.cleaned_data['email']
full_name = form.cleaned_data.get('name', '')
try:
first_name, last_name = full_name.split(' ', 1)
except ValueError:
first_name = full_name
last_name = ''
get_or_create_user(
email=email,
username=form.cleaned_data.get('username', ''),
first_name=first_name,
last_name=last_name
)
default_signup_redirect = get_url_path(settings.SIGNUP_LOGIN_REDIRECT)
next_url = request.GET.get('next', default_signup_redirect)
magiclink = create_magiclink(email, request, redirect_url=next_url)
magiclink.send(request)
sent_url = get_url_path(settings.LOGIN_SENT_REDIRECT)
response = HttpResponseRedirect(sent_url)
if settings.REQUIRE_SAME_BROWSER:
cookie_name = f'magiclink{magiclink.pk}'
response.set_cookie(cookie_name, magiclink.cookie_value)
log.info(f'Cookie {cookie_name} set for {email}')
return response
class Logout(RedirectView):
def get(self, request, *args, **kwargs):
logout(self.request)
next_page = request.GET.get('next')
if next_page:
return HttpResponseRedirect(next_page)
redirect_url = get_url_path(django_settings.LOGOUT_REDIRECT_URL)
return HttpResponseRedirect(redirect_url)
|
# generated by appcreator
from django.conf.urls import url
from . import views
from . import stats_views
app_name = 'archiv'
urlpatterns = [
url(
r'^match-binary/$',
stats_views.MatchBinaryView.as_view(),
name='match-binary'
),
url(
r'^actor/$',
views.ActorListView.as_view(),
name='actor_browse'
),
url(
r'^actor/detail/(?P<pk>[0-9]+)$',
views.ActorDetailView.as_view(),
name='actor_detail'
),
url(
r'^actor/create/$',
views.ActorCreate.as_view(),
name='actor_create'
),
url(
r'^actor/edit/(?P<pk>[0-9]+)$',
views.ActorUpdate.as_view(),
name='actor_edit'
),
url(
r'^actor/delete/(?P<pk>[0-9]+)$',
views.ActorDelete.as_view(),
name='actor_delete'),
url(
r'^archaeologicalobject4dpuzzleid/$',
views.ArchaeologicalObject4DPuzzleIDListView.as_view(),
name='archaeologicalobject4dpuzzleid_browse'
),
url(
r'^archaeologicalobject4dpuzzleid/detail/(?P<pk>[0-9]+)$',
views.ArchaeologicalObject4DPuzzleIDDetailView.as_view(),
name='archaeologicalobject4dpuzzleid_detail'
),
url(
r'^archaeologicalobject4dpuzzleid/create/$',
views.ArchaeologicalObject4DPuzzleIDCreate.as_view(),
name='archaeologicalobject4dpuzzleid_create'
),
url(
r'^archaeologicalobject4dpuzzleid/edit/(?P<pk>[0-9]+)$',
views.ArchaeologicalObject4DPuzzleIDUpdate.as_view(),
name='archaeologicalobject4dpuzzleid_edit'
),
url(
r'^archaeologicalobject4dpuzzleid/delete/(?P<pk>[0-9]+)$',
views.ArchaeologicalObject4DPuzzleIDDelete.as_view(),
name='archaeologicalobject4dpuzzleid_delete'),
url(
r'^archaeologicalobjectid/$',
views.ArchaeologicalObjectIDListView.as_view(),
name='archaeologicalobjectid_browse'
),
url(
r'^archaeologicalobjectid/detail/(?P<pk>[0-9]+)$',
views.ArchaeologicalObjectIDDetailView.as_view(),
name='archaeologicalobjectid_detail'
),
url(
r'^archaeologicalobjectid/create/$',
views.ArchaeologicalObjectIDCreate.as_view(),
name='archaeologicalobjectid_create'
),
url(
r'^archaeologicalobjectid/edit/(?P<pk>[0-9]+)$',
views.ArchaeologicalObjectIDUpdate.as_view(),
name='archaeologicalobjectid_edit'
),
url(
r'^archaeologicalobjectid/delete/(?P<pk>[0-9]+)$',
views.ArchaeologicalObjectIDDelete.as_view(),
name='archaeologicalobjectid_delete'),
url(
r'^archiveinf/$',
views.ArchiveINFListView.as_view(),
name='archiveinf_browse'
),
url(
r'^archiveinf/detail/(?P<pk>[0-9]+)$',
views.ArchiveINFDetailView.as_view(),
name='archiveinf_detail'
),
url(
r'^archiveinf/create/$',
views.ArchiveINFCreate.as_view(),
name='archiveinf_create'
),
url(
r'^archiveinf/edit/(?P<pk>[0-9]+)$',
views.ArchiveINFUpdate.as_view(),
name='archiveinf_edit'
),
url(
r'^archiveinf/delete/(?P<pk>[0-9]+)$',
views.ArchiveINFDelete.as_view(),
name='archiveinf_delete'),
url(
r'^autocad/$',
views.AutoCADListView.as_view(),
name='autocad_browse'
),
url(
r'^autocad/detail/(?P<pk>[0-9]+)$',
views.AutoCADDetailView.as_view(),
name='autocad_detail'
),
url(
r'^autocad/create/$',
views.AutoCADCreate.as_view(),
name='autocad_create'
),
url(
r'^autocad/edit/(?P<pk>[0-9]+)$',
views.AutoCADUpdate.as_view(),
name='autocad_edit'
),
url(
r'^autocad/delete/(?P<pk>[0-9]+)$',
views.AutoCADDelete.as_view(),
name='autocad_delete'),
url(
r'^convolutecards/$',
views.ConvolutecardsListView.as_view(),
name='convolutecards_browse'
),
url(
r'^convolutecards/detail/(?P<pk>[0-9]+)$',
views.ConvolutecardsDetailView.as_view(),
name='convolutecards_detail'
),
url(
r'^convolutecards/create/$',
views.ConvolutecardsCreate.as_view(),
name='convolutecards_create'
),
url(
r'^convolutecards/edit/(?P<pk>[0-9]+)$',
views.ConvolutecardsUpdate.as_view(),
name='convolutecards_edit'
),
url(
r'^convolutecards/delete/(?P<pk>[0-9]+)$',
views.ConvolutecardsDelete.as_view(),
name='convolutecards_delete'),
url(
r'^datenbase/$',
views.DatenbaseListView.as_view(),
name='datenbase_browse'
),
url(
r'^datenbase/detail/(?P<pk>[0-9]+)$',
views.DatenbaseDetailView.as_view(),
name='datenbase_detail'
),
url(
r'^datenbase/create/$',
views.DatenbaseCreate.as_view(),
name='datenbase_create'
),
url(
r'^datenbase/edit/(?P<pk>[0-9]+)$',
views.DatenbaseUpdate.as_view(),
name='datenbase_edit'
),
url(
r'^datenbase/delete/(?P<pk>[0-9]+)$',
views.DatenbaseDelete.as_view(),
name='datenbase_delete'),
url(
r'^document4dpuzzleid/$',
views.Document4DPuzzleIDListView.as_view(),
name='document4dpuzzleid_browse'
),
url(
r'^document4dpuzzleid/detail/(?P<pk>[0-9]+)$',
views.Document4DPuzzleIDDetailView.as_view(),
name='document4dpuzzleid_detail'
),
url(
r'^document4dpuzzleid/create/$',
views.Document4DPuzzleIDCreate.as_view(),
name='document4dpuzzleid_create'
),
url(
r'^document4dpuzzleid/edit/(?P<pk>[0-9]+)$',
views.Document4DPuzzleIDUpdate.as_view(),
name='document4dpuzzleid_edit'
),
url(
r'^document4dpuzzleid/delete/(?P<pk>[0-9]+)$',
views.Document4DPuzzleIDDelete.as_view(),
name='document4dpuzzleid_delete'),
url(
r'^documenttypes/$',
views.DocumentTypesListView.as_view(),
name='documenttypes_browse'
),
url(
r'^documenttypes/detail/(?P<pk>[0-9]+)$',
views.DocumentTypesDetailView.as_view(),
name='documenttypes_detail'
),
url(
r'^documenttypes/create/$',
views.DocumentTypesCreate.as_view(),
name='documenttypes_create'
),
url(
r'^documenttypes/edit/(?P<pk>[0-9]+)$',
views.DocumentTypesUpdate.as_view(),
name='documenttypes_edit'
),
url(
r'^documenttypes/delete/(?P<pk>[0-9]+)$',
views.DocumentTypesDelete.as_view(),
name='documenttypes_delete'),
url(
r'^excavationobjectid/$',
views.ExcavationObjectIDListView.as_view(),
name='excavationobjectid_browse'
),
url(
r'^excavationobjectid/detail/(?P<pk>[0-9]+)$',
views.ExcavationObjectIDDetailView.as_view(),
name='excavationobjectid_detail'
),
url(
r'^excavationobjectid/create/$',
views.ExcavationObjectIDCreate.as_view(),
name='excavationobjectid_create'
),
url(
r'^excavationobjectid/edit/(?P<pk>[0-9]+)$',
views.ExcavationObjectIDUpdate.as_view(),
name='excavationobjectid_edit'
),
url(
r'^excavationobjectid/delete/(?P<pk>[0-9]+)$',
views.ExcavationObjectIDDelete.as_view(),
name='excavationobjectid_delete'),
url(
r'^excavationseasons/$',
views.ExcavationSeasonsListView.as_view(),
name='excavationseasons_browse'
),
url(
r'^excavationseasons/detail/(?P<pk>[0-9]+)$',
views.ExcavationSeasonsDetailView.as_view(),
name='excavationseasons_detail'
),
url(
r'^excavationseasons/create/$',
views.ExcavationSeasonsCreate.as_view(),
name='excavationseasons_create'
),
url(
r'^excavationseasons/edit/(?P<pk>[0-9]+)$',
views.ExcavationSeasonsUpdate.as_view(),
name='excavationseasons_edit'
),
url(
r'^excavationseasons/delete/(?P<pk>[0-9]+)$',
views.ExcavationSeasonsDelete.as_view(),
name='excavationseasons_delete'),
url(
r'^fielddrawing/$',
views.FielddrawingListView.as_view(),
name='fielddrawing_browse'
),
url(
r'^fielddrawing/detail/(?P<pk>[0-9]+)$',
views.FielddrawingDetailView.as_view(),
name='fielddrawing_detail'
),
url(
r'^fielddrawing/create/$',
views.FielddrawingCreate.as_view(),
name='fielddrawing_create'
),
url(
r'^fielddrawing/edit/(?P<pk>[0-9]+)$',
views.FielddrawingUpdate.as_view(),
name='fielddrawing_edit'
),
url(
r'^fielddrawing/delete/(?P<pk>[0-9]+)$',
views.FielddrawingDelete.as_view(),
name='fielddrawing_delete'),
url(
r'^film/$',
views.FilmListView.as_view(),
name='film_browse'
),
url(
r'^film/detail/(?P<pk>[0-9]+)$',
views.FilmDetailView.as_view(),
name='film_detail'
),
url(
r'^film/create/$',
views.FilmCreate.as_view(),
name='film_create'
),
url(
r'^film/edit/(?P<pk>[0-9]+)$',
views.FilmUpdate.as_view(),
name='film_edit'
),
url(
r'^film/delete/(?P<pk>[0-9]+)$',
views.FilmDelete.as_view(),
name='film_delete'),
url(
r'^finddrawing/$',
views.FinddrawingListView.as_view(),
name='finddrawing_browse'
),
url(
r'^finddrawing/detail/(?P<pk>[0-9]+)$',
views.FinddrawingDetailView.as_view(),
name='finddrawing_detail'
),
url(
r'^finddrawing/create/$',
views.FinddrawingCreate.as_view(),
name='finddrawing_create'
),
url(
r'^finddrawing/edit/(?P<pk>[0-9]+)$',
views.FinddrawingUpdate.as_view(),
name='finddrawing_edit'
),
url(
r'^finddrawing/delete/(?P<pk>[0-9]+)$',
views.FinddrawingDelete.as_view(),
name='finddrawing_delete'),
url(
r'^findsheets/$',
views.FindsheetsListView.as_view(),
name='findsheets_browse'
),
url(
r'^findsheets/detail/(?P<pk>[0-9]+)$',
views.FindsheetsDetailView.as_view(),
name='findsheets_detail'
),
url(
r'^findsheets/create/$',
views.FindsheetsCreate.as_view(),
name='findsheets_create'
),
url(
r'^findsheets/edit/(?P<pk>[0-9]+)$',
views.FindsheetsUpdate.as_view(),
name='findsheets_edit'
),
url(
r'^findsheets/delete/(?P<pk>[0-9]+)$',
views.FindsheetsDelete.as_view(),
name='findsheets_delete'),
url(
r'^fotoborndigital/$',
views.FotoborndigitalListView.as_view(),
name='fotoborndigital_browse'
),
url(
r'^fotoborndigital/detail/(?P<pk>[0-9]+)$',
views.FotoborndigitalDetailView.as_view(),
name='fotoborndigital_detail'
),
url(
r'^fotoborndigital/create/$',
views.FotoborndigitalCreate.as_view(),
name='fotoborndigital_create'
),
url(
r'^fotoborndigital/edit/(?P<pk>[0-9]+)$',
views.FotoborndigitalUpdate.as_view(),
name='fotoborndigital_edit'
),
url(
r'^fotoborndigital/delete/(?P<pk>[0-9]+)$',
views.FotoborndigitalDelete.as_view(),
name='fotoborndigital_delete'),
url(
r'^fotosgescannt/$',
views.FotosgescanntListView.as_view(),
name='fotosgescannt_browse'
),
url(
r'^fotosgescannt/detail/(?P<pk>[0-9]+)$',
views.FotosgescanntDetailView.as_view(),
name='fotosgescannt_detail'
),
url(
r'^fotosgescannt/create/$',
views.FotosgescanntCreate.as_view(),
name='fotosgescannt_create'
),
url(
r'^fotosgescannt/edit/(?P<pk>[0-9]+)$',
views.FotosgescanntUpdate.as_view(),
name='fotosgescannt_edit'
),
url(
r'^fotosgescannt/delete/(?P<pk>[0-9]+)$',
views.FotosgescanntDelete.as_view(),
name='fotosgescannt_delete'),
url(
r'^fundinventar4dpuzzleid/$',
views.Fundinventar4DPuzzleIDListView.as_view(),
name='fundinventar4dpuzzleid_browse'
),
url(
r'^fundinventar4dpuzzleid/detail/(?P<pk>[0-9]+)$',
views.Fundinventar4DPuzzleIDDetailView.as_view(),
name='fundinventar4dpuzzleid_detail'
),
url(
r'^fundinventar4dpuzzleid/create/$',
views.Fundinventar4DPuzzleIDCreate.as_view(),
name='fundinventar4dpuzzleid_create'
),
url(
r'^fundinventar4dpuzzleid/edit/(?P<pk>[0-9]+)$',
views.Fundinventar4DPuzzleIDUpdate.as_view(),
name='fundinventar4dpuzzleid_edit'
),
url(
r'^fundinventar4dpuzzleid/delete/(?P<pk>[0-9]+)$',
views.Fundinventar4DPuzzleIDDelete.as_view(),
name='fundinventar4dpuzzleid_delete'),
url(
r'^fundinventarinventarnummern/$',
views.FundinventarInventarnummernListView.as_view(),
name='fundinventarinventarnummern_browse'
),
url(
r'^fundinventarinventarnummern/detail/(?P<pk>[0-9]+)$',
views.FundinventarInventarnummernDetailView.as_view(),
name='fundinventarinventarnummern_detail'
),
url(
r'^fundinventarinventarnummern/create/$',
views.FundinventarInventarnummernCreate.as_view(),
name='fundinventarinventarnummern_create'
),
url(
r'^fundinventarinventarnummern/edit/(?P<pk>[0-9]+)$',
views.FundinventarInventarnummernUpdate.as_view(),
name='fundinventarinventarnummern_edit'
),
url(
r'^fundinventarinventarnummern/delete/(?P<pk>[0-9]+)$',
views.FundinventarInventarnummernDelete.as_view(),
name='fundinventarinventarnummern_delete'),
url(
r'^fundinventarkonvolutnummern/$',
views.FundinventarKonvolutnummernListView.as_view(),
name='fundinventarkonvolutnummern_browse'
),
url(
r'^fundinventarkonvolutnummern/detail/(?P<pk>[0-9]+)$',
views.FundinventarKonvolutnummernDetailView.as_view(),
name='fundinventarkonvolutnummern_detail'
),
url(
r'^fundinventarkonvolutnummern/create/$',
views.FundinventarKonvolutnummernCreate.as_view(),
name='fundinventarkonvolutnummern_create'
),
url(
r'^fundinventarkonvolutnummern/edit/(?P<pk>[0-9]+)$',
views.FundinventarKonvolutnummernUpdate.as_view(),
name='fundinventarkonvolutnummern_edit'
),
url(
r'^fundinventarkonvolutnummern/delete/(?P<pk>[0-9]+)$',
views.FundinventarKonvolutnummernDelete.as_view(),
name='fundinventarkonvolutnummern_delete'),
url(
r'^fundinventarmaterialproben/$',
views.FundinventarMaterialprobenListView.as_view(),
name='fundinventarmaterialproben_browse'
),
url(
r'^fundinventarmaterialproben/detail/(?P<pk>[0-9]+)$',
views.FundinventarMaterialprobenDetailView.as_view(),
name='fundinventarmaterialproben_detail'
),
url(
r'^fundinventarmaterialproben/create/$',
views.FundinventarMaterialprobenCreate.as_view(),
name='fundinventarmaterialproben_create'
),
url(
r'^fundinventarmaterialproben/edit/(?P<pk>[0-9]+)$',
views.FundinventarMaterialprobenUpdate.as_view(),
name='fundinventarmaterialproben_edit'
),
url(
r'^fundinventarmaterialproben/delete/(?P<pk>[0-9]+)$',
views.FundinventarMaterialprobenDelete.as_view(),
name='fundinventarmaterialproben_delete'),
url(
r'^fundinventarsteininventar/$',
views.FundinventarSteininventarListView.as_view(),
name='fundinventarsteininventar_browse'
),
url(
r'^fundinventarsteininventar/detail/(?P<pk>[0-9]+)$',
views.FundinventarSteininventarDetailView.as_view(),
name='fundinventarsteininventar_detail'
),
url(
r'^fundinventarsteininventar/create/$',
views.FundinventarSteininventarCreate.as_view(),
name='fundinventarsteininventar_create'
),
url(
r'^fundinventarsteininventar/edit/(?P<pk>[0-9]+)$',
views.FundinventarSteininventarUpdate.as_view(),
name='fundinventarsteininventar_edit'
),
url(
r'^fundinventarsteininventar/delete/(?P<pk>[0-9]+)$',
views.FundinventarSteininventarDelete.as_view(),
name='fundinventarsteininventar_delete'),
url(
r'^gis/$',
views.GISListView.as_view(),
name='gis_browse'
),
url(
r'^gis/detail/(?P<pk>[0-9]+)$',
views.GISDetailView.as_view(),
name='gis_detail'
),
url(
r'^gis/create/$',
views.GISCreate.as_view(),
name='gis_create'
),
url(
r'^gis/edit/(?P<pk>[0-9]+)$',
views.GISUpdate.as_view(),
name='gis_edit'
),
url(
r'^gis/delete/(?P<pk>[0-9]+)$',
views.GISDelete.as_view(),
name='gis_delete'),
url(
r'^geophysics/$',
views.GeophysicsListView.as_view(),
name='geophysics_browse'
),
url(
r'^geophysics/detail/(?P<pk>[0-9]+)$',
views.GeophysicsDetailView.as_view(),
name='geophysics_detail'
),
url(
r'^geophysics/create/$',
views.GeophysicsCreate.as_view(),
name='geophysics_create'
),
url(
r'^geophysics/edit/(?P<pk>[0-9]+)$',
views.GeophysicsUpdate.as_view(),
name='geophysics_edit'
),
url(
r'^geophysics/delete/(?P<pk>[0-9]+)$',
views.GeophysicsDelete.as_view(),
name='geophysics_delete'),
url(
r'^inventorybooks/$',
views.InventorybooksListView.as_view(),
name='inventorybooks_browse'
),
url(
r'^inventorybooks/detail/(?P<pk>[0-9]+)$',
views.InventorybooksDetailView.as_view(),
name='inventorybooks_detail'
),
url(
r'^inventorybooks/create/$',
views.InventorybooksCreate.as_view(),
name='inventorybooks_create'
),
url(
r'^inventorybooks/edit/(?P<pk>[0-9]+)$',
views.InventorybooksUpdate.as_view(),
name='inventorybooks_edit'
),
url(
r'^inventorybooks/delete/(?P<pk>[0-9]+)$',
views.InventorybooksDelete.as_view(),
name='inventorybooks_delete'),
url(
r'^phasenid/$',
views.PhasenIDListView.as_view(),
name='phasenid_browse'
),
url(
r'^phasenid/detail/(?P<pk>[0-9]+)$',
views.PhasenIDDetailView.as_view(),
name='phasenid_detail'
),
url(
r'^phasenid/create/$',
views.PhasenIDCreate.as_view(),
name='phasenid_create'
),
url(
r'^phasenid/edit/(?P<pk>[0-9]+)$',
views.PhasenIDUpdate.as_view(),
name='phasenid_edit'
),
url(
r'^phasenid/delete/(?P<pk>[0-9]+)$',
views.PhasenIDDelete.as_view(),
name='phasenid_delete'),
url(
r'^protocols/$',
views.ProtocolsListView.as_view(),
name='protocols_browse'
),
url(
r'^protocols/detail/(?P<pk>[0-9]+)$',
views.ProtocolsDetailView.as_view(),
name='protocols_detail'
),
url(
r'^protocols/create/$',
views.ProtocolsCreate.as_view(),
name='protocols_create'
),
url(
r'^protocols/edit/(?P<pk>[0-9]+)$',
views.ProtocolsUpdate.as_view(),
name='protocols_edit'
),
url(
r'^protocols/delete/(?P<pk>[0-9]+)$',
views.ProtocolsDelete.as_view(),
name='protocols_delete'),
url(
r'^stratenid/$',
views.StratenIDListView.as_view(),
name='stratenid_browse'
),
url(
r'^stratenid/detail/(?P<pk>[0-9]+)$',
views.StratenIDDetailView.as_view(),
name='stratenid_detail'
),
url(
r'^stratenid/create/$',
views.StratenIDCreate.as_view(),
name='stratenid_create'
),
url(
r'^stratenid/edit/(?P<pk>[0-9]+)$',
views.StratenIDUpdate.as_view(),
name='stratenid_edit'
),
url(
r'^stratenid/delete/(?P<pk>[0-9]+)$',
views.StratenIDDelete.as_view(),
name='stratenid_delete'),
url(
r'^tables/$',
views.TablesListView.as_view(),
name='tables_browse'
),
url(
r'^tables/detail/(?P<pk>[0-9]+)$',
views.TablesDetailView.as_view(),
name='tables_detail'
),
url(
r'^tables/create/$',
views.TablesCreate.as_view(),
name='tables_create'
),
url(
r'^tables/edit/(?P<pk>[0-9]+)$',
views.TablesUpdate.as_view(),
name='tables_edit'
),
url(
r'^tables/delete/(?P<pk>[0-9]+)$',
views.TablesDelete.as_view(),
name='tables_delete'),
url(
r'^threedimensionalmodel/$',
views.ThreeDimensionalModelListView.as_view(),
name='threedimensionalmodel_browse'
),
url(
r'^threedimensionalmodel/detail/(?P<pk>[0-9]+)$',
views.ThreeDimensionalModelDetailView.as_view(),
name='threedimensionalmodel_detail'
),
url(
r'^threedimensionalmodel/create/$',
views.ThreeDimensionalModelCreate.as_view(),
name='threedimensionalmodel_create'
),
url(
r'^threedimensionalmodel/edit/(?P<pk>[0-9]+)$',
views.ThreeDimensionalModelUpdate.as_view(),
name='threedimensionalmodel_edit'
),
url(
r'^threedimensionalmodel/delete/(?P<pk>[0-9]+)$',
views.ThreeDimensionalModelDelete.as_view(),
name='threedimensionalmodel_delete'),
url(
r'^videos/$',
views.VideosListView.as_view(),
name='videos_browse'
),
url(
r'^videos/detail/(?P<pk>[0-9]+)$',
views.VideosDetailView.as_view(),
name='videos_detail'
),
url(
r'^videos/create/$',
views.VideosCreate.as_view(),
name='videos_create'
),
url(
r'^videos/edit/(?P<pk>[0-9]+)$',
views.VideosUpdate.as_view(),
name='videos_edit'
),
url(
r'^videos/delete/(?P<pk>[0-9]+)$',
views.VideosDelete.as_view(),
name='videos_delete'),
url(
r'^wallpaintinginventory/$',
views.WallpaintingInventoryListView.as_view(),
name='wallpaintinginventory_browse'
),
url(
r'^wallpaintinginventory/detail/(?P<pk>[0-9]+)$',
views.WallpaintingInventoryDetailView.as_view(),
name='wallpaintinginventory_detail'
),
url(
r'^wallpaintinginventory/create/$',
views.WallpaintingInventoryCreate.as_view(),
name='wallpaintinginventory_create'
),
url(
r'^wallpaintinginventory/edit/(?P<pk>[0-9]+)$',
views.WallpaintingInventoryUpdate.as_view(),
name='wallpaintinginventory_edit'
),
url(
r'^wallpaintinginventory/delete/(?P<pk>[0-9]+)$',
views.WallpaintingInventoryDelete.as_view(),
name='wallpaintinginventory_delete'),
]
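# Hedged refactoring sketch (helper name is an assumption, not part of the original
# configuration): every model above repeats the same five CRUD routes, so they
# could be generated from a single helper like this one.
def _example_crud_patterns(prefix, list_view, detail_view, create_view, update_view, delete_view):
    return [
        url(r'^%s/$' % prefix, list_view.as_view(), name='%s_browse' % prefix),
        url(r'^%s/detail/(?P<pk>[0-9]+)$' % prefix, detail_view.as_view(), name='%s_detail' % prefix),
        url(r'^%s/create/$' % prefix, create_view.as_view(), name='%s_create' % prefix),
        url(r'^%s/edit/(?P<pk>[0-9]+)$' % prefix, update_view.as_view(), name='%s_edit' % prefix),
        url(r'^%s/delete/(?P<pk>[0-9]+)$' % prefix, delete_view.as_view(), name='%s_delete' % prefix),
    ]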
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ConversionUploadServiceTransport
from .grpc import ConversionUploadServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[ConversionUploadServiceTransport]]
_transport_registry['grpc'] = ConversionUploadServiceGrpcTransport
__all__ = (
'ConversionUploadServiceTransport',
'ConversionUploadServiceGrpcTransport',
)
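# Hedged usage sketch (not part of the generated client code): resolving a
# transport class from the registry compiled above by its label.
def _example_get_transport_class(label: str = 'grpc') -> Type[ConversionUploadServiceTransport]:
    """Return the transport class registered under ``label`` (defaults to gRPC)."""
    return _transport_registry[label]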
|
from django import forms
from .models import Comment, Answer
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('text',)
widgets = {
#'author':forms.TextInput(attrs={'class':'textinputclass'}),
'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea'}),
}
class AnonymousCommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('author','text')
widgets = {
'author':forms.TextInput(attrs={'class':'textinputclass'}),
'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea'}),
}
class AnswerForm(forms.Form):
    answer_content = forms.CharField(
        label='',
        max_length=200,
        widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'New Answer ..'}),
        required=False,
    )
def clean(self):
cleaned_data = super(AnswerForm, self).clean()
return cleaned_data
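# Hedged usage sketch (the view function, ``request`` and ``post`` objects are
# assumptions, not part of this module): binding CommentForm to POST data and
# saving it against a parent object.
def _example_handle_comment(request, post):
    form = CommentForm(request.POST or None)
    if form.is_valid():
        comment = form.save(commit=False)
        comment.post = post  # assumed foreign key on the Comment model
        comment.save()
    return form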
|
import sys
import unittest
from test import support
pwd = support.import_module('pwd')
class PwdTest(unittest.TestCase):
@unittest.skipUnless(hasattr(pwd, 'getpwall'), 'pwd module does not expose getpwall()')
def test_values(self):
entries = pwd.getpwall()
for e in entries:
self.assertEqual(len(e), 7)
self.assertEqual(e[0], e.pw_name)
self.assertIsInstance(e.pw_name, str)
self.assertEqual(e[1], e.pw_passwd)
self.assertIsInstance(e.pw_passwd, str)
self.assertEqual(e[2], e.pw_uid)
self.assertIsInstance(e.pw_uid, int)
self.assertEqual(e[3], e.pw_gid)
self.assertIsInstance(e.pw_gid, int)
self.assertEqual(e[4], e.pw_gecos)
self.assertIsInstance(e.pw_gecos, str)
self.assertEqual(e[5], e.pw_dir)
self.assertIsInstance(e.pw_dir, str)
self.assertEqual(e[6], e.pw_shell)
self.assertIsInstance(e.pw_shell, str)
# The following won't work, because of duplicate entries
# for one uid
# self.assertEqual(pwd.getpwuid(e.pw_uid), e)
# instead of this collect all entries for one uid
# and check afterwards (done in test_values_extended)
def test_values_extended(self):
entries = pwd.getpwall()
entriesbyname = {}
entriesbyuid = {}
if len(entries) > 1000: # Huge passwd file (NIS?) -- skip this test
self.skipTest('passwd file is huge; extended test skipped')
for e in entries:
entriesbyname.setdefault(e.pw_name, []).append(e)
entriesbyuid.setdefault(e.pw_uid, []).append(e)
# check whether the entry returned by getpwuid()
# for each uid is among those from getpwall() for this uid
for e in entries:
if not e[0] or e[0] == '+':
continue # skip NIS entries etc.
self.assertIn(pwd.getpwnam(e.pw_name), entriesbyname[e.pw_name])
self.assertIn(pwd.getpwuid(e.pw_uid), entriesbyuid[e.pw_uid])
@unittest.skipUnless(hasattr(pwd, 'getpwall'), 'pwd module does not expose getpwall()')
def test_errors(self):
self.assertRaises(TypeError, pwd.getpwuid)
self.assertRaises(TypeError, pwd.getpwuid, 3.14)
self.assertRaises(TypeError, pwd.getpwnam)
self.assertRaises(TypeError, pwd.getpwnam, 42)
self.assertRaises(TypeError, pwd.getpwall, 42)
# try to get some errors
bynames = {}
byuids = {}
for (n, p, u, g, gecos, d, s) in pwd.getpwall():
bynames[n] = u
byuids[u] = n
allnames = list(bynames.keys())
namei = 0
fakename = allnames[namei]
while fakename in bynames:
chars = list(fakename)
for i in range(len(chars)):
if chars[i] == 'z':
chars[i] = 'A'
break
elif chars[i] == 'Z':
continue
else:
chars[i] = chr(ord(chars[i]) + 1)
break
else:
namei = namei + 1
try:
fakename = allnames[namei]
except IndexError:
# should never happen... if so, just forget it
break
fakename = ''.join(chars)
self.assertRaises(KeyError, pwd.getpwnam, fakename)
# In some cases, byuids isn't a complete list of all users in the
# system, so if we try to pick a value not in byuids (via a perturbing
# loop, say), pwd.getpwuid() might still be able to find data for that
        # uid. Using sys.maxsize may provoke the same problems, but hopefully
# it will be a more repeatable failure.
fakeuid = sys.maxsize
self.assertNotIn(fakeuid, byuids)
self.assertRaises(KeyError, pwd.getpwuid, fakeuid)
# -1 shouldn't be a valid uid because it has a special meaning in many
# uid-related functions
self.assertRaises(KeyError, pwd.getpwuid, -1)
# should be out of uid_t range
self.assertRaises(KeyError, pwd.getpwuid, 2**128)
self.assertRaises(KeyError, pwd.getpwuid, -2**128)
if __name__ == "__main__":
unittest.main()
|
# strip punctuation custom module
# 12 / 03 / 2015
# Brandon
# https://www.facebook.com/AiiYourBaseRBel0ngToUs
"""
This program was designed to strip punctuation
from a string.
This program was made by Brandon in February 2015
and was finished in February 2015
If you have any suggestions or want to help
contact me at
https://www.facebook.com/AiiYourBaseRBel0ngToUs
This program follows the PEP 8 style guidelines,
shown here:
https://www.python.org/dev/peps/pep-0008/
You may use this code, or any features of this code
in your own work, as long as you link my page
and the BSD licensing, which can be copied directly
below.
https://www.facebook.com/AiiYourBaseRBel0ngToUs
*BSD licensed*
More info can be read here
http://opensource.org/licenses/BSD-3-Clause
"""
import sys
# sys is required for sys.exit() in the close() function
def main():
# runs through every function and strips everything
    message = input("Enter message here to strip: ")
message1 = strip(message)
message2 = stripWithSpace(message)
message3 = stripSpaceOnly(message)
print(message1)
print(message2)
print(message3)
close()
def strip(message):
    # strips all basic punctuation
    # define the punctuation characters to remove
    punctuations = '''!()-[]{};:'"\\,<>./?@#$%^&*_~'''
    # build the result in an empty string
    no_punct = ""
    # for every character in message
    for char in message:
        # keep the character only if it is not punctuation
        if char not in punctuations:
            no_punct = no_punct + char
    # return the string with the punctuation removed
    return no_punct
def stripWithSpace(message):
    # strips all punctuation as well as spaces
    # define the characters to remove (note the leading space)
    punctuations = ''' !()-[]{};:'"\\,<>./?@#$%^&*_~'''
    # build the result in an empty string
    no_punct = ""
    for char in message:
        if char not in punctuations:
            no_punct = no_punct + char
    # return the string with punctuation and spaces removed
    return no_punct
def stripSpaceOnly(message):
    # strips spaces only
    # the only character to remove here is the space
    punctuations = ''' '''
    # build the result in an empty string
    no_punct = ""
    for char in message:
        if char not in punctuations:
            no_punct = no_punct + char
    # return the string with the spaces removed
    return no_punct
def stripLetters(message):
    # strips only alphabetical letters
    message = message.upper()
    # converting the message to upper case makes it easier to strip
    punctuations = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
    # build the result in an empty string
    no_punct = ""
    for char in message:
        if char not in punctuations:
            no_punct = no_punct + char
    # return the string with the letters removed
    return no_punct
def Reverse(message):
    # reverse a string
    # may be useful
    reverseTranslated = ''
    i = len(message) - 1
    while i >= 0:
        reverseTranslated = reverseTranslated + message[i]
        i = i - 1
    return reverseTranslated
def close():
input("Any key to exit! ")
sys.exit()
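# Hedged alternative sketch (not part of the original program): the same result
# as strip() above using str.translate, which avoids the explicit loop.
def stripTranslate(message):
    punctuations = '''!()-[]{};:'"\\,<>./?@#$%^&*_~'''
    return message.translate(str.maketrans('', '', punctuations))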
if __name__ == '__main__':
main()
|
# coding=utf-8
import numpy as np
import unittest
import pystella as ps
# from pystella.rf import band
# from pystella.rf.lc import LightCurve
# from pystella.util.reader_table import read_table_header_float, table2curves, read_obs_table_header, curves2table
__author__ = 'bakl'
def lc_create(b, m=-19, dt=0.):
n = 10
time = np.linspace(0. + dt, 200. + dt, n)
mags = m * np.ones(n)
return ps.LightCurve(b, time, mags)
class TestReaderTable(unittest.TestCase):
def test_read_table_header_float(self):
fname = 'data/stella/cat_R500_M15_Ni006_E12.gri'
data = ps.util.read_table_header_float(fname)
cols = len(data.dtype.names)
self.assertTrue(cols == 15,
msg="The number of colums in the data should be 15, but it's : %d." % cols)
def test_read_table_header_float_skiprows(self):
fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'
data = ps.util.read_table_header_float(fname, skip=87)
cols = len(data.dtype.names)
self.assertTrue(cols == 14,
msg="The number of colums in [%s] should be 14, but it's : %d." % (fname, cols))
def test_table2curves_no_bands(self):
ps.Band.load_settings()
fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'
data = ps.util.read_table_header_float(fname, skip=87)
data.dtype.names = [col.replace('M', '') for col in data.dtype.names]
curves = ps.table2curves('test', data)
for bname in curves.BandNames:
self.assertTrue(bname in data.dtype.names,
msg="No band %s in [%s] after table2curves." % (bname, ''.join(data.dtype.names)))
def test_curves2table(self):
ps.Band.load_settings()
fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'
data = ps.util.read_table_header_float(fname, skip=87)
data.dtype.names = [col.replace('M', '') for col in data.dtype.names]
curves = ps.table2curves('test', data, is_filter_zero=False)
tbl = ps.curves2table(curves)
self.assertCountEqual(curves.Length, len(tbl.names))
def test_read_obs_table_header(self):
fname = 'data/obs/1999em-uphHamuy.dat'
tbl, cols_data = ps.util.read_obs_table_header(fname, is_out=True)
for c in ('JD', 'V'):
self.assertTrue(c in tbl.dtype.names,
msg="No band %s in [%s] after read_obs_table_header." % (c, ','.join(tbl.dtype.names)))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration"""
from __future__ import absolute_import
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.runtime import convert
from tvm.te.hybrid import script
from .. import op as reg
from .. import strategy
from ..op import OpPattern
from .._tensor import elemwise_shape_func
from ..strategy.generic import is_depthwise_conv2d
from ...transform import LayoutConfig
# relu
reg.register_broadcast_schedule("nn.relu")
reg.register_pattern("nn.relu", OpPattern.ELEMWISE)
# softmax
reg.register_strategy("nn.softmax", strategy.softmax_strategy)
reg.register_pattern("nn.softmax", OpPattern.OPAQUE)
# log_softmax
reg.register_schedule("nn.log_softmax", strategy.schedule_log_softmax)
reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE)
# dense
reg.register_strategy("nn.dense", strategy.dense_strategy)
reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# fifo_buffer
@reg.register_compute("nn.fifo_buffer")
def compute_fifo_buffer(attrs, inputs, out_type):
return [topi.nn.fifo_buffer(inputs[0], inputs[1], axis=attrs.get_int("axis"))]
reg.register_injective_schedule("nn.fifo_buffer")
reg.register_pattern("nn.fifo_buffer", OpPattern.OPAQUE)
# batch_matmul
reg.register_strategy("nn.batch_matmul", strategy.batch_matmul_strategy)
reg.register_pattern("nn.batch_matmul", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_dense
@reg.register_compute("nn.sparse_dense")
def compute_sparse_dense(attrs, inputs, out_type):
"""Compute definition of sparse_dense"""
return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3])]
reg.register_strategy("nn.sparse_dense", strategy.sparse_dense_strategy)
reg.register_pattern("nn.sparse_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.sparse_dense")
def alter_op_layout_sparse_dense(attrs, inputs, tinfos, out_type):
"""Alternate the layout of sparse_dense"""
return topi.nn.sparse_dense_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_compute("nn.internal.sparse_dense_padded")
def compute_sparse_dense_padded(attrs, inputs, out_type):
"""Compute definition of sparse_dense_padded"""
raise NotImplementedError("nn.internal.sparse_dense_padded is only available on cuda")
reg.register_strategy("nn.internal.sparse_dense_padded", strategy.sparse_dense_padded_strategy)
reg.register_pattern("nn.internal.sparse_dense_padded", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_transpose
@reg.register_compute("nn.sparse_transpose")
def compute_sparse_transpose(attrs, inputs, out_type):
"""Compute definition of sparse_transpose"""
return topi.nn.sparse_transpose(inputs[0], inputs[1], inputs[2])
reg.register_schedule("nn.sparse_transpose", strategy.schedule_sparse_transpose)
reg.register_pattern("nn.sparse_transpose", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d
reg.register_strategy("nn.conv1d", strategy.conv1d_strategy)
reg.register_pattern("nn.conv1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d
reg.register_strategy("nn.conv2d", strategy.conv2d_strategy)
reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of conv2d"""
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_legalize("nn.conv2d")
def legalize_conv2d(attrs, inputs, types):
"""Legalize conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d")
def convert_conv2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, weight = inputs
# First check if there is a LayoutConfig scope, and if so, whether
# it indicates we should ignore this layer or not.
layout_config = LayoutConfig.current
if layout_config is not None:
skip_layer = layout_config.check_skip()
if skip_layer:
return relay.nn.conv2d(data, weight, **attrs)
# Prepare new layout.
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv2d(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.nn.conv2d(data, weight, **new_attrs)
elif desired_data_layout == "NHWC":
# Check for depthwise convolution.
data_info, weight_info = tinfos
if is_depthwise_conv2d(
data_info.shape,
attrs["data_layout"],
weight_info.shape,
attrs["kernel_layout"],
attrs["groups"],
):
new_attrs["kernel_layout"] = "HWOI"
else:
new_attrs["kernel_layout"] = "HWIO"
return relay.nn.conv2d(data, weight, **new_attrs)
elif desired_data_layout == "HWNC":
new_attrs["kernel_layout"] = "HWOI"
return relay.nn.conv2d(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv2d_transpose
reg.register_strategy("nn.conv2d_transpose", strategy.conv2d_transpose_strategy)
reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv2d_transpose")
def legalize_conv2d_transpose(attrs, inputs, types):
"""Legalize conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_transpose_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d_transpose")
def convert_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, weight = inputs
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
elif desired_data_layout == "NHWC":
new_attrs["kernel_layout"] = "HWIO"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv3d_transpose
reg.register_strategy("nn.conv3d_transpose", strategy.conv3d_transpose_strategy)
reg.register_pattern("nn.conv3d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv3d_transpose")
def legalize_conv3d_transpose(attrs, inputs, types):
"""Legalize conv3d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv3d_transpose_legalize(attrs, inputs, types)
# conv3d
reg.register_strategy("nn.conv3d", strategy.conv3d_strategy)
reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv3d")
def alter_op_layout_conv3d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of conv3d"""
return topi.nn.conv3d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_convert_op_layout("nn.conv3d")
def convert_conv3d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv3d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, weight = inputs
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv3d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv3d(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCDHW":
new_attrs["kernel_layout"] = "OIDHW"
return relay.nn.conv3d(data, weight, **new_attrs)
elif desired_data_layout == "NDHWC":
new_attrs["kernel_layout"] = "DHWIO"
return relay.nn.conv3d(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported" % desired_data_layout)
# conv3d_winograd related operators
reg.register_strategy(
"nn.contrib_conv3d_winograd_without_weight_transform",
strategy.conv3d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
"nn.contrib_conv3d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv3d_winograd_weight_transform")
def compute_contrib_conv3d_winograd_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv3d_winograd_weight_transform"""
out = topi.nn.conv3d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
return [out]
reg.register_schedule(
"nn.contrib_conv3d_winograd_weight_transform",
strategy.schedule_conv3d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv3d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d_transpose
reg.register_strategy("nn.conv1d_transpose", strategy.conv1d_transpose_strategy)
reg.register_pattern("nn.conv1d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# bias_add
reg.register_injective_schedule("nn.bias_add")
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
# max_pool1d
reg.register_schedule("nn.max_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d
reg.register_schedule("nn.max_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool3d
reg.register_schedule("nn.max_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool1d
reg.register_schedule("nn.avg_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d
reg.register_schedule("nn.avg_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool3d
reg.register_schedule("nn.avg_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d_grad
reg.register_schedule("nn.max_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.max_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d_grad
reg.register_schedule("nn.avg_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.avg_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_max_pool2d
reg.register_schedule("nn.global_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_avg_pool2d
reg.register_schedule("nn.global_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool2d
reg.register_schedule("nn.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool2d
reg.register_schedule("nn.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool3d
reg.register_schedule("nn.adaptive_max_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool3d
reg.register_schedule("nn.adaptive_avg_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# leaky_relu
reg.register_broadcast_schedule("nn.leaky_relu")
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
# prelu
reg.register_broadcast_schedule("nn.prelu")
reg.register_pattern("nn.prelu", OpPattern.BROADCAST)
# flatten
reg.register_broadcast_schedule("nn.batch_flatten")
reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE)
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype):
"""Compute definition of lrn"""
assert len(inputs) == 1
return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis, attrs.alpha, attrs.beta, attrs.bias)]
reg.register_schedule("nn.lrn", strategy.schedule_lrn)
reg.register_pattern("nn.lrn", OpPattern.OPAQUE)
# upsampling
@reg.register_compute("nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype):
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [topi.nn.upsampling(inputs[0], scale_h, scale_w, layout, method, align_corners)]
reg.register_injective_schedule("nn.upsampling")
# upsampling3d
@reg.register_compute("nn.upsampling3d")
def compute_upsampling3d(attrs, inputs, out_dtype):
scale_d = attrs.scale_d
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
coordinate_transformation_mode = attrs.coordinate_transformation_mode
return [
topi.nn.upsampling3d(
inputs[0], scale_d, scale_h, scale_w, layout, method, coordinate_transformation_mode
)
]
reg.register_injective_schedule("nn.upsampling3d")
# pad
reg.register_broadcast_schedule("nn.pad")
# mirror_pad
@reg.register_compute("nn.mirror_pad")
def compute_mirror_pad(attrs, inputs, out_dtype):
pad_before, pad_after = list(zip(*attrs.pad_width))
mode = attrs.mode
out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
return [out]
reg.register_broadcast_schedule("nn.mirror_pad")
@script
def _mirror_pad_func(data_shape, pad_width):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
out[i] = data_shape[i] + int64(pad_width[i][0]) + int64(pad_width[i][1])
return out
@reg.register_shape_func("nn.mirror_pad", False)
def mirror_pad_func(attrs, inputs, _):
pad_width_tuple = [get_const_tuple(p) for p in attrs.pad_width]
return [_mirror_pad_func(inputs[0], convert(pad_width_tuple))]
# conv2d_winograd related operators
reg.register_strategy(
"nn.contrib_conv2d_winograd_without_weight_transform",
strategy.conv2d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
"nn.contrib_conv2d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
# conv2d_gemm related operators
reg.register_strategy(
"nn.contrib_conv2d_gemm_without_weight_transform",
strategy.conv2d_gemm_without_weight_transform_strategy,
)
reg.register_pattern(
"nn.contrib_conv2d_gemm_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv2d_gemm_weight_transform")
def compute_contrib_conv2d_gemm_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_gemm_weight_transform"""
out = topi.nn.conv2d_gemm_weight_transform(inputs[0], attrs.tile_rows, attrs.tile_cols)
return [out]
reg.register_schedule(
"nn.contrib_conv2d_gemm_weight_transform", strategy.schedule_conv2d_gemm_weight_transform
)
reg.register_pattern("nn.contrib_conv2d_gemm_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_weight_transform")
def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_winograd_weight_transform"""
out = topi.nn.conv2d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
return [out]
reg.register_schedule(
"nn.contrib_conv2d_winograd_weight_transform",
strategy.schedule_conv2d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_winograd_nnpack_weight_transform"""
convolution_algorithm = attrs.get_int("convolution_algorithm")
out = topi.nn.conv2d_winograd_nnpack_weight_transform(
inputs[0], convolution_algorithm, out_dtype
)
return [out]
reg.register_schedule(
"nn.contrib_conv2d_winograd_nnpack_weight_transform",
strategy.schedule_conv2d_winograd_nnpack_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_weight_transform", OpPattern.OPAQUE)
# conv2d_NCHWc
reg.register_strategy("nn.contrib_conv2d_NCHWc", strategy.conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# depthwise_conv2d_NCHWc
reg.register_strategy("nn.contrib_depthwise_conv2d_NCHWc", strategy.depthwise_conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_depthwise_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# deformable_conv2d
reg.register_strategy("nn.deformable_conv2d", strategy.deformable_conv2d_strategy)
reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# bitpack
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype):
"""Compute definition for bitpack"""
bits = attrs.bits
pack_axis = attrs.pack_axis
bit_axis = attrs.bit_axis
pack_type = attrs.pack_type
name = attrs.name
out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type, name)
return [out]
reg.register_schedule("nn.bitpack", strategy.schedule_bitpack)
reg.register_pattern("nn.bitpack", OpPattern.INJECTIVE)
# bitserial_conv2d
reg.register_strategy("nn.bitserial_conv2d", strategy.bitserial_conv2d_strategy)
reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.bitserial_conv2d")
def legalize_bitserial_conv2d(attrs, inputs, types):
"""Legalize bitserial_conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.bitserial_conv2d_legalize(attrs, inputs, types)
# bitserial_dense
reg.register_strategy("nn.bitserial_dense", strategy.bitserial_dense_strategy)
reg.register_pattern("nn.bitserial_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# cross_entropy
@reg.register_compute("nn.cross_entropy")
def compute_cross_entropy(attrs, inputs, out_dtype):
x, y = inputs
return [-topi.sum(topi.log(x) * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy")
reg.register_pattern("nn.cross_entropy", OpPattern.OPAQUE)
# dilate
@reg.register_compute("nn.dilate")
def compute_dilate(attrs, inputs, out_dtype):
return [topi.nn.dilate(inputs[0], attrs.strides, attrs.dilation_value)]
reg.register_broadcast_schedule("nn.dilate")
reg.register_pattern("nn.dilate", OpPattern.INJECTIVE)
# cross_entropy_with_logits
@reg.register_compute("nn.cross_entropy_with_logits")
def compute_cross_entropy_with_logits(attrs, inputs, out_dtype):
x, y = inputs
return [-topi.sum(x * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy_with_logits")
reg.register_pattern("nn.cross_entropy_with_logits", OpPattern.OPAQUE)
# depth_to_space
@reg.register_compute("nn.depth_to_space")
def compute_depth_to_space(attrs, inputs, out_dtype):
block_size = attrs.block_size
layout = attrs.layout
mode = attrs.mode
return [topi.nn.depth_to_space(inputs[0], block_size, layout=layout, mode=mode)]
reg.register_injective_schedule("nn.depth_to_space")
reg.register_pattern("nn.depth_to_space", OpPattern.INJECTIVE)
# space_to_depth
@reg.register_compute("nn.space_to_depth")
def compute_space_to_depth(attrs, inputs, out_dtype):
block_size = attrs.block_size
layout = attrs.layout
return [topi.nn.space_to_depth(inputs[0], block_size, layout=layout)]
reg.register_injective_schedule("nn.space_to_depth")
reg.register_pattern("nn.space_to_depth", OpPattern.INJECTIVE)
# correlation
reg.register_strategy("nn.correlation", strategy.correlation_strategy)
reg.register_pattern("nn.correlation", OpPattern.OUT_ELEMWISE_FUSABLE)
# space_to_batch_nd and batch_to_space_nd
reg.register_injective_schedule("nn.space_to_batch_nd")
reg.register_injective_schedule("nn.batch_to_space_nd")
#####################
# Shape functions #
#####################
@script
def _conv_shape_func(dshape, kshape, strides, padding, dilation):
out = output_tensor((dshape.shape[0],), "int64")
out[0] = dshape[0]
out[1] = kshape[0]
for i in const_range(dshape.shape[0] - 2):
dilated_k = (kshape[i + 2] - 1) * dilation[i] + 1
out[i + 2] = (dshape[i + 2] + 2 * padding[i] - dilated_k) // strides[i] + 1
return out
def conv_shape_func(attrs, inputs, _):
"""
    Shape function for the conv1d, conv2d and conv3d ops.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
return [
_conv_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
)
]
reg.register_shape_func("nn.conv1d", False, conv_shape_func)
reg.register_shape_func("nn.conv2d", False, conv_shape_func)
reg.register_shape_func("nn.conv3d", False, conv_shape_func)
@script
def _conv2d_NCHWc_shape_func(dshape, kshape, strides, padding, dilation, oc_bn):
out = output_tensor((dshape.shape[0],), "int64")
ic_chunk = dshape[1]
height = dshape[2]
width = dshape[3]
ic_bn = dshape[4]
kheight = kshape[2]
kwidth = kshape[3]
dilated_kh = (kheight - 1) * dilation[0] + 1
dilated_kw = (kwidth - 1) * dilation[1] + 1
kflatten = int64(1)
for i in const_range(kshape.shape[0]):
kflatten *= kshape[i]
oc = kflatten // (kheight * kwidth * ic_chunk * ic_bn)
oc_chunk = oc // oc_bn
out_height = (height + 2 * padding[0] - dilated_kh) // strides[0] + 1
out_width = (width + 2 * padding[1] - dilated_kw) // strides[1] + 1
out[0] = dshape[0]
out[1] = oc_chunk
out[2] = out_height
out[3] = out_width
out[4] = int64(oc_bn)
return out
@reg.register_shape_func("nn.contrib_conv2d_NCHWc", False)
def conv2d_NCHWc_shape_func(attrs, inputs, _):
"""
Shape function for contrib_conv2d_NCHWc op.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_layout = attrs.out_layout
oc_bn = int(out_layout[4:-1])
return [
_conv2d_NCHWc_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
convert(oc_bn),
)
]
@script
def _conv2d_transpose_nchw_shape_func(dshape, kshape, strides, padding, dilation, output_padding):
out = output_tensor((dshape.shape[0],), "int64")
kheight = kshape[2]
kwidth = kshape[3]
dilated_kh = (kheight - 1) * dilation[0] + 1
dilated_kw = (kwidth - 1) * dilation[1] + 1
out_height = strides[0] * (dshape[2] - 1) + dilated_kh - 2 * padding[0] + output_padding[0]
out_width = strides[1] * (dshape[3] - 1) + dilated_kw - 2 * padding[1] + output_padding[1]
out[0] = dshape[0]
out[1] = kshape[1]
out[2] = out_height
out[3] = out_width
return out
@reg.register_shape_func("nn.conv2d_transpose", False)
def conv2d_transpose_nchw_shape_func(attrs, inputs, _):
"""
Shape function for conv2d_transpose op.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
output_padding = get_const_tuple(attrs.output_padding)
return [
_conv2d_transpose_nchw_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
convert(output_padding),
)
]
@script
def _pool2d_shape_func(data_shape, pool_size, strides, padding, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
if i == height_axis:
out[i] = (data_shape[i] + padding[0] + padding[2] - pool_size[0]) // strides[0] + 1
elif i == width_axis:
out[i] = (data_shape[i] + padding[1] + padding[3] - pool_size[1]) // strides[1] + 1
else:
out[i] = data_shape[i]
return out
def pool2d_shape_func(attrs, inputs, _):
"""
Shape function for pool2d op.
"""
pool_size = get_const_tuple(attrs.pool_size)
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
layout = attrs.layout
height_axis = layout.index("H")
width_axis = layout.index("W")
if len(padding) == 1:
padding = [padding[0]] * 4
elif len(padding) == 2:
padding = [padding[0], padding[1], padding[0], padding[1]]
return [
_pool2d_shape_func(
inputs[0],
convert(pool_size),
convert(strides),
convert(padding),
convert(height_axis),
convert(width_axis),
)
]
reg.register_shape_func("nn.max_pool2d", False, pool2d_shape_func)
reg.register_shape_func("nn.avg_pool2d", False, pool2d_shape_func)
@script
def _global_pool2d_shape_func(data_shape, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
if i == height_axis or i == width_axis:
out[i] = int64(1)
else:
out[i] = data_shape[i]
return out
def global_pool2d_shape_func(attrs, inputs, _):
"""
Shape function for global pool2d op.
"""
layout = attrs.layout
height_axis = width_axis = 1
for i, letter in enumerate(layout):
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
return [_global_pool2d_shape_func(inputs[0], convert(height_axis), convert(width_axis))]
reg.register_shape_func("nn.global_max_pool2d", False, global_pool2d_shape_func)
reg.register_shape_func("nn.global_avg_pool2d", False, global_pool2d_shape_func)
@script
def _batch_flatten_shape_func(data_shape):
out = output_tensor((2,), "int64")
out[0] = data_shape[0]
out[1] = int64(1)
for i in const_range(data_shape.shape[0] - 1):
out[1] *= data_shape[i + 1]
return out
@reg.register_shape_func("nn.batch_flatten", False)
def batch_flatten_shape_func(attrs, inputs, _):
"""
Shape function for batch_flatten op.
"""
return [_batch_flatten_shape_func(inputs[0])]
@script
def _dense_shape_func(data_shape, weight_shape):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0] - 1):
out[i] = data_shape[i]
out[out.shape[0] - 1] = weight_shape[0]
return out
@reg.register_shape_func("nn.dense", False)
def dense_shape_func(attrs, inputs, _):
"""
Shape function for dense op.
"""
ret = [_dense_shape_func(inputs[0], inputs[1])]
return ret
@script
def _batch_matmul_shape_func(data_shape, weight_shape):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0] - 1):
if i == 0:
out[i] = max(data_shape[i], weight_shape[i])
else:
out[i] = data_shape[i]
out[out.shape[0] - 1] = weight_shape[weight_shape.shape[0] - 2]
return out
@reg.register_shape_func("nn.batch_matmul", False)
def batch_matmul_shape_func(attrs, inputs, _):
"""
    Shape function for batch_matmul op.
"""
ret = [_batch_matmul_shape_func(inputs[0], inputs[1])]
return ret
@script
def _pad_shape_func(data_shape, pad_width):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
out[i] = data_shape[i] + pad_width[i][0] + pad_width[i][1]
return out
@reg.register_shape_func("nn.pad", False)
def pad_shape_func(attrs, inputs, _):
"""
Shape function for pad op.
"""
pad_width = []
for pair in attrs.pad_width:
pad_width.append(get_const_tuple(pair))
return [_pad_shape_func(inputs[0], convert(pad_width))]
@script
def _dilate_shape_func(data_shape, strides):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
out[i] = (data_shape[i] - 1) * strides[i] + 1
return out
@reg.register_shape_func("nn.dilate", False)
def dilate_shape_func(attrs, inputs, _):
"""
Shape function for dilate op.
"""
return [_dilate_shape_func(inputs[0], convert(attrs.strides))]
reg.register_shape_func("nn.bias_add", False, elemwise_shape_func)
reg.register_shape_func("nn.softmax", False, elemwise_shape_func)
reg.register_shape_func("nn.relu", False, elemwise_shape_func)
|
from tensorflow_v2_examples.cli import main
def test_main():
main([])
|
import math
import operator
from datetime import date, datetime
from operator import methodcaller
import pandas as pd
import pandas.testing as tm
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis import literal as L
clickhouse_driver = pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`double_col` AS Int8)'),
('int16', 'CAST(`double_col` AS Int16)'),
('float', 'CAST(`double_col` AS Float32)'),
# alltypes.double_col is non-nullable
(dt.Double(nullable=False), '`double_col`'),
],
)
def test_cast_double_col(alltypes, translate, to_type, expected):
expr = alltypes.double_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`string_col` AS Int8)'),
('int16', 'CAST(`string_col` AS Int16)'),
(dt.String(nullable=False), '`string_col`'),
('timestamp', 'CAST(`string_col` AS DateTime)'),
('date', 'CAST(`string_col` AS Date)'),
],
)
def test_cast_string_col(alltypes, translate, to_type, expected):
expr = alltypes.string_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.xfail(
raises=AssertionError, reason='Clickhouse doesn\'t have decimal type'
)
def test_decimal_cast():
assert False
@pytest.mark.parametrize(
'column',
[
'index',
'Unnamed: 0',
'id',
'bool_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'year',
'month',
],
)
def test_noop_cast(alltypes, translate, column):
col = alltypes[column]
result = col.cast(col.type())
assert result.equals(col)
assert translate(result) == '`{}`'.format(column)
def test_timestamp_cast_noop(alltypes, translate):
target = dt.Timestamp(nullable=False)
result1 = alltypes.timestamp_col.cast(target)
result2 = alltypes.int_col.cast(target)
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
assert translate(result1) == '`timestamp_col`'
assert translate(result2) == 'CAST(`int_col` AS DateTime)'
def test_timestamp_now(con, translate):
expr = ibis.now()
# now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
assert translate(expr) == 'now()'
# assert con.execute(expr) == now
@pytest.mark.parametrize(
('unit', 'expected'),
[
('y', '2009-01-01'),
param('m', '2009-05-01', marks=pytest.mark.xfail),
('d', '2009-05-17'),
('w', '2009-05-11'),
('h', '2009-05-17 12:00:00'),
('minute', '2009-05-17 12:34:00'),
],
)
def test_timestamp_truncate(con, translate, unit, expected):
stamp = ibis.timestamp('2009-05-17 12:34:56')
expr = stamp.truncate(unit)
assert con.execute(expr) == pd.Timestamp(expected)
@pytest.mark.parametrize(
('func', 'expected'),
[
(methodcaller('year'), 2015),
(methodcaller('month'), 9),
(methodcaller('day'), 1),
(methodcaller('hour'), 14),
(methodcaller('minute'), 48),
(methodcaller('second'), 5),
],
)
def test_simple_datetime_operations(con, func, expected):
value = ibis.timestamp('2015-09-01 14:48:05.359')
with pytest.raises(ValueError):
con.execute(func(value))
value = ibis.timestamp('2015-09-01 14:48:05')
    assert con.execute(func(value)) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
result = con.execute(L(value).nullifzero())
if expected is None:
assert pd.isnull(result)
else:
assert result == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(None).isnull(), True),
(L(1).isnull(), False),
(L(None).notnull(), False),
(L(1).notnull(), True),
],
)
def test_isnull_notnull(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.coalesce(5, None, 4), 5),
(ibis.coalesce(ibis.NA, 4, ibis.NA), 4),
(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),
],
)
def test_coalesce(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.NA.fillna(5), 5),
(L(5).fillna(10), 5),
(L(5).nullif(5), None),
(L(10).nullif(5), 10),
],
)
def test_fillna_nullif(con, expr, expected):
result = con.execute(expr)
if expected is None:
assert pd.isnull(result)
else:
assert result == expected
@pytest.mark.parametrize(
('value', 'expected'),
[
(L('foo_bar'), 'String'),
(L(5), 'UInt8'),
(L(1.2345), 'Float64'),
(L(datetime(2015, 9, 1, hour=14, minute=48, second=5)), 'DateTime'),
(L(date(2015, 9, 1)), 'Date'),
param(
ibis.NA,
'Null',
marks=pytest.mark.xfail(
raises=AssertionError,
reason=(
'Client/server version mismatch not handled in the '
'clickhouse driver'
),
),
),
],
)
def test_typeof(con, value, expected):
assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
('op', 'expected'),
[
(methodcaller('substr', 0, 3), 'foo'),
(methodcaller('substr', 4, 3), 'bar'),
(methodcaller('substr', 1), 'oo_bar'),
],
)
def test_string_substring(con, op, expected):
value = L('foo_bar')
assert con.execute(op(value)) == expected
def test_string_column_substring(con, alltypes, translate):
expr = alltypes.string_col.substr(2)
assert translate(expr) == 'substring(`string_col`, 2 + 1)'
assert len(con.execute(expr))
expr = alltypes.string_col.substr(0, 3)
assert translate(expr) == 'substring(`string_col`, 0 + 1, 3)'
assert len(con.execute(expr))
def test_string_reverse(con):
assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
assert con.execute(L('FOO').lower()) == 'foo'
def test_string_length_simple(con):
assert con.execute(L('FOO').length()) == 3
@pytest.mark.parametrize(
('value', 'op', 'expected'),
[
(L('foobar'), methodcaller('contains', 'bar'), True),
(L('foobar'), methodcaller('contains', 'foo'), True),
(L('foobar'), methodcaller('contains', 'baz'), False),
(L('100%'), methodcaller('contains', '%'), True),
(L('a_b_c'), methodcaller('contains', '_'), True),
],
)
def test_string_contains(con, op, value, expected):
assert con.execute(op(value)) == expected
# TODO: clickhouse-driver escaping bug
def test_re_replace(con, translate):
expr1 = L('Hello, World!').re_replace('.', '\\\\0\\\\0')
expr2 = L('Hello, World!').re_replace('^', 'here: ')
assert con.execute(expr1) == 'HHeelllloo,, WWoorrlldd!!'
assert con.execute(expr2) == 'here: Hello, World!'
@pytest.mark.parametrize(
('value', 'expected'),
[(L('a'), 0), (L('b'), 1), (L('d'), -1)], # TODO: what's the expected?
)
def test_find_in_set(con, value, expected, translate):
vals = list('abc')
expr = value.find_in_set(vals)
assert con.execute(expr) == expected
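# ClickHouse indexOf() is 1-based and returns 0 when the element is missing,
# so the translation below subtracts 1: found values come back 0-based and a
# miss comes back as -1, matching the L('d') case above.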
def test_string_column_find_in_set(con, alltypes, translate):
s = alltypes.string_col
vals = list('abc')
expr = s.find_in_set(vals)
assert translate(expr) == "indexOf(['a','b','c'], `string_col`) - 1"
assert len(con.execute(expr))
@pytest.mark.parametrize(
('url', 'extract', 'expected'),
[
(L('https://www.cloudera.com'), 'HOST', 'www.cloudera.com'),
(L('https://www.cloudera.com'), 'PROTOCOL', 'https'),
(
L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
'PATH',
'/watch',
),
(
L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
'QUERY',
'v=kEuEcWfewf8&t=10',
),
],
)
def test_parse_url(con, translate, url, extract, expected):
expr = url.parse_url(extract)
assert con.execute(expr) == expected
def test_parse_url_query_parameter(con, translate):
url = L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10')
expr = url.parse_url('QUERY', 't')
assert con.execute(expr) == '10'
expr = url.parse_url('QUERY', 'v')
assert con.execute(expr) == 'kEuEcWfewf8'
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('foobar').find('bar'), 3),
(L('foobar').find('baz'), -1),
(L('foobar').like('%bar'), True),
(L('foobar').like('foo%'), True),
(L('foobar').like('%baz%'), False),
(L('foobar').like(['%bar']), True),
(L('foobar').like(['foo%']), True),
(L('foobar').like(['%baz%']), False),
(L('foobar').like(['%bar', 'foo%']), True),
(L('foobarfoo').replace('foo', 'H'), 'HbarH'),
],
)
def test_string_find_like(con, expr, expected):
assert con.execute(expr) == expected
def test_string_column_like(con, alltypes, translate):
expr = alltypes.string_col.like('foo%')
assert translate(expr) == "`string_col` LIKE 'foo%'"
assert len(con.execute(expr))
expr = alltypes.string_col.like(['foo%', '%bar'])
expected = "`string_col` LIKE 'foo%' OR `string_col` LIKE '%bar'"
assert translate(expr) == expected
assert len(con.execute(expr))
def test_string_column_find(con, alltypes, translate):
s = alltypes.string_col
expr = s.find('a')
assert translate(expr) == "position(`string_col`, 'a') - 1"
assert len(con.execute(expr))
expr = s.find(s)
assert translate(expr) == "position(`string_col`, `string_col`) - 1"
assert len(con.execute(expr))
@pytest.mark.parametrize(
('call', 'expected'),
[
(methodcaller('log'), 'log(`double_col`)'),
(methodcaller('log2'), 'log2(`double_col`)'),
(methodcaller('log10'), 'log10(`double_col`)'),
(methodcaller('round'), 'round(`double_col`)'),
(methodcaller('round', 0), 'round(`double_col`, 0)'),
(methodcaller('round', 2), 'round(`double_col`, 2)'),
(methodcaller('exp'), 'exp(`double_col`)'),
(methodcaller('abs'), 'abs(`double_col`)'),
(methodcaller('ceil'), 'ceil(`double_col`)'),
(methodcaller('floor'), 'floor(`double_col`)'),
(methodcaller('sqrt'), 'sqrt(`double_col`)'),
(
methodcaller('sign'),
'intDivOrZero(`double_col`, abs(`double_col`))',
),
],
)
def test_translate_math_functions(con, alltypes, translate, call, expected):
expr = call(alltypes.double_col)
assert translate(expr) == expected
assert len(con.execute(expr))
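# The translator above maps sign() to intDivOrZero(x, abs(x)) rather than a
# dedicated builtin: positive values give 1, negative values give -1, and
# x == 0 gives 0 because intDivOrZero returns 0 on division by zero.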
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(-5).abs(), 5),
(L(5).abs(), 5),
(L(5.5).round(), 6.0),
(L(5.556).round(2), 5.56),
(L(5.556).ceil(), 6.0),
(L(5.556).floor(), 5.0),
(L(5.556).exp(), math.exp(5.556)),
(L(5.556).sign(), 1),
(L(-5.556).sign(), -1),
(L(0).sign(), 0),
(L(5.556).sqrt(), math.sqrt(5.556)),
(L(5.556).log(2), math.log(5.556, 2)),
(L(5.556).ln(), math.log(5.556)),
(L(5.556).log2(), math.log(5.556, 2)),
(L(5.556).log10(), math.log10(5.556)),
],
)
def test_math_functions(con, expr, expected, translate):
assert con.execute(expr) == expected
def test_greatest(con, alltypes, translate):
expr = ibis.greatest(alltypes.int_col, 10)
assert translate(expr) == "greatest(`int_col`, 10)"
assert len(con.execute(expr))
expr = ibis.greatest(alltypes.int_col, alltypes.bigint_col)
assert translate(expr) == "greatest(`int_col`, `bigint_col`)"
assert len(con.execute(expr))
def test_least(con, alltypes, translate):
expr = ibis.least(alltypes.int_col, 10)
assert translate(expr) == "least(`int_col`, 10)"
assert len(con.execute(expr))
expr = ibis.least(alltypes.int_col, alltypes.bigint_col)
assert translate(expr) == "least(`int_col`, `bigint_col`)"
assert len(con.execute(expr))
# TODO: clickhouse-driver escaping bug
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('abcd').re_search('[a-z]'), True),
        (L('abcd').re_search(r'[\d]+'), False),
        (L('1222').re_search(r'[\d]+'), True),
],
)
def test_regexp(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('abcd').re_extract('([a-z]+)', 0), 'abcd'),
# (L('abcd').re_extract('(ab)(cd)', 1), 'cd'),
# valid group number but no match => empty string
        (L('abcd').re_extract(r'(\d)', 0), ''),
# match but not a valid group number => NULL
# (L('abcd').re_extract('abcd', 3), None),
],
)
def test_regexp_extract(con, expr, expected, translate):
assert con.execute(expr) == expected
def test_column_regexp_extract(con, alltypes, translate):
expected = r"extractAll(`string_col`, '[\d]+')[3 + 1]"
expr = alltypes.string_col.re_extract(r'[\d]+', 3)
assert translate(expr) == expected
assert len(con.execute(expr))
def test_column_regexp_replace(con, alltypes, translate):
expected = r"replaceRegexpAll(`string_col`, '[\d]+', 'aaa')"
expr = alltypes.string_col.re_replace(r'[\d]+', 'aaa')
assert translate(expr) == expected
assert len(con.execute(expr))
def test_numeric_builtins_work(con, alltypes, df, translate):
expr = alltypes.double_col
result = expr.execute()
expected = df.double_col.fillna(0)
tm.assert_series_equal(result, expected)
def test_null_column(alltypes, translate):
t = alltypes
nrows = t.count().execute()
expr = t.mutate(na_column=ibis.NA).na_column
result = expr.execute()
expected = pd.Series([None] * nrows, name='na_column')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('attr', 'expected'),
[
(operator.methodcaller('year'), {2009, 2010}),
(operator.methodcaller('month'), set(range(1, 13))),
(operator.methodcaller('day'), set(range(1, 32))),
],
)
def test_date_extract_field(db, alltypes, attr, expected):
t = alltypes
expr = attr(t.timestamp_col.cast('date')).distinct()
result = expr.execute().astype(int)
assert set(result) == expected
def test_timestamp_from_integer(con, alltypes, translate):
# timestamp_col has datetime type
expr = alltypes.int_col.to_timestamp()
assert translate(expr) == 'toDateTime(`int_col`)'
assert len(con.execute(expr))
def test_count_distinct_with_filter(alltypes):
expr = alltypes.string_col.nunique(
where=alltypes.string_col.cast('int64') > 1
)
result = expr.execute()
expected = alltypes.string_col.execute()
expected = expected[expected.astype('int64') > 1].nunique()
assert result == expected
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Notes about unicode handling in psutil
======================================
In psutil these are the APIs returning or dealing with a string
('not tested' means they are not tested to deal with non-ASCII strings):
* Process.cmdline()
* Process.connections('unix')
* Process.cwd()
* Process.environ()
* Process.exe()
* Process.memory_maps()
* Process.name()
* Process.open_files()
* Process.username() (not tested)
* disk_io_counters() (not tested)
* disk_partitions() (not tested)
* disk_usage(str)
* net_connections('unix')
* net_if_addrs() (not tested)
* net_if_stats() (not tested)
* net_io_counters() (not tested)
* sensors_fans() (not tested)
* sensors_temperatures() (not tested)
* users() (not tested)
* WindowsService.binpath() (not tested)
* WindowsService.description() (not tested)
* WindowsService.display_name() (not tested)
* WindowsService.name() (not tested)
* WindowsService.status() (not tested)
* WindowsService.username() (not tested)
In here we create a unicode path with a funky non-ASCII name and (where
possible) make psutil return it back (e.g. on name(), exe(), open_files(),
etc.) and make sure that:
* psutil never crashes with UnicodeDecodeError
* the returned path matches
For a detailed explanation of how psutil handles unicode see:
- https://github.com/giampaolo/psutil/issues/1040
- http://psutil.readthedocs.io/#unicode
"""
import os
import traceback
import warnings
from contextlib import closing
from psutil import BSD
from psutil import MACOS
from psutil import OPENBSD
from psutil import POSIX
from psutil import WINDOWS
from psutil._compat import PY3
from psutil._compat import u
from psutil.tests import APPVEYOR
from psutil.tests import ASCII_FS
from psutil.tests import bind_unix_socket
from psutil.tests import chdir
from psutil.tests import copyload_shared_lib
from psutil.tests import create_exe
from psutil.tests import get_test_subprocess
from psutil.tests import HAS_CONNECTIONS_UNIX
from psutil.tests import HAS_ENVIRON
from psutil.tests import HAS_MEMORY_MAPS
from psutil.tests import mock
from psutil.tests import PYPY
from psutil.tests import reap_children
from psutil.tests import safe_mkdir
from psutil.tests import safe_rmpath as _safe_rmpath
from psutil.tests import skip_on_access_denied
from psutil.tests import TESTFILE_PREFIX
from psutil.tests import TESTFN
from psutil.tests import TESTFN_UNICODE
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import unix_socket_path
import psutil
def safe_rmpath(path):
if APPVEYOR:
# TODO - this is quite random and I'm not sure why it happens,
        # nor can I reproduce it locally:
# https://ci.appveyor.com/project/giampaolo/psutil/build/job/
# jiq2cgd6stsbtn60
# safe_rmpath() happens after reap_children() so this is weird
# Perhaps wait_procs() on Windows is broken? Maybe because
# of STILL_ACTIVE?
# https://github.com/giampaolo/psutil/blob/
# 68c7a70728a31d8b8b58f4be6c4c0baa2f449eda/psutil/arch/
# windows/process_info.c#L146
try:
return _safe_rmpath(path)
except WindowsError:
traceback.print_exc()
else:
return _safe_rmpath(path)
def subprocess_supports_unicode(name):
"""Return True if both the fs and the subprocess module can
deal with a unicode file name.
"""
if PY3:
return True
try:
safe_rmpath(name)
create_exe(name)
get_test_subprocess(cmd=[name])
except UnicodeEncodeError:
return False
else:
return True
finally:
reap_children()
# An invalid unicode string.
if PY3:
INVALID_NAME = (TESTFN.encode('utf8') + b"f\xc0\x80").decode(
'utf8', 'surrogateescape')
else:
INVALID_NAME = TESTFN + "f\xc0\x80"
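# Illustration (not part of the tests): on Python 3 the 'surrogateescape'
# error handler maps the undecodable bytes b"\xc0\x80" to lone surrogate
# code points, so INVALID_NAME round-trips back to the original bytes when
# encoded with the same handler, e.g.
#   INVALID_NAME.encode('utf8', 'surrogateescape')  # ends with b"f\xc0\x80"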
# ===================================================================
# FS APIs
# ===================================================================
class _BaseFSAPIsTests(object):
funky_name = None
@classmethod
def setUpClass(cls):
safe_rmpath(cls.funky_name)
create_exe(cls.funky_name)
@classmethod
def tearDownClass(cls):
reap_children()
safe_rmpath(cls.funky_name)
def tearDown(self):
reap_children()
def expect_exact_path_match(self):
raise NotImplementedError("must be implemented in subclass")
def test_proc_exe(self):
subp = get_test_subprocess(cmd=[self.funky_name])
p = psutil.Process(subp.pid)
exe = p.exe()
self.assertIsInstance(exe, str)
if self.expect_exact_path_match():
self.assertEqual(exe, self.funky_name)
def test_proc_name(self):
subp = get_test_subprocess(cmd=[self.funky_name])
if WINDOWS:
# On Windows name() is determined from exe() first, because
# it's faster; we want to overcome the internal optimization
# and test name() instead of exe().
with mock.patch("psutil._psplatform.cext.proc_exe",
side_effect=psutil.AccessDenied(os.getpid())) as m:
name = psutil.Process(subp.pid).name()
assert m.called
else:
name = psutil.Process(subp.pid).name()
self.assertIsInstance(name, str)
if self.expect_exact_path_match():
self.assertEqual(name, os.path.basename(self.funky_name))
def test_proc_cmdline(self):
subp = get_test_subprocess(cmd=[self.funky_name])
p = psutil.Process(subp.pid)
cmdline = p.cmdline()
for part in cmdline:
self.assertIsInstance(part, str)
if self.expect_exact_path_match():
self.assertEqual(cmdline, [self.funky_name])
def test_proc_cwd(self):
dname = self.funky_name + "2"
self.addCleanup(safe_rmpath, dname)
safe_mkdir(dname)
with chdir(dname):
p = psutil.Process()
cwd = p.cwd()
self.assertIsInstance(p.cwd(), str)
if self.expect_exact_path_match():
self.assertEqual(cwd, dname)
def test_proc_open_files(self):
p = psutil.Process()
start = set(p.open_files())
with open(self.funky_name, 'rb'):
new = set(p.open_files())
path = (new - start).pop().path
self.assertIsInstance(path, str)
if BSD and not path:
# XXX - see https://github.com/giampaolo/psutil/issues/595
return self.skipTest("open_files on BSD is broken")
if self.expect_exact_path_match():
self.assertEqual(os.path.normcase(path),
os.path.normcase(self.funky_name))
@unittest.skipIf(not POSIX, "POSIX only")
def test_proc_connections(self):
suffix = os.path.basename(self.funky_name)
with unix_socket_path(suffix=suffix) as name:
try:
sock = bind_unix_socket(name)
except UnicodeEncodeError:
if PY3:
raise
else:
raise unittest.SkipTest("not supported")
with closing(sock):
conn = psutil.Process().connections('unix')[0]
self.assertIsInstance(conn.laddr, str)
# AF_UNIX addr not set on OpenBSD
if not OPENBSD:
self.assertEqual(conn.laddr, name)
@unittest.skipIf(not POSIX, "POSIX only")
@unittest.skipIf(not HAS_CONNECTIONS_UNIX, "can't list UNIX sockets")
@skip_on_access_denied()
def test_net_connections(self):
def find_sock(cons):
for conn in cons:
if os.path.basename(conn.laddr).startswith(TESTFILE_PREFIX):
return conn
raise ValueError("connection not found")
suffix = os.path.basename(self.funky_name)
with unix_socket_path(suffix=suffix) as name:
try:
sock = bind_unix_socket(name)
except UnicodeEncodeError:
if PY3:
raise
else:
raise unittest.SkipTest("not supported")
with closing(sock):
cons = psutil.net_connections(kind='unix')
# AF_UNIX addr not set on OpenBSD
if not OPENBSD:
conn = find_sock(cons)
self.assertIsInstance(conn.laddr, str)
self.assertEqual(conn.laddr, name)
def test_disk_usage(self):
dname = self.funky_name + "2"
self.addCleanup(safe_rmpath, dname)
safe_mkdir(dname)
psutil.disk_usage(dname)
@unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
@unittest.skipIf(not PY3, "ctypes does not support unicode on PY2")
def test_memory_maps(self):
# XXX: on Python 2, using ctypes.CDLL with a unicode path
# opens a message box which blocks the test run.
with copyload_shared_lib(dst_prefix=self.funky_name) as funky_path:
def normpath(p):
return os.path.realpath(os.path.normcase(p))
libpaths = [normpath(x.path)
for x in psutil.Process().memory_maps()]
# ...just to have a clearer msg in case of failure
libpaths = [x for x in libpaths if TESTFILE_PREFIX in x]
self.assertIn(normpath(funky_path), libpaths)
for path in libpaths:
self.assertIsInstance(path, str)
# https://travis-ci.org/giampaolo/psutil/jobs/440073249
@unittest.skipIf(PYPY and TRAVIS, "unreliable on PYPY + TRAVIS")
@unittest.skipIf(MACOS and TRAVIS, "unreliable on TRAVIS") # TODO
@unittest.skipIf(ASCII_FS, "ASCII fs")
@unittest.skipIf(not subprocess_supports_unicode(TESTFN_UNICODE),
"subprocess can't deal with unicode")
class TestFSAPIs(_BaseFSAPIsTests, unittest.TestCase):
"""Test FS APIs with a funky, valid, UTF8 path name."""
funky_name = TESTFN_UNICODE
@classmethod
def expect_exact_path_match(cls):
# Do not expect psutil to correctly handle unicode paths on
# Python 2 if os.listdir() is not able either.
if PY3:
return True
else:
here = '.' if isinstance(cls.funky_name, str) else u('.')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return cls.funky_name in os.listdir(here)
@unittest.skipIf(PYPY and TRAVIS, "unreliable on PYPY + TRAVIS")
@unittest.skipIf(MACOS and TRAVIS, "unreliable on TRAVIS") # TODO
@unittest.skipIf(not subprocess_supports_unicode(INVALID_NAME),
"subprocess can't deal with invalid unicode")
class TestFSAPIsWithInvalidPath(_BaseFSAPIsTests, unittest.TestCase):
"""Test FS APIs with a funky, invalid path name."""
funky_name = INVALID_NAME
@classmethod
def expect_exact_path_match(cls):
# Invalid unicode names are supposed to work on Python 2.
return True
@unittest.skipIf(not WINDOWS, "WINDOWS only")
class TestWinProcessName(unittest.TestCase):
def test_name_type(self):
# On Windows name() is determined from exe() first, because
# it's faster; we want to overcome the internal optimization
# and test name() instead of exe().
with mock.patch("psutil._psplatform.cext.proc_exe",
side_effect=psutil.AccessDenied(os.getpid())) as m:
self.assertIsInstance(psutil.Process().name(), str)
assert m.called
# ===================================================================
# Non fs APIs
# ===================================================================
class TestNonFSAPIS(unittest.TestCase):
"""Unicode tests for non fs-related APIs."""
def tearDown(self):
reap_children()
@unittest.skipIf(not HAS_ENVIRON, "not supported")
def test_proc_environ(self):
        # Note: unlike the others, this test does not deal with fs
        # paths. On Python 2 the subprocess module is broken, as it
        # is not able to handle non-ASCII env vars, so we use "è",
        # which is part of the extended ASCII table (unicode
        # point <= 255).
env = os.environ.copy()
funky_str = TESTFN_UNICODE if PY3 else 'è'
env['FUNNY_ARG'] = funky_str
sproc = get_test_subprocess(env=env)
p = psutil.Process(sproc.pid)
env = p.environ()
for k, v in env.items():
self.assertIsInstance(k, str)
self.assertIsInstance(v, str)
self.assertEqual(env['FUNNY_ARG'], funky_str)
if __name__ == '__main__':
from psutil.tests.runner import run
run(__file__)
|
# Copyright (c) 2010-2013, 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2012-2014 Mark D. Hill and David A. Wood
# Copyright (c) 2009-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Brad Beckmann
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal, warn
from m5.util.fdthelper import *
addToPath('../')
from ruby import Ruby
from common.FSConfig import *
from common.SysPaths import *
from common.Benchmarks import *
from common import Simulation
from common import CacheConfig
from common import MemConfig
from common import CpuConfig
from common.Caches import *
from common import Options
# Check if KVM support has been enabled, we might need to do VM
# configuration if that's the case.
have_kvm_support = 'BaseKvmCPU' in globals()
def is_kvm_cpu(cpu_class):
return have_kvm_support and cpu_class != None and \
issubclass(cpu_class, BaseKvmCPU)
def cmd_line_template():
if options.command_line and options.command_line_file:
print "Error: --command-line and --command-line-file are " \
"mutually exclusive"
sys.exit(1)
if options.command_line:
return options.command_line
if options.command_line_file:
return open(options.command_line_file).read().strip()
return None
def build_test_system(np):
cmdline = cmd_line_template()
if buildEnv['TARGET_ISA'] == "alpha":
test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby,
cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == "mips":
test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == "sparc":
test_sys = makeSparcSystem(test_mem_mode, bm[0], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == "x86":
test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
options.ruby, cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == "arm":
test_sys = makeArmSystem(test_mem_mode, options.machine_type,
options.num_cpus, bm[0], options.dtb_filename,
bare_metal=options.bare_metal,
cmdline=cmdline,
ignore_dtb=options.generate_dtb,
external_memory=
options.external_memory_system,
ruby=options.ruby,
security=options.enable_security_extensions)
if options.enable_context_switch_stats_dump:
test_sys.enable_context_switch_stats_dump = True
else:
fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])
# Set the cache line size for the entire system
test_sys.cache_line_size = options.cacheline_size
# Create a top-level voltage domain
test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = test_sys.voltage_domain)
# Create a CPU voltage domain
test_sys.cpu_voltage_domain = VoltageDomain()
# Create a source clock for the CPUs and set the clock period
test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
test_sys.cpu_voltage_domain)
if options.kernel is not None:
test_sys.kernel = binary(options.kernel)
if options.script is not None:
test_sys.readfile = options.script
if options.lpae:
test_sys.have_lpae = True
if options.virtualisation:
test_sys.have_virtualization = True
test_sys.init_param = options.init_param
# For now, assign all the CPUs to the same clock domain
test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
for i in xrange(np)]
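    # KVM-based CPU models run the guest inside a host virtual machine, so the
    # system needs a KvmVM object when either CPU class is KVM-backed.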
if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
test_sys.kvm_vm = KvmVM()
if options.ruby:
Ruby.create_system(options, True, test_sys, test_sys.iobus,
test_sys._dma_ports)
        # Create a separate clock domain for Ruby
test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = test_sys.voltage_domain)
# Connect the ruby io port to the PIO bus,
# assuming that there is just one such port.
test_sys.iobus.master = test_sys.ruby._io_port.slave
for (i, cpu) in enumerate(test_sys.cpu):
#
# Tie the cpu ports to the correct ruby system ports
#
cpu.clk_domain = test_sys.cpu_clk_domain
cpu.createThreads()
cpu.createInterruptController()
cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
if buildEnv['TARGET_ISA'] in ("x86", "arm"):
cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
if buildEnv['TARGET_ISA'] in "x86":
cpu.interrupts[0].pio = test_sys.ruby._cpu_ports[i].master
cpu.interrupts[0].int_master = test_sys.ruby._cpu_ports[i].slave
cpu.interrupts[0].int_slave = test_sys.ruby._cpu_ports[i].master
else:
if options.caches or options.l2cache:
# By default the IOCache runs at the system clock
test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
test_sys.iocache.cpu_side = test_sys.iobus.master
test_sys.iocache.mem_side = test_sys.membus.slave
elif not options.external_memory_system:
test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
test_sys.iobridge.slave = test_sys.iobus.master
test_sys.iobridge.master = test_sys.membus.slave
# Sanity check
if options.fastmem:
if TestCPUClass != AtomicSimpleCPU:
fatal("Fastmem can only be used with atomic CPU!")
if (options.caches or options.l2cache):
fatal("You cannot use fastmem in combination with caches!")
if options.simpoint_profile:
if not options.fastmem:
# Atomic CPU checked with fastmem option already
fatal("SimPoint generation should be done with atomic cpu and fastmem")
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
for i in xrange(np):
if options.fastmem:
test_sys.cpu[i].fastmem = True
if options.simpoint_profile:
test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
if options.checker:
test_sys.cpu[i].addCheckerCpu()
test_sys.cpu[i].createThreads()
# If elastic tracing is enabled when not restoring from checkpoint and
# when not fast forwarding using the atomic cpu, then check that the
# TestCPUClass is DerivO3CPU or inherits from DerivO3CPU. If the check
# passes then attach the elastic trace probe.
# If restoring from checkpoint or fast forwarding, the code that does this for
# FutureCPUClass is in the Simulation module. If the check passes then the
# elastic trace probe is attached to the switch CPUs.
if options.elastic_trace_en and options.checkpoint_restore == None and \
not options.fast_forward:
CpuConfig.config_etrace(TestCPUClass, test_sys.cpu, options)
CacheConfig.config_cache(options, test_sys)
MemConfig.config_mem(options, test_sys)
return test_sys
def build_drive_system(np):
# driver system CPU is always simple, so is the memory
# Note this is an assignment of a class, not an instance.
DriveCPUClass = AtomicSimpleCPU
drive_mem_mode = 'atomic'
DriveMemClass = SimpleMemory
cmdline = cmd_line_template()
if buildEnv['TARGET_ISA'] == 'alpha':
drive_sys = makeLinuxAlphaSystem(drive_mem_mode, bm[1], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == 'mips':
drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == 'sparc':
drive_sys = makeSparcSystem(drive_mem_mode, bm[1], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == 'x86':
drive_sys = makeLinuxX86System(drive_mem_mode, np, bm[1],
cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == 'arm':
drive_sys = makeArmSystem(drive_mem_mode, options.machine_type, np,
bm[1], options.dtb_filename, cmdline=cmdline,
ignore_dtb=options.generate_dtb)
# Create a top-level voltage domain
drive_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
drive_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = drive_sys.voltage_domain)
# Create a CPU voltage domain
drive_sys.cpu_voltage_domain = VoltageDomain()
# Create a source clock for the CPUs and set the clock period
drive_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
drive_sys.cpu_voltage_domain)
drive_sys.cpu = DriveCPUClass(clk_domain=drive_sys.cpu_clk_domain,
cpu_id=0)
drive_sys.cpu.createThreads()
drive_sys.cpu.createInterruptController()
drive_sys.cpu.connectAllPorts(drive_sys.membus)
if options.fastmem:
drive_sys.cpu.fastmem = True
if options.kernel is not None:
drive_sys.kernel = binary(options.kernel)
if is_kvm_cpu(DriveCPUClass):
drive_sys.kvm_vm = KvmVM()
drive_sys.iobridge = Bridge(delay='50ns',
ranges = drive_sys.mem_ranges)
drive_sys.iobridge.slave = drive_sys.iobus.master
drive_sys.iobridge.master = drive_sys.membus.slave
# Create the appropriate memory controllers and connect them to the
# memory bus
drive_sys.mem_ctrls = [DriveMemClass(range = r)
for r in drive_sys.mem_ranges]
for i in xrange(len(drive_sys.mem_ctrls)):
drive_sys.mem_ctrls[i].port = drive_sys.membus.master
drive_sys.init_param = options.init_param
return drive_sys
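# Typical invocation (illustrative only): this script is meant to be run by
# the gem5 binary rather than a plain Python interpreter, e.g.
#   build/<ISA>/gem5.opt <this script> --kernel=... --disk-image=... [--ruby]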
# Add options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addFSOptions(parser)
# Add the ruby specific and protocol specific options
if '--ruby' in sys.argv:
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
# system under test can be any CPU
(TestCPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
# Match the memories with the CPUs, based on the options for the test system
TestMemClass = Simulation.setMemClass(options)
if options.benchmark:
try:
bm = Benchmarks[options.benchmark]
except KeyError:
print "Error benchmark %s has not been defined." % options.benchmark
print "Valid benchmarks are: %s" % DefinedBenchmarks
sys.exit(1)
else:
if options.dual:
bm = [SysConfig(disk=options.disk_image, rootdev=options.root_device,
mem=options.mem_size, os_type=options.os_type),
SysConfig(disk=options.disk_image, rootdev=options.root_device,
mem=options.mem_size, os_type=options.os_type)]
else:
bm = [SysConfig(disk=options.disk_image, rootdev=options.root_device,
mem=options.mem_size, os_type=options.os_type)]
np = options.num_cpus
test_sys = build_test_system(np)
if len(bm) == 2:
drive_sys = build_drive_system(np)
root = makeDualRoot(True, test_sys, drive_sys, options.etherdump)
elif len(bm) == 1 and options.dist:
# This system is part of a dist-gem5 simulation
root = makeDistRoot(test_sys,
options.dist_rank,
options.dist_size,
options.dist_server_name,
options.dist_server_port,
options.dist_sync_repeat,
options.dist_sync_start,
options.ethernet_linkspeed,
options.ethernet_linkdelay,
                        options.etherdump)
elif len(bm) == 1:
root = Root(full_system=True, system=test_sys)
else:
print "Error I don't know how to create more than 2 systems."
sys.exit(1)
if options.timesync:
root.time_sync_enable = True
if options.frame_capture:
VncServer.frame_capture = True
if buildEnv['TARGET_ISA'] == "arm" and options.generate_dtb:
# Sanity checks
if options.dtb_filename:
fatal("--generate-dtb and --dtb-filename cannot be specified at the"\
"same time.")
if options.machine_type not in ["VExpress_GEM5", "VExpress_GEM5_V1"]:
warn("Can only correctly generate a dtb for VExpress_GEM5_V1 " \
"platforms, unless custom hardware models have been equipped "\
"with generation functionality.")
# Generate a Device Tree
def create_dtb_for_system(system, filename):
state = FdtState(addr_cells=2, size_cells=2, cpu_cells=1)
rootNode = system.generateDeviceTree(state)
fdt = Fdt()
fdt.add_rootnode(rootNode)
dtb_filename = os.path.join(m5.options.outdir, filename)
return fdt.writeDtbFile(dtb_filename)
for sysname in ('system', 'testsys', 'drivesys'):
if hasattr(root, sysname):
sys = getattr(root, sysname)
sys.dtb_filename = create_dtb_for_system(sys, '%s.dtb' % sysname)
Simulation.setWorkCountOptions(test_sys, options)
Simulation.run(options, root, test_sys, FutureClass)
|
import random
import math
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from django.contrib.auth.decorators import permission_required
from django.db.models import Q
from django.shortcuts import redirect, reverse
from django.utils import timezone
from mittab.apps.tab.helpers import redirect_and_flash_error, \
redirect_and_flash_success
from mittab.apps.tab.models import *
from mittab.libs.errors import *
from mittab.apps.tab.forms import OutroundResultEntryForm
import mittab.libs.tab_logic as tab_logic
import mittab.libs.outround_tab_logic as outround_tab_logic
from mittab.libs.outround_tab_logic import offset_to_quotient
import mittab.libs.backup as backup
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def pair_next_outround(request, num_teams, type_of_round):
if request.method == "POST":
backup.backup_round("before_pairing_%s_%s" %
(num_teams / 2, type_of_round))
Outround.objects.filter(num_teams__lt=num_teams,
type_of_round=type_of_round).delete()
outround_tab_logic.pair(type_of_round)
return redirect_and_flash_success(
request, "Success!", path=reverse("outround_pairing_view",
kwargs={
"num_teams": int(num_teams / 2),
"type_of_round": type_of_round
}))
# See if we can pair the round
title = "Pairing Outrounds"
current_round_number = 0
previous_round_number = TabSettings.get("tot_rounds", 5)
check_status = []
judges = outround_tab_logic.have_enough_judges_type(type_of_round)
rooms = outround_tab_logic.have_enough_rooms_type(type_of_round)
msg = "Enough judges checked in for Out-rounds? Need {0}, have {1}".format(
judges[1][1], judges[1][0])
if num_teams <= 2:
check_status.append(("Have more rounds?", "No", "Not enough teams"))
else:
check_status.append(("Have more rounds?", "Yes", "Have enough teams!"))
if judges[0]:
check_status.append((msg, "Yes", "Judges are checked in"))
else:
check_status.append((msg, "No", "Not enough judges"))
msg = "N/2 Rooms available Round Out-rounds? Need {0}, have {1}".format(
rooms[1][1], rooms[1][0])
if rooms[0]:
check_status.append((msg, "Yes", "Rooms are checked in"))
else:
check_status.append((msg, "No", "Not enough rooms"))
round_label = "[%s] Ro%s" % ("N" if type_of_round else "V",
num_teams)
msg = "All Rounds properly entered for Round %s" % (
round_label)
ready_to_pair = "Yes"
ready_to_pair_alt = "Checks passed!"
try:
outround_tab_logic.have_properly_entered_data(num_teams, type_of_round)
check_status.append((msg, "Yes", "All rounds look good"))
except PrevRoundNotEnteredError as e:
ready_to_pair = "No"
ready_to_pair_alt = str(e)
check_status.append(
(msg, "No", "Not all rounds are entered. %s" % str(e)))
return render(request, "pairing/pair_round.html", locals())
def get_outround_options(var_teams_to_break,
nov_teams_to_break):
outround_options = []
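    # Round each break size up to the next power of two so every elimination
    # bracket is full (math.log(n, 2) % 1 == 0 only when n is a power of two).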
while not math.log(var_teams_to_break, 2) % 1 == 0:
var_teams_to_break += 1
while not math.log(nov_teams_to_break, 2) % 1 == 0:
nov_teams_to_break += 1
while var_teams_to_break > 1:
if Outround.objects.filter(type_of_round=BreakingTeam.VARSITY,
num_teams=var_teams_to_break).exists():
outround_options.append(
(reverse("outround_pairing_view", kwargs={
"type_of_round": BreakingTeam.VARSITY,
"num_teams": int(var_teams_to_break)}),
"[V] Ro%s" % (int(var_teams_to_break),))
)
var_teams_to_break /= 2
while nov_teams_to_break > 1:
if Outround.objects.filter(type_of_round=BreakingTeam.NOVICE,
num_teams=nov_teams_to_break).exists():
outround_options.append(
(reverse("outround_pairing_view", kwargs={
"type_of_round": BreakingTeam.NOVICE,
"num_teams": int(nov_teams_to_break)}),
"[N] Ro%s" % (int(nov_teams_to_break),))
)
nov_teams_to_break /= 2
return outround_options
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def break_teams(request):
if request.method == "POST":
# Perform the break
backup.backup_round("before_the_break_%s" % (timezone.now().strftime("%H:%M"),))
success, msg = outround_tab_logic.perform_the_break()
if success:
return redirect_and_flash_success(
request, msg, path="/outround_pairing"
)
return redirect_and_flash_error(
request, msg, path="/"
)
# See if we can pair the round
title = "Pairing Outrounds"
current_round_number = 0
previous_round_number = TabSettings.get("tot_rounds", 5)
check_status = []
msg = "All Rounds properly entered for Round %s" % (
previous_round_number)
ready_to_pair = "Yes"
ready_to_pair_alt = "Checks passed!"
try:
tab_logic.have_properly_entered_data(current_round_number)
check_status.append((msg, "Yes", "All rounds look good"))
except PrevRoundNotEnteredError as e:
ready_to_pair = "No"
ready_to_pair_alt = str(e)
check_status.append(
(msg, "No", "Not all rounds are entered. %s" % str(e)))
except ByeAssignmentError as e:
ready_to_pair = "No"
ready_to_pair_alt = str(e)
check_status.append(
(msg, "No", "You have a bye and results. %s" % str(e)))
except NoShowAssignmentError as e:
ready_to_pair = "No"
ready_to_pair_alt = str(e)
check_status.append(
(msg, "No", "You have a noshow and results. %s" % str(e)))
rooms = outround_tab_logic.have_enough_rooms_before_break()
msg = "N/2 Rooms available Round Out-rounds? Need {0}, have {1}".format(
rooms[1][1], rooms[1][0])
if rooms[0]:
check_status.append((msg, "Yes", "Rooms are checked in"))
else:
check_status.append((msg, "No", "Not enough rooms"))
return render(request, "pairing/pair_round.html", locals())
def outround_pairing_view(request,
type_of_round=BreakingTeam.VARSITY,
num_teams=None):
choice = TabSettings.get("choice", 0)
if num_teams is None:
num_teams = TabSettings.get("var_teams_to_break", 8)
while not math.log(num_teams, 2) % 1 == 0:
num_teams += 1
return redirect("outround_pairing_view",
type_of_round=BreakingTeam.VARSITY,
num_teams=num_teams)
pairing_released = False
if type_of_round == BreakingTeam.VARSITY:
pairing_released = TabSettings.get("var_teams_visible", 256) <= num_teams
elif type_of_round == BreakingTeam.NOVICE:
pairing_released = TabSettings.get("nov_teams_visible", 256) <= num_teams
label = "[%s] Ro%s" % ("V" if type_of_round == BreakingTeam.VARSITY else "N",
num_teams)
nov_teams_to_break = TabSettings.get("nov_teams_to_break")
var_teams_to_break = TabSettings.get("var_teams_to_break")
if not nov_teams_to_break or not var_teams_to_break:
return redirect_and_flash_error(request,
"Please check your break tab settings",
path="/")
outround_options = get_outround_options(var_teams_to_break,
nov_teams_to_break)
outrounds = Outround.objects.filter(type_of_round=type_of_round,
num_teams=num_teams).all()
judges_per_panel = TabSettings.get("var_panel_size", 3) \
if type_of_round == BreakingTeam.VARSITY \
else TabSettings.get("nov_panel_size", 3)
judge_slots = [i for i in range(1, judges_per_panel + 1)]
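    # var_to_nov is stored as an offset in TabSettings; offset_to_quotient()
    # turns it into the ratio between varsity and novice bracket sizes, which
    # is then used below to find the corresponding bracket in the other division.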
var_to_nov = TabSettings.get("var_to_nov", 2)
var_to_nov = offset_to_quotient(var_to_nov)
other_round_num = num_teams / var_to_nov
if type_of_round == BreakingTeam.NOVICE:
other_round_num = num_teams * var_to_nov
other_round_type = BreakingTeam.VARSITY \
if type_of_round == BreakingTeam.NOVICE \
else BreakingTeam.NOVICE
pairing_exists = len(outrounds) > 0
lost_outrounds = [t.loser.id for t in Outround.objects.all() if t.loser]
excluded_teams = BreakingTeam.objects.filter(
type_of_team=type_of_round
).exclude(
team__id__in=lost_outrounds
)
excluded_teams = [t.team for t in excluded_teams]
excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
type_of_round=type_of_round,
num_teams=num_teams,
gov_team=t
).exists()]
excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
type_of_round=type_of_round,
num_teams=num_teams,
opp_team=t
).exists()]
excluded_judges = Judge.objects.exclude(
judges_outrounds__num_teams=num_teams,
judges_outrounds__type_of_round=type_of_round,
).exclude(
judges_outrounds__type_of_round=other_round_type,
judges_outrounds__num_teams=other_round_num
).filter(
checkin__round_number=0
)
non_checkins = Judge.objects.exclude(
judges_outrounds__num_teams=num_teams,
judges_outrounds__type_of_round=type_of_round
).exclude(
judges_outrounds__type_of_round=other_round_type,
judges_outrounds__num_teams=other_round_num
).exclude(
checkin__round_number=0
)
available_rooms = Room.objects.exclude(
rooms_outrounds__num_teams=num_teams,
rooms_outrounds__type_of_round=type_of_round
).exclude(
rooms_outrounds__num_teams=other_round_num,
rooms_outrounds__type_of_round=other_round_type
)
checked_in_rooms = [r.room for r in RoomCheckIn.objects.filter(round_number=0)]
available_rooms = [r for r in available_rooms if r in checked_in_rooms]
size = max(list(
map(
len,
[excluded_teams, excluded_judges, non_checkins, available_rooms]
)))
# The minimum rank you want to warn on
warning = 5
excluded_people = list(
zip(*[
x + [""] * (size - len(x)) for x in [
list(excluded_teams),
list(excluded_judges),
list(non_checkins),
list(available_rooms)
]
]))
return render(request,
"outrounds/pairing_base.html",
locals())
def alternative_judges(request, round_id, judge_id=None):
round_obj = Outround.objects.get(id=int(round_id))
round_gov, round_opp = round_obj.gov_team, round_obj.opp_team
# All of these variables are for the convenience of the template
try:
current_judge_id = int(judge_id)
current_judge_obj = Judge.objects.get(id=current_judge_id)
current_judge_name = current_judge_obj.name
current_judge_rank = current_judge_obj.rank
except TypeError:
current_judge_id, current_judge_obj, current_judge_rank = "", "", ""
current_judge_name = "No judge"
var_to_nov = TabSettings.get("var_to_nov", 2)
var_to_nov = offset_to_quotient(var_to_nov)
other_round_num = round_obj.num_teams / var_to_nov
if round_obj.type_of_round == BreakingTeam.NOVICE:
other_round_num = round_obj.num_teams * var_to_nov
other_round_type = BreakingTeam.NOVICE \
if round_obj.type_of_round == BreakingTeam.VARSITY \
else BreakingTeam.VARSITY
excluded_judges = Judge.objects.exclude(
judges_outrounds__num_teams=round_obj.num_teams,
judges_outrounds__type_of_round=round_obj.type_of_round
).exclude(
judges_outrounds__num_teams=other_round_num,
judges_outrounds__type_of_round=other_round_type
).filter(
checkin__round_number=0
)
query = Q(
judges_outrounds__num_teams=round_obj.num_teams,
judges_outrounds__type_of_round=round_obj.type_of_round
)
query = query | Q(
judges_outrounds__num_teams=other_round_num,
judges_outrounds__type_of_round=other_round_type
)
included_judges = Judge.objects.filter(query) \
.filter(checkin__round_number=0) \
.distinct()
def can_judge(judge, team1, team2):
query = Q(judge=judge, team=team1) | Q(judge=judge, team=team2)
return not Scratch.objects.filter(query).exists()
excluded_judges = [(j.name, j.id, float(j.rank))
for j in excluded_judges if can_judge(j, round_gov, round_opp)]
included_judges = [(j.name, j.id, float(j.rank))
for j in included_judges if can_judge(j, round_gov, round_opp)]
included_judges = sorted(included_judges, key=lambda x: -x[2])
excluded_judges = sorted(excluded_judges, key=lambda x: -x[2])
return render(request, "pairing/judge_dropdown.html", locals())
def alternative_teams(request, round_id, current_team_id, position):
round_obj = Outround.objects.get(pk=round_id)
current_team = Team.objects.get(pk=current_team_id)
breaking_teams_by_type = [t.team.id
for t in BreakingTeam.objects.filter(
type_of_team=current_team.breaking_team.type_of_team
)]
excluded_teams = Team.objects.filter(
id__in=breaking_teams_by_type
).exclude(
gov_team_outround__num_teams=round_obj.num_teams
).exclude(
opp_team_outround__num_teams=round_obj.num_teams
).exclude(pk=current_team_id)
included_teams = Team.objects.filter(
id__in=breaking_teams_by_type
).exclude(
pk__in=excluded_teams
)
return render(request, "pairing/team_dropdown.html", locals())
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def assign_team(request, round_id, position, team_id):
try:
round_obj = Outround.objects.get(id=int(round_id))
team_obj = Team.objects.get(id=int(team_id))
if position.lower() == "gov":
round_obj.gov_team = team_obj
elif position.lower() == "opp":
round_obj.opp_team = team_obj
else:
raise ValueError("Got invalid position: " + position)
round_obj.save()
data = {
"success": True,
"team": {
"id": team_obj.id,
"name": team_obj.name
},
}
except Exception:
emit_current_exception()
data = {"success": False}
return JsonResponse(data)
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def assign_judge(request, round_id, judge_id, remove_id=None):
try:
round_obj = Outround.objects.get(id=int(round_id))
judge_obj = Judge.objects.get(id=int(judge_id))
round_obj.judges.add(judge_obj)
if remove_id is not None:
remove_obj = Judge.objects.get(id=int(remove_id))
round_obj.judges.remove(remove_obj)
if remove_obj == round_obj.chair:
round_obj.chair = round_obj.judges.order_by("-rank").first()
elif not round_obj.chair:
round_obj.chair = judge_obj
round_obj.save()
data = {
"success": True,
"chair_id": round_obj.chair.id,
"round_id": round_obj.id,
"judge_name": judge_obj.name,
"judge_rank": float(judge_obj.rank),
"judge_id": judge_obj.id
}
except Exception:
emit_current_exception()
data = {"success": False}
return JsonResponse(data)
def enter_result(request,
round_id,
form_class=OutroundResultEntryForm):
round_obj = Outround.objects.get(id=round_id)
redirect_to = reverse("outround_pairing_view",
kwargs={
"num_teams": round_obj.num_teams,
"type_of_round": round_obj.type_of_round
})
if request.method == "POST":
form = form_class(request.POST, round_instance=round_obj)
if form.is_valid():
try:
form.save()
except ValueError:
return redirect_and_flash_error(
request, "Invalid round result, could not remedy.")
return redirect_and_flash_success(request,
"Result entered successfully",
path=redirect_to)
else:
form_kwargs = {"round_instance": round_obj}
form = form_class(**form_kwargs)
return render(
request, "outrounds/ballot.html", {
"form": form,
"title": "Entering Ballot for {}".format(round_obj),
"gov_team": round_obj.gov_team,
"opp_team": round_obj.opp_team,
})
def pretty_pair(request, type_of_round=BreakingTeam.VARSITY, printable=False):
gov_opp_display = TabSettings.get("gov_opp_display", 0)
round_number = 256
if type_of_round == BreakingTeam.VARSITY:
round_number = TabSettings.get("var_teams_visible", 256)
else:
round_number = TabSettings.get("nov_teams_visible", 256)
round_pairing = Outround.objects.filter(
num_teams__gte=round_number,
type_of_round=type_of_round
)
unique_values = round_pairing.values_list("num_teams")
unique_values = list(set([value[0] for value in unique_values]))
unique_values.sort(key=lambda v: v, reverse=True)
outround_pairings = []
for value in unique_values:
lost_outrounds = [t.loser.id for t in Outround.objects.all() if t.loser]
excluded_teams = BreakingTeam.objects.filter(
type_of_team=type_of_round
).exclude(
team__id__in=lost_outrounds
)
excluded_teams = [t.team for t in excluded_teams]
excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
type_of_round=type_of_round,
num_teams=value,
gov_team=t
).exists()]
excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
type_of_round=type_of_round,
num_teams=value,
opp_team=t
).exists()]
outround_pairings.append({
"label": "[%s] Ro%s" % ("N" if type_of_round else "V", value),
"rounds": Outround.objects.filter(num_teams=value,
type_of_round=type_of_round),
"excluded": excluded_teams
})
label = "%s Outrounds Pairings" % ("Novice" if type_of_round else "Varsity",)
round_pairing = list(round_pairing)
    # We want a random-looking but constant ordering of the rounds
random.seed(0xBEEF)
random.shuffle(round_pairing)
round_pairing.sort(key=lambda r: r.gov_team.name)
paired_teams = [team.gov_team for team in round_pairing
] + [team.opp_team for team in round_pairing]
team_count = len(paired_teams)
pairing_exists = True
#pairing_exists = TabSettings.get("pairing_released", 0) == 1
printable = printable
sidelock = TabSettings.get("sidelock", 0)
choice = TabSettings.get("choice", 0)
return render(request, "outrounds/pretty_pairing.html", locals())
def pretty_pair_print(request, type_of_round=BreakingTeam.VARSITY):
return pretty_pair(request, type_of_round, True)
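# var_teams_visible / nov_teams_visible hold the smallest bracket size that is
# publicly visible. Toggling release for a bracket sets the value to num_teams
# (bracket shown) or back to num_teams * 2 (bracket hidden again).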
def toggle_pairing_released(request, type_of_round, num_teams):
old = 256
if type_of_round == BreakingTeam.VARSITY:
old = TabSettings.get("var_teams_visible", 256)
if old == num_teams:
TabSettings.set("var_teams_visible", num_teams * 2)
else:
TabSettings.set("var_teams_visible", num_teams)
else:
old = TabSettings.get("nov_teams_visible", 256)
if old == num_teams:
TabSettings.set("nov_teams_visible", num_teams * 2)
else:
TabSettings.set("nov_teams_visible", num_teams)
data = {"success": True, "pairing_released": not old == num_teams}
return JsonResponse(data)
def update_choice(request, outround_id):
outround = get_object_or_404(Outround, pk=outround_id)
outround.choice += 1
if outround.choice == 3:
outround.choice = 0
outround.save()
data = {"success": True,
"data": "%s choice" % (
outround.get_choice_display(),
)}
return JsonResponse(data)
def forum_view(request, type_of_round):
outrounds = Outround.objects.exclude(
victor=Outround.UNKNOWN
).filter(
type_of_round=type_of_round
)
rounds = outrounds.values_list("num_teams")
rounds = [r[0] for r in rounds]
rounds = list(set(rounds))
rounds.sort(key=lambda r: r, reverse=True)
results = []
for _round in rounds:
to_add = {}
to_display = outrounds.filter(num_teams=_round)
to_add["label"] = "[%s] Ro%s" % ("N" if type_of_round else "V", _round)
to_add["results"] = []
for outround in to_display:
to_add["results"] += [
"""[%s] %s (%s, %s) from %s%s (%s) drops to
[%s] %s (%s, %s) from %s%s (%s)""" % (
outround.loser.breaking_team.seed,
outround.loser.display,
outround.loser.debaters.first().name,
outround.loser.debaters.last().name,
outround.loser.school.name,
" / " + outround.loser.hybrid_school.name \
if outround.loser.hybrid_school else "",
"GOV" if outround.loser == outround.gov_team else "OPP",
outround.winner.breaking_team.seed,
outround.winner.display,
outround.winner.debaters.first().name,
outround.winner.debaters.last().name,
outround.winner.school.name,
" / " + outround.winner.hybrid_school.name \
if outround.winner.hybrid_school else "",
"GOV" if outround.winner == outround.gov_team else "OPP",
)
]
results.append(to_add)
return render(request,
"outrounds/forum_result.html",
locals())
|
import sys
import traceback
import uuid
import os
import importlib
import yaml
import subprocess
import cProfile
import io
import pstats
import logging
import asyncio
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class AppManagement:
def __init__(self, ad: AppDaemon, config):
self.AD = ad
self.logger = ad.logging.get_child("_app_management")
self.error = ad.logging.get_error()
self.diag = ad.logging.get_diag()
self.monitored_files = {}
self.filter_files = {}
self.modules = {}
self.objects = {}
self.check_app_updates_profile_stats = None
# Initialize config file tracking
self.app_config_file_modified = 0
self.app_config_files = {}
self.module_dirs = []
self.app_config_file_modified = 0
self.app_config = {}
self.global_module_dependencies = {}
self.app_config_file = config
self.apps_initialized = False
# first declare sensors
self.active_apps_sensor = "sensor.active_apps"
self.inactive_apps_sensor = "sensor.inactive_apps"
self.total_apps_sensor = "sensor.total_apps"
# Add Path for adbase
sys.path.insert(0, os.path.dirname(__file__))
#
# Register App Services
#
self.AD.services.register_service("appdaemon", "app", "start", self.manage_services)
self.AD.services.register_service("appdaemon", "app", "stop", self.manage_services)
self.AD.services.register_service("appdaemon", "app", "restart", self.manage_services)
self.AD.services.register_service("appdaemon", "app", "reload", self.manage_services)
self.active_apps = []
self.inactive_apps = []
self.non_apps = ["global_modules", "sequence"]
async def set_state(self, name, **kwargs):
# not a fully qualified entity name
if name.find(".") == -1:
entity_id = "app.{}".format(name)
else:
entity_id = name
await self.AD.state.set_state("_app_management", "admin", entity_id, _silent=True, **kwargs)
async def get_state(self, name, **kwargs):
# not a fully qualified entity name
if name.find(".") == -1:
entity_id = "app.{}".format(name)
else:
entity_id = name
return await self.AD.state.get_state("_app_management", "admin", entity_id, **kwargs)
async def add_entity(self, name, state, attributes):
# not a fully qualified entity name
if name.find(".") == -1:
entity_id = "app.{}".format(name)
else:
entity_id = name
await self.AD.state.add_entity("admin", entity_id, state, attributes)
async def remove_entity(self, name):
await self.AD.state.remove_entity("admin", "app.{}".format(name))
async def init_admin_stats(self):
# create sensors
await self.add_entity(self.active_apps_sensor, 0, {"friendly_name": "Active Apps"})
await self.add_entity(self.inactive_apps_sensor, 0, {"friendly_name": "Inactive Apps"})
await self.add_entity(self.total_apps_sensor, 0, {"friendly_name": "Total Apps"})
async def terminate(self):
self.logger.debug("terminate() called for app_management")
if self.apps_initialized is True:
await self.check_app_updates(mode="term")
async def dump_objects(self):
self.diag.info("--------------------------------------------------")
self.diag.info("Objects")
self.diag.info("--------------------------------------------------")
for object_ in self.objects.keys():
self.diag.info("%s: %s", object_, self.objects[object_])
self.diag.info("--------------------------------------------------")
async def get_app(self, name):
if name in self.objects:
return self.objects[name]["object"]
else:
return None
def get_app_info(self, name):
if name in self.objects:
return self.objects[name]
else:
return None
async def get_app_instance(self, name, id):
if name in self.objects and self.objects[name]["id"] == id:
return self.AD.app_management.objects[name]["object"]
else:
return None
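    # A user app is any class referenced from apps.yaml that exposes an
    # initialize() callback (plain function or coroutine); terminate() is
    # optional. A minimal sketch of such an app (illustrative only, assuming
    # the usual Home Assistant plugin base class):
    #
    #     import appdaemon.plugins.hass.hassapi as hass
    #
    #     class HelloApp(hass.Hass):
    #         def initialize(self):
    #             self.log("HelloApp started")
    #
    #         def terminate(self):
    #             self.log("HelloApp stopping")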
async def initialize_app(self, name):
if name in self.objects:
init = getattr(self.objects[name]["object"], "initialize", None)
if init is None:
self.logger.warning("Unable to find initialize() function in module %s - skipped", name)
await self.increase_inactive_apps(name)
return
else:
self.logger.warning("Unable to find module %s - initialize() skipped", name)
await self.increase_inactive_apps(name)
return
# Call its initialize function
try:
if asyncio.iscoroutinefunction(init):
await init()
else:
await utils.run_in_executor(self, init)
await self.set_state(name, state="idle")
await self.increase_active_apps(name)
event_data = {"event_type": "app_initialized", "data": {"app": name}}
await self.AD.events.process_event("admin", event_data)
except TypeError:
self.AD.threading.report_callback_sig(name, "initialize", init, {})
except Exception:
error_logger = logging.getLogger("Error.{}".format(name))
error_logger.warning("-" * 60)
error_logger.warning("Unexpected error running initialize() for %s", name)
error_logger.warning("-" * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning("-" * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
await self.set_state(name, state="initialize_error")
await self.increase_inactive_apps(name)
async def terminate_app(self, name):
term = None
if name in self.objects and hasattr(self.objects[name]["object"], "terminate"):
self.logger.info("Calling terminate() for {}".format(name))
# Call terminate directly rather than via worker thread
# so we know terminate has completed before we move on
term = self.objects[name]["object"].terminate
if term is not None:
try:
if asyncio.iscoroutinefunction(term):
await term()
else:
await utils.run_in_executor(self, term)
except TypeError:
self.AD.threading.report_callback_sig(name, "terminate", term, {})
except BaseException:
error_logger = logging.getLogger("Error.{}".format(name))
error_logger.warning("-" * 60)
error_logger.warning("Unexpected error running terminate() for %s", name)
error_logger.warning("-" * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning("-" * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning(
"Logged an error to %s", self.AD.logging.get_filename("error_log"),
)
if name in self.objects:
del self.objects[name]
if name in self.global_module_dependencies:
del self.global_module_dependencies[name]
await self.increase_inactive_apps(name)
await self.AD.callbacks.clear_callbacks(name)
self.AD.futures.cancel_futures(name)
await self.AD.sched.terminate_app(name)
await self.set_state(name, state="terminated")
await self.set_state(name, instancecallbacks=0)
event_data = {"event_type": "app_terminated", "data": {"app": name}}
await self.AD.events.process_event("admin", event_data)
if self.AD.http is not None:
await self.AD.http.terminate_app(name)
async def start_app(self, app):
await self.init_object(app)
if "disable" in self.app_config[app] and self.app_config[app]["disable"] is True:
pass
else:
await self.initialize_app(app)
async def stop_app(self, app):
try:
self.logger.info("Terminating %s", app)
await self.terminate_app(app)
except Exception:
error_logger = logging.getLogger("Error.{}".format(app))
error_logger.warning("-" * 60)
error_logger.warning("Unexpected error terminating app: %s:", app)
error_logger.warning("-" * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning("-" * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
async def restart_app(self, app):
await self.stop_app(app)
await self.start_app(app)
def get_app_debug_level(self, app):
if app in self.objects:
return self.AD.logging.get_level_from_int(self.objects[app]["object"].logger.getEffectiveLevel())
else:
return "None"
async def init_object(self, name):
app_args = self.app_config[name]
self.logger.info(
"Initializing app %s using class %s from module %s", name, app_args["class"], app_args["module"],
)
if self.get_file_from_module(app_args["module"]) is not None:
if "pin_thread" in app_args:
if app_args["pin_thread"] < 0 or app_args["pin_thread"] >= self.AD.threading.total_threads:
self.logger.warning(
"pin_thread out of range ({}) in app definition for {} - app will be discarded".format(
app_args["pin_thread"], name
)
)
return
else:
pin = app_args["pin_thread"]
else:
pin = -1
modname = await utils.run_in_executor(self, __import__, app_args["module"])
app_class = getattr(modname, app_args["class"], None)
if app_class is None:
self.logger.warning(
"Unable to find class %s in module %s - '%s' is not initialized",
app_args["class"],
app_args["module"],
name,
)
await self.increase_inactive_apps(name)
else:
self.objects[name] = {
"type": "app",
"object": app_class(
self.AD, name, self.AD.logging, app_args, self.AD.config, self.app_config, self.AD.global_vars,
),
"id": uuid.uuid4().hex,
"pin_app": self.AD.threading.app_should_be_pinned(name),
"pin_thread": pin,
}
else:
            self.logger.warning(
                "Unable to find module %s - '%s' is not initialized", app_args["module"], name,
            )
await self.increase_inactive_apps(name)
def init_plugin_object(self, name, object):
self.objects[name] = {
"type": "plugin",
"object": object,
"id": uuid.uuid4().hex,
"pin_app": False,
"pin_thread": -1,
}
async def read_config(self): # noqa: C901
new_config = None
if await utils.run_in_executor(self, os.path.isfile, self.app_config_file):
self.logger.warning(
"apps.yaml in the Config directory is deprecated. Please move apps.yaml to the apps directory."
)
            new_config = await utils.run_in_executor(self, self.read_config_file, self.app_config_file)
else:
for root, subdirs, files in os.walk(self.AD.app_dir):
subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
if root[-11:] != "__pycache__":
for file in files:
if file[-5:] == ".yaml" and file[0] != ".":
self.logger.debug("Reading %s", os.path.join(root, file))
config = await utils.run_in_executor(self, self.read_config_file, os.path.join(root, file))
valid_apps = {}
if type(config).__name__ == "dict":
for app in config:
if config[app] is not None:
if app == "global_modules":
#
# Check the parameter format for string or list
#
if isinstance(config[app], str):
valid_apps[app] = [config[app]]
elif isinstance(config[app], list):
valid_apps[app] = config[app]
else:
if self.AD.invalid_yaml_warnings:
self.logger.warning(
"global_modules should be a list or a string in File '%s' - ignoring",
file,
)
elif app == "sequence":
#
# We don't care what it looks like just pass it through
#
valid_apps[app] = config[app]
elif (
isinstance(config[app], dict)
and "class" in config[app]
and "module" in config[app]
):
valid_apps[app] = config[app]
else:
if self.AD.invalid_yaml_warnings:
self.logger.warning(
"App '%s' missing 'class' or 'module' entry - ignoring", app,
)
else:
if self.AD.invalid_yaml_warnings:
self.logger.warning(
"File '%s' invalid structure - ignoring", os.path.join(root, file),
)
if new_config is None:
new_config = {}
for app in valid_apps:
if app == "global_modules":
if app in new_config:
new_config[app].extend(valid_apps[app])
continue
if app == "sequence":
if app in new_config:
new_config[app] = {
**new_config[app],
**valid_apps[app],
}
continue
if app in new_config:
self.logger.warning(
"File '%s' duplicate app: %s - ignoring", os.path.join(root, file), app,
)
else:
new_config[app] = valid_apps[app]
await self.AD.sequences.add_sequences(new_config.get("sequence", {}))
return new_config
# Run in executor
def check_later_app_configs(self, last_latest):
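        # Rough sketch (illustrative values only, not real paths) of what the
        # directory-scan branch below returns:
        #   {"latest": 1610000000.0,
        #    "files": ["/conf/apps/lights.yaml"],      # new or modified since last_latest
        #    "deleted": ["/conf/apps/old.yaml"]}       # previously tracked, now gone
        # The single-apps.yaml branch returns only "latest" and "files".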
if os.path.isfile(self.app_config_file):
ts = os.path.getmtime(self.app_config_file)
return {
"latest": ts,
"files": [{"name": self.app_config_file, "ts": os.path.getmtime(self.app_config_file)}],
}
else:
later_files = {}
app_config_files = []
later_files["files"] = []
later_files["latest"] = last_latest
later_files["deleted"] = []
for root, subdirs, files in os.walk(self.AD.app_dir):
subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
if root[-11:] != "__pycache__":
for file in files:
if file[-5:] == ".yaml":
path = os.path.join(root, file)
app_config_files.append(path)
ts = os.path.getmtime(path)
if ts > last_latest:
later_files["files"].append(path)
if ts > later_files["latest"]:
later_files["latest"] = ts
for file in self.app_config_files:
if file not in app_config_files:
later_files["deleted"].append(file)
if self.app_config_files != {}:
for file in app_config_files:
if file not in self.app_config_files:
later_files["files"].append(file)
self.app_config_files = app_config_files
return later_files
# Run in executor
def read_config_file(self, file):
new_config = None
try:
with open(file, "r") as yamlfd:
config_file_contents = yamlfd.read()
try:
new_config = yaml.load(config_file_contents, Loader=yaml.SafeLoader)
except yaml.YAMLError as exc:
self.logger.warning("Error loading configuration")
if hasattr(exc, "problem_mark"):
if exc.context is not None:
self.logger.warning("parser says")
self.logger.warning(str(exc.problem_mark))
self.logger.warning(str(exc.problem) + " " + str(exc.context))
else:
self.logger.warning("parser says")
self.logger.warning(str(exc.problem_mark))
self.logger.warning(str(exc.problem))
return new_config
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error loading config file: %s", file)
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
# noinspection PyBroadException
async def check_config(self, silent=False, add_threads=True): # noqa: C901
terminate_apps = {}
initialize_apps = {}
total_apps = len(self.app_config)
try:
latest = await utils.run_in_executor(self, self.check_later_app_configs, self.app_config_file_modified)
self.app_config_file_modified = latest["latest"]
if latest["files"] or latest["deleted"]:
if silent is False:
self.logger.info("Reading config")
new_config = await self.read_config()
if new_config is None:
if silent is False:
self.logger.warning("New config not applied")
return
for file in latest["deleted"]:
if silent is False:
self.logger.info("%s deleted", file)
for file in latest["files"]:
if silent is False:
self.logger.info("%s added or modified", file)
# Check for changes
for name in self.app_config:
if name in self.non_apps:
continue
if name in new_config:
if self.app_config[name] != new_config[name]:
# Something changed, clear and reload
if silent is False:
self.logger.info("App '%s' changed", name)
terminate_apps[name] = 1
initialize_apps[name] = 1
else:
# Section has been deleted, clear it out
if silent is False:
self.logger.info("App '{}' deleted".format(name))
#
# Since the entry has been deleted we can't sensibly determine dependencies
# So just immediately terminate it
#
await self.terminate_app(name)
await self.remove_entity(name)
for name in new_config:
if name in self.non_apps:
continue
if name not in self.app_config:
#
# New section added!
#
if "class" in new_config[name] and "module" in new_config[name]:
self.logger.info("App '%s' added", name)
initialize_apps[name] = 1
await self.add_entity(
name, "loaded", {"totalcallbacks": 0, "instancecallbacks": 0, "args": new_config[name]},
)
elif name in self.non_apps:
pass
else:
if self.AD.invalid_yaml_warnings:
if silent is False:
self.logger.warning(
"App '%s' missing 'class' or 'module' entry - ignoring", name,
)
self.app_config = new_config
total_apps = len(self.app_config)
for name in self.non_apps:
if name in self.app_config:
total_apps -= 1 # remove one
# if silent is False:
self.logger.info("Found %s total apps", total_apps)
await self.set_state(self.total_apps_sensor, state=total_apps)
active_apps = self.get_active_app_count()
inactive_apps = total_apps - active_apps
if inactive_apps > 0:
self.logger.info("Found %s active apps", active_apps)
self.logger.info("Found %s inactive apps", inactive_apps)
            # Now that we know how many apps are active, we can create new threads if auto-pinning
active_apps = self.get_active_app_count()
if add_threads is True and self.AD.threading.auto_pin is True:
if active_apps > self.AD.threading.thread_count:
for i in range(active_apps - self.AD.threading.thread_count):
await self.AD.threading.add_thread(False, True)
return {
"init": initialize_apps,
"term": terminate_apps,
"total": total_apps,
"active": active_apps,
}
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error:")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
def get_active_app_count(self):
c = 0
for name in self.app_config:
if "disable" in self.app_config[name] and self.app_config[name]["disable"] is True:
pass
elif name in self.non_apps:
pass
else:
c += 1
return c
def get_app_from_file(self, file):
module = self.get_module_from_path(file)
for app in self.app_config:
if "module" in self.app_config[app] and self.app_config[app]["module"] == module:
return app
return None
# noinspection PyBroadException
# Run in executor
def read_app(self, file, reload=False):
name = os.path.basename(file)
module_name = os.path.splitext(name)[0]
# Import the App
if reload:
self.logger.info("Reloading Module: %s", file)
file, ext = os.path.splitext(name)
#
# Reload
#
try:
importlib.reload(self.modules[module_name])
except KeyError:
if name not in sys.modules:
# Probably failed to compile on initial load
# so we need to re-import not reload
self.read_app(file)
else:
# A real KeyError!
raise
else:
app = self.get_app_from_file(file)
if app is not None:
self.logger.info("Loading App Module: %s", file)
if module_name not in self.modules:
self.modules[module_name] = importlib.import_module(module_name)
else:
# We previously imported it so we need to reload to pick up any potential changes
importlib.reload(self.modules[module_name])
elif "global_modules" in self.app_config and module_name in self.app_config["global_modules"]:
self.logger.info("Loading Global Module: %s", file)
self.modules[module_name] = importlib.import_module(module_name)
else:
if self.AD.missing_app_warnings:
self.logger.warning("No app description found for: %s - ignoring", file)
@staticmethod
def get_module_from_path(path):
name = os.path.basename(path)
module_name = os.path.splitext(name)[0]
return module_name
def get_file_from_module(self, mod):
for file in self.monitored_files:
module_name = self.get_module_from_path(file)
if module_name == mod:
return file
return None
# Run in executor
def process_filters(self):
if "filters" in self.AD.config:
for filter in self.AD.config["filters"]:
for root, subdirs, files in os.walk(self.AD.app_dir, topdown=True):
# print(root, subdirs, files)
#
# Prune dir list
#
subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
ext = filter["input_ext"]
extlen = len(ext) * -1
for file in files:
run = False
if file[extlen:] == ext:
infile = os.path.join(root, file)
modified = os.path.getmtime(infile)
if infile in self.filter_files:
if self.filter_files[infile] < modified:
run = True
else:
self.logger.info("Found new filter file %s", infile)
run = True
if run is True:
self.logger.info("Running filter on %s", infile)
self.filter_files[infile] = modified
# Run the filter
outfile = utils.rreplace(infile, ext, filter["output_ext"], 1)
command_line = filter["command_line"].replace("$1", infile)
command_line = command_line.replace("$2", outfile)
try:
subprocess.Popen(command_line, shell=True)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected running filter on: %s:", infile)
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
@staticmethod
def file_in_modules(file, modules):
for mod in modules:
if mod["name"] == file:
return True
return False
@staticmethod
def check_file(file):
fh = open(file)
fh.close()
# @_timeit
async def check_app_updates(self, plugin=None, mode="normal"): # noqa: C901
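        # Overall flow (a summary of the code below, not extra behaviour): run any
        # configured source filters, diff the YAML app config, scan the app
        # directory for new, changed or removed .py files, stop apps that need it
        # in reverse dependency order, import or reload their modules, recreate the
        # app objects and finally call initialize() on each in dependency order.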
if self.AD.apps is False:
return
        # Let's add some profiling
pr = None
if self.AD.check_app_updates_profile is True:
pr = cProfile.Profile()
pr.enable()
# Process filters
await utils.run_in_executor(self, self.process_filters)
# Get list of apps we need to terminate and/or initialize
apps = await self.check_config()
found_files = []
modules = []
for root, subdirs, files in await utils.run_in_executor(self, os.walk, self.AD.app_dir, topdown=True):
# print(root, subdirs, files)
#
# Prune dir list
#
subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
if root[-11:] != "__pycache__":
if root not in self.module_dirs:
self.logger.info("Adding %s to module import path", root)
sys.path.insert(0, root)
self.module_dirs.append(root)
for file in files:
if file[-3:] == ".py":
found_files.append(os.path.join(root, file))
for file in found_files:
if file == os.path.join(self.AD.app_dir, "__init__.py"):
continue
try:
# check we can actually open the file
await utils.run_in_executor(self, self.check_file, file)
modified = await utils.run_in_executor(self, os.path.getmtime, file)
if file in self.monitored_files:
if self.monitored_files[file] < modified:
modules.append({"name": file, "reload": True})
self.monitored_files[file] = modified
else:
self.logger.debug("Found module %s", file)
modules.append({"name": file, "reload": False})
self.monitored_files[file] = modified
except IOError as err:
self.logger.warning("Unable to read app %s: %s - skipping", file, err)
# Check for deleted modules and add them to the terminate list
deleted_modules = []
for file in self.monitored_files:
if file not in found_files or mode == "term":
deleted_modules.append(file)
self.logger.info("Removing module %s", file)
for file in deleted_modules:
del self.monitored_files[file]
for app in self.apps_per_module(self.get_module_from_path(file)):
apps["term"][app] = 1
# Add any apps we need to reload because of file changes
for module in modules:
for app in self.apps_per_module(self.get_module_from_path(module["name"])):
if module["reload"]:
apps["term"][app] = 1
apps["init"][app] = 1
if "global_modules" in self.app_config:
for gm in utils.single_or_list(self.app_config["global_modules"]):
if gm == self.get_module_from_path(module["name"]):
for app in self.apps_per_global_module(gm):
if module["reload"]:
apps["term"][app] = 1
apps["init"][app] = 1
if plugin is not None:
self.logger.info("Processing restart for %s", plugin)
# This is a restart of one of the plugins so check which apps need to be restarted
for app in self.app_config:
reload = False
if app in self.non_apps:
continue
if "plugin" in self.app_config[app]:
for this_plugin in utils.single_or_list(self.app_config[app]["plugin"]):
if this_plugin == plugin:
# We got a match so do the reload
reload = True
break
elif plugin == "__ALL__":
reload = True
break
else:
                    # No plugin dependency specified, reload to err on the side of caution
reload = True
if reload is True:
apps["term"][app] = 1
apps["init"][app] = 1
# Terminate apps
if apps is not None and apps["term"]:
prio_apps = self.get_app_deps_and_prios(apps["term"], mode)
for app in sorted(prio_apps, key=prio_apps.get, reverse=True):
await self.stop_app(app)
# Load/reload modules
for mod in modules:
try:
await utils.run_in_executor(self, self.read_app, mod["name"], mod["reload"])
except Exception:
self.error.warning("-" * 60)
self.error.warning("Unexpected error loading module: %s:", mod["name"])
self.error.warning("-" * 60)
self.error.warning(traceback.format_exc())
self.error.warning("-" * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning("Unexpected error loading module: %s:", mod["name"])
self.logger.warning("Removing associated apps:")
module = self.get_module_from_path(mod["name"])
for app in self.app_config:
if "module" in self.app_config[app] and self.app_config[app]["module"] == module:
if apps["init"] and app in apps["init"]:
del apps["init"][app]
self.logger.warning("%s", app)
await self.set_state(app, state="compile_error")
if apps is not None and apps["init"]:
prio_apps = self.get_app_deps_and_prios(apps["init"], mode)
# Load Apps
for app in sorted(prio_apps, key=prio_apps.get):
try:
if "disable" in self.app_config[app] and self.app_config[app]["disable"] is True:
self.logger.info("%s is disabled", app)
await self.set_state(app, state="disabled")
await self.increase_inactive_apps(app)
else:
await self.init_object(app)
except Exception:
error_logger = logging.getLogger("Error.{}".format(app))
error_logger.warning("-" * 60)
error_logger.warning("Unexpected error initializing app: %s:", app)
error_logger.warning("-" * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning("-" * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning(
"Logged an error to %s", self.AD.logging.get_filename("error_log"),
)
await self.AD.threading.calculate_pin_threads()
# Call initialize() for apps
for app in sorted(prio_apps, key=prio_apps.get):
if "disable" in self.app_config[app] and self.app_config[app]["disable"] is True:
pass
else:
await self.initialize_app(app)
if self.AD.check_app_updates_profile is True:
pr.disable()
s = io.StringIO()
sortby = "cumulative"
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
self.check_app_updates_profile_stats = s.getvalue()
self.apps_initialized = True
def get_app_deps_and_prios(self, applist, mode):
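        # Priority sketch (hypothetical app names, not from any real config): apps
        # that take part in a dependency chain are ordered topologically and given
        # priorities 50.1, 50.1001, 50.1002, ...; apps outside any chain use their
        # configured "priority" (init mode only) or default to 50.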
# Build a list of modules and their dependencies
deplist = []
for app in applist:
if app not in deplist:
deplist.append(app)
self.get_dependent_apps(app, deplist)
        # Need to give the topological sort a full list of apps or it will fail
full_list = list(self.app_config.keys())
deps = []
for app in full_list:
dependees = []
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
if dep in self.app_config:
dependees.append(dep)
else:
self.logger.warning("Unable to find app %s in dependencies for %s", dep, app)
self.logger.warning("Ignoring app %s", app)
deps.append((app, dependees))
prio_apps = {}
prio = float(50.1)
try:
for app in self.topological_sort(deps):
if "dependencies" in self.app_config[app] or self.app_has_dependents(app):
prio_apps[app] = prio
prio += float(0.0001)
else:
if mode == "init" and "priority" in self.app_config[app]:
prio_apps[app] = float(self.app_config[app]["priority"])
else:
prio_apps[app] = float(50)
except ValueError:
pass
# now we remove the ones we aren't interested in
final_apps = {}
for app in prio_apps:
if app in deplist:
final_apps[app] = prio_apps[app]
return final_apps
def app_has_dependents(self, name):
for app in self.app_config:
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
if dep == name:
return True
return False
def get_dependent_apps(self, dependee, deps):
for app in self.app_config:
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
# print("app= {} dep = {}, dependee = {} deps = {}".format(app, dep, dependee, deps))
if dep == dependee and app not in deps:
deps.append(app)
new_deps = self.get_dependent_apps(app, deps)
if new_deps is not None:
deps.append(new_deps)
def topological_sort(self, source):
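        # Worked example (hypothetical app names): for
        #   source = [("app_a", ["app_b"]), ("app_b", []), ("app_c", ["app_a"])]
        # the generator yields "app_b", then "app_a", then "app_c". Each pass emits
        # every entry whose remaining dependency set is empty, then removes those
        # names from the sets of the entries still pending.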
pending = [(name, set(deps)) for name, deps in source] # copy deps so we can modify set in-place
emitted = []
while pending:
next_pending = []
next_emitted = []
for entry in pending:
name, deps = entry
deps.difference_update(emitted) # remove deps we emitted last pass
if deps: # still has deps? recheck during next pass
next_pending.append(entry)
else: # no more deps? time to emit
yield name
emitted.append(name) # <-- not required, but helps preserve original ordering
next_emitted.append(name) # remember what we emitted for difference_update() in next pass
if not next_emitted:
                # all entries still have unmet deps, so there must be a circular
                # (or missing) dependency among the remaining apps
self.logger.warning("Cyclic or missing app dependencies detected")
for pend in next_pending:
deps = ""
for dep in pend[1]:
deps += "{} ".format(dep)
self.logger.warning("%s depends on %s", pend[0], deps)
raise ValueError("cyclic dependency detected")
pending = next_pending
emitted = next_emitted
def apps_per_module(self, module):
apps = []
for app in self.app_config:
if app not in self.non_apps and self.app_config[app]["module"] == module:
apps.append(app)
return apps
def apps_per_global_module(self, module):
apps = []
for app in self.app_config:
if "global_dependencies" in self.app_config[app]:
for gm in utils.single_or_list(self.app_config[app]["global_dependencies"]):
if gm == module:
apps.append(app)
for app, gms in self.global_module_dependencies.items():
for gm in gms:
if gm == module:
apps.append(app)
return apps
async def register_module_dependency(self, name, *modules):
for module in modules:
module_name = None
if isinstance(module, str):
module_name = module
elif isinstance(module, object) and module.__class__.__name__ == "module":
module_name = module.__name__
if (
module_name is not None
and "global_modules" in self.app_config
and module_name in self.app_config["global_modules"]
):
if name not in self.global_module_dependencies:
self.global_module_dependencies[name] = []
if module_name not in self.global_module_dependencies[name]:
self.global_module_dependencies[name].append(module_name)
else:
self.logger.warning(
"Module %s not in global_modules in register_module_dependency() for %s", module_name, name,
)
async def manage_services(self, namespace, domain, service, kwargs):
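        # Sketch of a typical call into this handler (hypothetical app name; the
        # namespace/domain strings are only illustrative of how it is registered):
        #   manage_services("admin", "app", "start", {"app": "hello_world"})
        # "reload" is the only service here that does not require an "app" argument.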
app = None
if "app" in kwargs:
app = kwargs["app"]
elif service == "reload":
pass
else:
self.logger.warning("App not specified when calling '%s' service. Specify App", service)
return None
if service != "reload" and app not in self.app_config:
self.logger.warning("Specified App '%s' is not a valid App", app)
return None
if service == "start":
await self.start_app(app)
elif service == "stop":
await self.stop_app(app)
elif service == "restart":
await self.restart_app(app)
elif service == "reload":
await self.check_app_updates(mode="init")
async def increase_active_apps(self, name):
if name not in self.active_apps:
self.active_apps.append(name)
if name in self.inactive_apps:
self.inactive_apps.remove(name)
active_apps = len(self.active_apps)
inactive_apps = len(self.inactive_apps)
await self.set_state(self.active_apps_sensor, state=active_apps)
await self.set_state(self.inactive_apps_sensor, state=inactive_apps)
async def increase_inactive_apps(self, name):
if name not in self.inactive_apps:
self.inactive_apps.append(name)
if name in self.active_apps:
self.active_apps.remove(name)
inactive_apps = len(self.inactive_apps)
active_apps = len(self.active_apps)
await self.set_state(self.active_apps_sensor, state=active_apps)
await self.set_state(self.inactive_apps_sensor, state=inactive_apps)
|
from statsmodels.compat.pandas import assert_frame_equal, make_dataframe
from datetime import datetime
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
assert_raises, assert_)
from numpy import array, column_stack
from statsmodels.tsa.filters._utils import pandas_wrapper
from statsmodels.datasets import macrodata
from pandas import DataFrame, date_range, concat
from statsmodels.tsa.filters.api import (bkfilter, hpfilter, cffilter,
convolution_filter, recursive_filter)
def test_bking1d():
# Test Baxter King band-pass filter. Results are taken from Stata
bking_results = array([
7.320813, 2.886914, -6.818976, -13.49436,
-13.27936, -9.405913, -5.691091, -5.133076, -7.273468,
-9.243364, -8.482916, -4.447764, 2.406559, 10.68433,
19.46414, 28.09749, 34.11066, 33.48468, 24.64598, 9.952399,
-4.265528, -12.59471, -13.46714, -9.049501, -3.011248,
.5655082, 2.897976, 7.406077, 14.67959, 18.651, 13.05891,
-2.945415, -24.08659, -41.86147, -48.68383, -43.32689,
-31.66654, -20.38356, -13.76411, -9.978693, -3.7704, 10.27108,
31.02847, 51.87613, 66.93117, 73.51951, 73.4053, 69.17468,
59.8543, 38.23899, -.2604809, -49.0107, -91.1128, -112.1574,
-108.3227, -86.51453, -59.91258, -40.01185, -29.70265,
-22.76396, -13.08037, 1.913622, 20.44045, 37.32873, 46.79802,
51.95937, 59.67393, 70.50803, 81.27311, 83.53191, 67.72536,
33.78039, -6.509092, -37.31579, -46.05207, -29.81496, 1.416417,
28.31503,
32.90134, 8.949259, -35.41895, -84.65775, -124.4288, -144.6036,
-140.2204, -109.2624, -53.6901, 15.07415, 74.44268, 104.0403,
101.0725, 76.58291, 49.27925, 36.15751, 36.48799, 37.60897,
27.75998, 4.216643, -23.20579, -39.33292, -36.6134, -20.90161,
-4.143123, 5.48432, 9.270075, 13.69573, 22.16675, 33.01987,
41.93186, 47.12222, 48.62164, 47.30701, 40.20537, 22.37898,
-7.133002, -43.3339, -78.51229, -101.3684, -105.2179,
-90.97147,
-68.30824, -48.10113, -35.60709, -31.15775, -31.82346,
-32.49278, -28.22499, -14.42852, 10.1827, 36.64189, 49.43468,
38.75517, 6.447761, -33.15883, -62.60446, -72.87829, -66.54629,
-52.61205, -38.06676, -26.19963, -16.51492, -7.007577,
.6125674,
7.866972, 14.8123, 22.52388, 30.65265, 39.47801, 49.05027,
59.02925,
72.88999, 95.08865, 125.8983, 154.4283, 160.7638, 130.6092,
67.84406, -7.070272, -68.08128, -99.39944, -104.911,
-100.2372, -98.11596, -104.2051, -114.0125, -113.3475,
-92.98669, -51.91707, -.7313812, 43.22938, 64.62762, 64.07226,
59.35707, 67.06026, 91.87247, 124.4591, 151.2402, 163.0648,
154.6432])
X = macrodata.load_pandas().data['realinv'].values
Y = bkfilter(X, 6, 32, 12)
assert_almost_equal(Y, bking_results, 4)
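# In the call above the positional arguments map to bkfilter(x, low=6, high=32, K=12):
# the band-pass keeps cycles of 6-32 quarters, and the K=12 lead/lag truncation is why
# the filtered series is 2*K = 24 observations shorter than the input.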
def test_bking2d():
# Test Baxter-King band-pass filter with 2d input
bking_results = array([
[7.320813, -.0374475], [2.886914, -.0430094],
[-6.818976, -.053456], [-13.49436, -.0620739], [-13.27936, -.0626929],
[-9.405913, -.0603022], [-5.691091, -.0630016], [-5.133076, -.0832268],
[-7.273468, -.1186448], [-9.243364, -.1619868], [-8.482916, -.2116604],
[-4.447764, -.2670747], [2.406559, -.3209931], [10.68433, -.3583075],
[19.46414, -.3626742], [28.09749, -.3294618], [34.11066, -.2773388],
[33.48468, -.2436127], [24.64598, -.2605531], [9.952399, -.3305166],
[-4.265528, -.4275561], [-12.59471, -.5076068], [-13.46714, -.537573],
[-9.049501, -.5205845], [-3.011248, -.481673], [.5655082, -.4403994],
[2.897976, -.4039957], [7.406077, -.3537394], [14.67959, -.2687359],
[18.651, -.1459743], [13.05891, .0014926], [-2.945415, .1424277],
[-24.08659, .2451936], [-41.86147, .288541], [-48.68383, .2727282],
[-43.32689, .1959127], [-31.66654, .0644874], [-20.38356, -.1158372],
[-13.76411, -.3518627], [-9.978693, -.6557535], [-3.7704, -1.003754],
[10.27108, -1.341632], [31.02847, -1.614486], [51.87613, -1.779089],
[66.93117, -1.807459], [73.51951, -1.679688], [73.4053, -1.401012],
[69.17468, -.9954996], [59.8543, -.511261], [38.23899, -.0146745],
[-.2604809, .4261311], [-49.0107, .7452514], [-91.1128, .8879492],
[-112.1574, .8282748], [-108.3227, .5851508], [-86.51453, .2351699],
[-59.91258, -.1208998], [-40.01185, -.4297895], [-29.70265, -.6821963],
[-22.76396, -.9234254], [-13.08037, -1.217539], [1.913622, -1.57367],
[20.44045, -1.927008], [37.32873, -2.229565], [46.79802, -2.463154],
[51.95937, -2.614697], [59.67393, -2.681357], [70.50803, -2.609654],
[81.27311, -2.301618], [83.53191, -1.720974], [67.72536, -.9837123],
[33.78039, -.2261613], [-6.509092, .4546985], [-37.31579, 1.005751],
[-46.05207, 1.457224], [-29.81496, 1.870815], [1.416417, 2.263313],
[28.31503, 2.599906], [32.90134, 2.812282], [8.949259, 2.83358],
[-35.41895, 2.632667], [-84.65775, 2.201077], [-124.4288, 1.598951],
[-144.6036, .9504762], [-140.2204, .4187932], [-109.2624, .1646726],
[-53.6901, .2034265], [15.07415, .398165], [74.44268, .5427476],
[104.0403, .5454975], [101.0725, .4723354], [76.58291, .4626823],
[49.27925, .5840143], [36.15751, .7187981], [36.48799, .6058422],
[37.60897, .1221227], [27.75998, -.5891272], [4.216643, -1.249841],
[-23.20579, -1.594972], [-39.33292, -1.545968], [-36.6134, -1.275494],
[-20.90161, -1.035783], [-4.143123, -.9971732], [5.48432, -1.154264],
[9.270075, -1.29987], [13.69573, -1.240559], [22.16675, -.9662656],
[33.01987, -.6420301], [41.93186, -.4698712], [47.12222, -.4527797],
[48.62164, -.4407153], [47.30701, -.2416076], [40.20537, .2317583],
[22.37898, .8710276], [-7.133002, 1.426177], [-43.3339, 1.652785],
[-78.51229, 1.488021], [-101.3684, 1.072096], [-105.2179, .6496446],
[-90.97147, .4193682], [-68.30824, .41847], [-48.10113, .5253419],
[-35.60709, .595076], [-31.15775, .5509905], [-31.82346, .3755519],
[-32.49278, .1297979], [-28.22499, -.0916165], [-14.42852, -.2531037],
[10.1827, -.3220784], [36.64189, -.2660561], [49.43468, -.1358522],
[38.75517, -.0279508], [6.447761, .0168735], [-33.15883, .0315687],
[-62.60446, .0819507], [-72.87829, .2274033], [-66.54629, .4641401],
[-52.61205, .7211093], [-38.06676, .907773], [-26.19963, .9387103],
[-16.51492, .7940786], [-7.007577, .5026631], [.6125674, .1224996],
[7.866972, -.2714422], [14.8123, -.6273921], [22.52388, -.9124271],
[30.65265, -1.108861], [39.47801, -1.199206], [49.05027, -1.19908],
[59.02925, -1.139046], [72.88999, -.9775021], [95.08865, -.6592603],
[125.8983, -.1609712], [154.4283, .4796201], [160.7638, 1.100565],
[130.6092, 1.447148], [67.84406, 1.359608], [-7.070272, .8931825],
[-68.08128, .2619787], [-99.39944, -.252208], [-104.911, -.4703874],
[-100.2372, -.4430657], [-98.11596, -.390683], [-104.2051, -.5647846],
[-114.0125, -.9397582], [-113.3475, -1.341633], [-92.98669, -1.567337],
[-51.91707, -1.504943], [-.7313812, -1.30576], [43.22938, -1.17151],
[64.62762, -1.136151], [64.07226, -1.050555], [59.35707, -.7308369],
[67.06026, -.1766731], [91.87247, .3898467], [124.4591, .8135461],
[151.2402, .9644226], [163.0648, .6865934], [154.6432, .0115685]])
mdata = macrodata.load_pandas()
X = mdata.data[['realinv', 'cpi']].values.astype(float)
Y = bkfilter(X, 6, 32, 12)
assert_almost_equal(Y, bking_results, 4)
def test_hpfilter():
# Test Hodrick-Prescott Filter. Results taken from Stata.
hpfilt_res = array([
[3.951191484487844718e+01, 2.670837085155121713e+03],
[8.008853245681075350e+01, 2.698712467543189177e+03],
[4.887545512195401898e+01, 2.726612544878045810e+03],
[3.059193256079834100e+01, 2.754612067439201837e+03],
[6.488266733421960453e+01, 2.782816332665780465e+03],
[2.304024204546703913e+01, 2.811349757954532834e+03],
[-1.355312369487364776e+00, 2.840377312369487299e+03],
[-6.746236512580753697e+01, 2.870078365125807522e+03],
[-8.136743836853429457e+01, 2.900631438368534418e+03],
[-6.016789026443257171e+01, 2.932172890264432681e+03],
[-4.636922433138215638e+01, 2.964788224331382025e+03],
[-2.069533915570400495e+01, 2.998525339155703932e+03],
[-2.162152558595607843e+00, 3.033403152558595593e+03],
[-4.718647774311648391e+00, 3.069427647774311481e+03],
[-1.355645669169007306e+01, 3.106603456691690099e+03],
[-4.436926204475639679e+01, 3.144932262044756499e+03],
[-4.332027378211660107e+01, 3.184407273782116590e+03],
[-4.454697106352068658e+01, 3.224993971063520803e+03],
[-2.629875787765286077e+01, 3.266630757877652741e+03],
[-4.426119635629265758e+01, 3.309228196356292756e+03],
[-1.443441190762496262e+01, 3.352680411907625057e+03],
[-2.026686669186437939e+01, 3.396853866691864368e+03],
[-1.913700136208899494e+01, 3.441606001362089046e+03],
[-5.482458977940950717e+01, 3.486781589779409387e+03],
[-1.596244517937793717e+01, 3.532213445179378141e+03],
[-1.374011542874541192e+01, 3.577700115428745448e+03],
[1.325482813403914406e+01, 3.623030171865960710e+03],
[5.603040174253828809e+01, 3.667983598257461836e+03],
[1.030743373627105939e+02, 3.712348662637289181e+03],
[7.217534795943993231e+01, 3.755948652040559864e+03],
[5.462972503693208637e+01, 3.798671274963067845e+03],
[4.407065050666142270e+01, 3.840449349493338559e+03],
[3.749016270204992907e+01, 3.881249837297949853e+03],
[-1.511244199923112319e+00, 3.921067244199923152e+03],
[-9.093507374079763395e+00, 3.959919507374079785e+03],
[-1.685361946760258434e+01, 3.997823619467602384e+03],
[2.822211031434289907e+01, 4.034790889685657021e+03],
[6.117590627896424849e+01, 4.070822093721035344e+03],
[5.433135391434370831e+01, 4.105935646085656117e+03],
[3.810480376716623141e+01, 4.140188196232833434e+03],
[7.042964928802848590e+01, 4.173670350711971878e+03],
[4.996346842507591646e+01, 4.206496531574924120e+03],
[4.455282059571254649e+01, 4.238825179404287155e+03],
[-7.584961950576143863e+00, 4.270845961950576566e+03],
[-4.620339247697120300e+01, 4.302776392476971523e+03],
[-7.054024364552969928e+01, 4.334829243645529459e+03],
[-6.492941099801464588e+01, 4.367188410998014660e+03],
[-1.433567024239555394e+02, 4.399993702423955256e+03],
[-5.932834493089012540e+01, 4.433344344930889747e+03],
[-6.842096758743628016e+01, 4.467249967587436004e+03],
[-6.774011924654860195e+01, 4.501683119246548813e+03],
[-9.030958565658056614e+01, 4.536573585656580690e+03],
[-4.603981499136807543e+01, 4.571808814991368308e+03],
[2.588118806672991923e+01, 4.607219811933269739e+03],
[3.489419371912299539e+01, 4.642608806280876706e+03],
[7.675179642495095322e+01, 4.677794203575049323e+03],
[1.635497817724171910e+02, 4.712616218227582976e+03],
[1.856079654765617306e+02, 4.746963034523438182e+03],
[1.254269446392718237e+02, 4.780825055360728584e+03],
[1.387413113837174024e+02, 4.814308688616282780e+03],
[6.201826599282230745e+01, 4.847598734007177882e+03],
[4.122129542972197669e+01, 4.880966704570278125e+03],
[-4.120287475842360436e+01, 4.914722874758424041e+03],
[-9.486328233441963675e+01, 4.949203282334419782e+03],
[-1.894232132641573116e+02, 4.984718213264157384e+03],
[-1.895766639620087517e+02, 5.021518663962008759e+03],
[-1.464092413342650616e+02, 5.059737241334265491e+03],
[-1.218770668721217589e+02, 5.099388066872122181e+03],
[-4.973075629078175552e+01, 5.140393756290781312e+03],
[-5.365375213897277717e+01, 5.182600752138972894e+03],
[-7.175241524251214287e+01, 5.225824415242512259e+03],
[-7.834757283225462743e+01, 5.269846572832254424e+03],
[-6.264220687943907251e+01, 5.314404206879438789e+03],
[-3.054332122210325906e+00, 5.359185332122210639e+03],
[4.808218808024685131e+01, 5.403838811919753425e+03],
[2.781399326736391231e+00, 5.448011600673263274e+03],
[-2.197570415173231595e+01, 5.491380704151732061e+03],
[1.509441335012807031e+02, 5.533624866498719712e+03],
[1.658909029574851957e+02, 5.574409097042514986e+03],
[2.027292548049981633e+02, 5.613492745195001589e+03],
[1.752101578176061594e+02, 5.650738842182393455e+03],
[1.452808749847536092e+02, 5.686137125015246056e+03],
[1.535481629475025329e+02, 5.719786837052497503e+03],
[1.376169777998875361e+02, 5.751878022200112355e+03],
[1.257703080340770612e+02, 5.782696691965922582e+03],
[-2.524186846895645431e+01, 5.812614868468956047e+03],
[-6.546618027042404719e+01, 5.842083180270424236e+03],
[1.192352023580315290e+01, 5.871536479764196883e+03],
[1.043482970188742911e+02, 5.901368702981125352e+03],
[2.581376184768396342e+01, 5.931981238152316109e+03],
[6.634330880534071184e+01, 5.963840691194659485e+03],
[-4.236780162594641297e+01, 5.997429801625946311e+03],
[-1.759397735321817891e+02, 6.033272773532181418e+03],
[-1.827933311233055065e+02, 6.071867331123305121e+03],
[-2.472312362505917918e+02, 6.113601236250591683e+03],
[-2.877470049336488955e+02, 6.158748004933649099e+03],
[-2.634066336693540507e+02, 6.207426633669354487e+03],
[-1.819572770763625158e+02, 6.259576277076362203e+03],
[-1.175034606274621183e+02, 6.314971460627461965e+03],
[-4.769898649718379602e+01, 6.373272986497183410e+03],
[1.419578280287896632e+01, 6.434068217197121157e+03],
[6.267929662760798237e+01, 6.496914703372392069e+03],
[6.196413196753746888e+01, 6.561378868032462378e+03],
[5.019769125317907310e+01, 6.627066308746821051e+03],
[4.665364933213822951e+01, 6.693621350667861407e+03],
[3.662430749527266016e+01, 6.760719692504727391e+03],
[7.545680850246480986e+01, 6.828066191497535328e+03],
[6.052940492147536133e+01, 6.895388595078524304e+03],
[6.029518881462354329e+01, 6.962461811185376064e+03],
[2.187042136652689805e+01, 7.029098578633473153e+03],
[2.380067926824722235e+01, 7.095149320731752596e+03],
[-7.119129802169481991e+00, 7.160478129802169860e+03],
[-3.194497359120850888e+01, 7.224963973591208742e+03],
[-1.897137038934124575e+01, 7.288481370389341464e+03],
[-1.832687287845146784e+01, 7.350884872878451461e+03],
[4.600482336597542599e+01, 7.412017176634024509e+03],
[2.489047706403016491e+01, 7.471709522935970199e+03],
[6.305909392127250612e+01, 7.529821906078727807e+03],
[4.585212309498183458e+01, 7.586229876905018500e+03],
[9.314260180878318351e+01, 7.640848398191216802e+03],
[1.129819097095369216e+02, 7.693621090290463144e+03],
[1.204662123176703972e+02, 7.744549787682329224e+03],
[1.336860614601246198e+02, 7.793706938539875409e+03],
[1.034567175813735957e+02, 7.841240282418626521e+03],
[1.403118873372050075e+02, 7.887381112662795204e+03],
[1.271726169351004501e+02, 7.932425383064899506e+03],
[8.271925765282139764e+01, 7.976756742347178260e+03],
[-3.197432211752584408e+01, 8.020838322117525422e+03],
[-1.150209535194062482e+02, 8.065184953519406008e+03],
[-1.064694837456772802e+02, 8.110291483745677397e+03],
[-1.190428718925368230e+02, 8.156580871892536379e+03],
[-1.353635336292991269e+02, 8.204409533629299403e+03],
[-9.644348283027102298e+01, 8.254059482830271008e+03],
[-6.143413116116607853e+01, 8.305728131161165948e+03],
[-3.019161311097923317e+01, 8.359552613110980019e+03],
[1.384333163552582846e+00, 8.415631666836447039e+03],
[-4.156016073666614830e+01, 8.474045160736666730e+03],
[-4.843882841860977351e+01, 8.534873828418609264e+03],
[-6.706442838867042155e+01, 8.598172428388670596e+03],
[-2.019644488579979225e+01, 8.663965444885800025e+03],
[-4.316446881084630149e+00, 8.732235446881084499e+03],
[4.435061943264736328e+01, 8.802952380567352520e+03],
[2.820550564155564643e+01, 8.876083494358445023e+03],
[5.155624419490777655e+01, 8.951623755805092514e+03],
[-4.318760899315748247e+00, 9.029585760899315574e+03],
[-6.534632828542271454e+01, 9.110014328285422380e+03],
[-7.226757738268497633e+01, 9.192951577382684263e+03],
[-9.412378615444868046e+01, 9.278398786154448317e+03],
[-1.191240653288368776e+02, 9.366312065328836979e+03],
[-4.953669826751865912e+01, 9.456588698267518339e+03],
[-6.017251579067487910e+01, 9.549051515790675694e+03],
[-5.103438828313483100e+01, 9.643492388283135369e+03],
[-7.343057830678117170e+01, 9.739665578306781754e+03],
[-2.774245193054957781e+01, 9.837293451930549054e+03],
[-3.380481112519191811e+00, 9.936052481112519672e+03],
[-2.672779877794346248e+01, 1.003560179877794326e+04],
[-3.217342505148371856e+01, 1.013559842505148299e+04],
[-4.140567518359966925e+01, 1.023568267518359971e+04],
[-6.687756033938057953e+00, 1.033547475603393832e+04],
[7.300600408459467872e+01, 1.043456899591540605e+04],
[6.862345670680042531e+01, 1.053255554329319966e+04],
[5.497882461487461114e+01, 1.062907017538512628e+04],
[9.612244093055960548e+01, 1.072379155906944106e+04],
[1.978212770103891671e+02, 1.081643272298961165e+04],
[1.362772276848754700e+02, 1.090676677231512440e+04],
[2.637635494867263333e+02, 1.099469045051327339e+04],
[1.876813256815166824e+02, 1.108018567431848351e+04],
[1.711447873158413131e+02, 1.116339921268415856e+04],
[5.257586460826678376e+01, 1.124459513539173349e+04],
[4.710652228531762375e+01, 1.132414447771468258e+04],
[-6.237613484241046535e+01, 1.140245113484241119e+04],
[-9.982044354035315337e+01, 1.147994844354035376e+04],
[-7.916275548997509759e+01, 1.155703075548997549e+04],
[-9.526003459472303803e+01, 1.163403003459472347e+04],
[-1.147987680369169539e+02, 1.171122876803691724e+04],
[-1.900259054765901965e+02, 1.178884990547659072e+04],
[-2.212256473439556430e+02, 1.186704464734395515e+04],
[-2.071394278781845060e+02, 1.194584542787818464e+04],
[-8.968541528904825100e+01, 1.202514641528904758e+04],
[-6.189531564415665343e+01, 1.210471231564415575e+04],
[-5.662878162551714922e+01, 1.218425178162551674e+04],
[-4.961678134413705266e+01, 1.226343478134413635e+04],
[-3.836288992144181975e+01, 1.234189588992144127e+04],
[-8.956671991456460091e+00, 1.241923867199145570e+04],
[3.907028461866866564e+01, 1.249504271538133071e+04],
[1.865299000184495526e+01, 1.256888200999815490e+04],
[4.279803532226833340e+01, 1.264035496467773191e+04],
[3.962735362631610769e+01, 1.270907164637368442e+04],
[1.412691291877854383e+02, 1.277466887081221466e+04],
[1.256537791844366438e+02, 1.283680822081556289e+04],
[7.067642758858892194e+01, 1.289523957241141034e+04],
[1.108876647603192396e+02, 1.294979133523968085e+04],
[9.956490829291760747e+01, 1.300033609170708223e+04],
[1.571612709880937473e+02, 1.304681572901190702e+04],
[2.318746375812715996e+02, 1.308923436241872878e+04],
[2.635546670125277160e+02, 1.312769433298747208e+04],
[2.044220965739259555e+02, 1.316244290342607383e+04],
[2.213739418903714977e+02, 1.319389205810962812e+04],
[1.020184547767112235e+02, 1.322258154522328914e+04],
[-1.072694716663390864e+02, 1.324918947166633916e+04],
[-3.490477058718843182e+02, 1.327445770587188417e+04],
[-3.975570728533530200e+02, 1.329906107285335383e+04],
[-3.331152428080622485e+02, 1.332345624280806260e+04]])
dta = macrodata.load_pandas().data['realgdp'].values
res = column_stack((hpfilter(dta, 1600)))
assert_almost_equal(res, hpfilt_res, 6)
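# hpfilter returns a (cycle, trend) pair; column_stack above lines those up with the
# two columns of the Stata reference, and lamb=1600 is the conventional smoothing
# value for quarterly data.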
def test_cfitz_filter():
# Test Christiano-Fitzgerald Filter. Results taken from R.
# NOTE: The Stata mata code and the matlab code it's based on are wrong.
cfilt_res = array([
[0.712599537179426, 0.439563468233128],
[1.06824041304411, 0.352886666575907],
[1.19422467791128, 0.257297004260607],
[0.970845473140327, 0.114504692143872],
[0.467026976628563, -0.070734782329146],
[-0.089153511514031, -0.238609685132605],
[-0.452339254128573, -0.32376584042956],
[-0.513231214461187, -0.314288554228112],
[-0.352372578720063, -0.258815055101336],
[-0.160282602521333, -0.215076844089567],
[-0.0918782593827686, -0.194120745417214],
[-0.168083823205437, -0.158327420072693],
[-0.291595204965808, -0.0742727139742986],
[-0.348638756841307, 0.037008291163602],
[-0.304328040874631, 0.108196527328748],
[-0.215933150969686, 0.0869231107437175],
[-0.165632621390694, -0.0130556619786275],
[-0.182326839507151, -0.126570926191824],
[-0.223737786804725, -0.205535321806185],
[-0.228939291453403, -0.269110078201836],
[-0.185518327227038, -0.375976507132174],
[-0.143900152461529, -0.53760115656157],
[-0.162749541550174, -0.660065018626038],
[-0.236263634756884, -0.588542352053736],
[-0.275785854309211, -0.236867929421996],
[-0.173666515108109, 0.303436335579219],
[0.0963135720251639, 0.779772338801993],
[0.427070069032285, 0.929108075350647],
[0.629034743259998, 0.658330841002647],
[0.557941248993624, 0.118500049361018],
[0.227866624051603, -0.385048321099911],
[-0.179878859883227, -0.582223992561493],
[-0.428263000051965, -0.394053702908091],
[-0.381640684645912, 0.0445437406977307],
[-0.0942745548364887, 0.493997792757968],
[0.238132391504895, 0.764519811304315],
[0.431293754256291, 0.814755206427316],
[0.455010435813661, 0.745567043101108],
[0.452800768971269, 0.709401694610443],
[0.615754619329312, 0.798293251119636],
[1.00256335412457, 0.975856845059388],
[1.44841039351691, 1.09097252730799],
[1.64651971120370, 0.967823457118036],
[1.35534532901802, 0.522397724737059],
[0.580492790312048, -0.16941343361609],
[-0.410746188031773, -0.90760401289056],
[-1.26148406066881, -1.49592867122591],
[-1.75784179124566, -1.87404167409849],
[-1.94478553960064, -2.14586210891112],
[-2.03751202708559, -2.465855239868],
[-2.20376059354166, -2.86294187189049],
[-2.39722338315852, -3.15004697654831],
[-2.38032366161537, -3.01390466643222],
[-1.91798022532025, -2.23395210271226],
[-0.982318490353716, -0.861346053067472],
[0.199047030343412, 0.790266582335616],
[1.28582776574786, 2.33731327460104],
[2.03565905376430, 3.54085486821911],
[2.41201557412526, 4.36519456268955],
[2.52011070482927, 4.84810517685452],
[2.45618479815452, 4.92906708807477],
[2.22272146945388, 4.42591058990048],
[1.78307567169034, 3.20962906108388],
[1.18234431860844, 1.42568060336985],
[0.590069172333348, -0.461896808688991],
[0.19662302949837, -1.89020992539465],
[0.048307034171166, -2.53490571941987],
[-0.0141956981899000, -2.50020338531674],
[-0.230505187108187, -2.20625973569823],
[-0.700947410386801, -2.06643697511048],
[-1.27085123163060, -2.21536883679783],
[-1.64082547897928, -2.49016921117735],
[-1.62286182971254, -2.63948740221362],
[-1.31609762181362, -2.54685250637904],
[-1.03085567704873, -2.27157435428923],
[-1.01100120380112, -1.90404507430561],
[-1.19823958399826, -1.4123209792214],
[-1.26398933608383, -0.654000086153317],
[-0.904710628949692, 0.447960016248203],
[-0.151340093679588, 1.73970411237156],
[0.592926881165989, 2.85741581650685],
[0.851660587507523, 3.4410446351716],
[0.480324393352127, 3.36870271362297],
[-0.165153230782417, 2.82003806696544],
[-0.459235919375844, 2.12858991660866],
[0.0271158842479935, 1.55840980891556],
[1.18759188180671, 1.17980298478623],
[2.43238266962309, 0.904011534980672],
[3.08277213720132, 0.595286911949837],
[2.79953663720953, 0.148014782859571],
[1.73694442845833, -0.496297332023011],
[0.357638079951977, -1.33108149877570],
[-0.891418825216945, -2.22650083183366],
[-1.77646467793627, -2.89359299718574],
[-2.24614790863088, -2.97921619243347],
[-2.29048879096607, -2.30003092779280],
[-1.87929656465888, -1.05298381273274],
[-1.04510101454788, 0.215837488618531],
[0.00413338508394524, 0.937866257924888],
[0.906870625251025, 0.92664365343019],
[1.33869057593416, 0.518564571494679],
[1.22659678454440, 0.288096869652890],
[0.79380139656044, 0.541053084632774],
[0.38029431865832, 1.01905199983437],
[0.183929413600038, 1.10529586616777],
[0.140045425897033, 0.393618564826736],
[0.0337313182352219, -0.86431819007665],
[-0.269208622829813, -1.85638085246792],
[-0.687276639992166, -1.82275359004533],
[-1.00161592325614, -0.692695765071617],
[-1.06320089194036, 0.803577361347341],
[-0.927152307196776, 1.67366338751788],
[-0.786802101366614, 1.42564362251793],
[-0.772970884572502, 0.426446388877964],
[-0.81275662801789, -0.437721213831647],
[-0.686831250382476, -0.504255468075149],
[-0.237936463020255, 0.148656301898438],
[0.459631879129522, 0.832925905720478],
[1.12717379822508, 0.889455302576383],
[1.48640453200855, 0.268042676202216],
[1.46515245776211, -0.446505038539178],
[1.22993484959115, -0.563868578181134],
[1.0272100765927, 0.0996849952196907],
[0.979191212438404, 1.05053652824665],
[1.00733490030391, 1.51658415000556],
[0.932192535457706, 1.06262774912638],
[0.643374300839414, -0.0865180803476065],
[0.186885168954461, -1.24799408923277],
[-0.290842337365465, -1.80035611156538],
[-0.669446735516495, -1.58847333561510],
[-0.928915624595538, -0.932116966867929],
[-1.11758635926997, -0.307879396807850],
[-1.26832454569756, -0.00856199983957032],
[-1.35755577149251, -0.0303537516690989],
[-1.34244112665546, -0.196807620887435],
[-1.22227976023299, -0.342062643495923],
[-1.04601473486818, -0.390474392372016],
[-0.85158508717846, -0.322164402093596],
[-0.605033439160543, -0.126930141915954],
[-0.218304303942818, 0.179551077808122],
[0.352173017779006, 0.512327303000081],
[1.01389600097229, 0.733397490572755],
[1.55149778750607, 0.748740387440165],
[1.75499674757591, 0.601759717901009],
[1.56636057468633, 0.457705308377562],
[1.12239792537274, 0.470849913286519],
[0.655802600286141, 0.646142040378738],
[0.335285115340180, 0.824103600255079],
[0.173454596506888, 0.808068498175582],
[0.0666753011315252, 0.521488214487996],
[-0.0842367474816212, 0.0583493276173476],
[-0.285604762631464, -0.405958418332253],
[-0.465735422869919, -0.747800086512926],
[-0.563586691231348, -0.94982272350799],
[-0.598110322024572, -1.04736894794361],
[-0.65216025756061, -1.04858365218822],
[-0.789663117801624, -0.924145633093637],
[-0.984704045337959, -0.670740724179446],
[-1.12449565589348, -0.359476803003931],
[-1.07878318723543, -0.092290938944355],
[-0.775555435407062, 0.102132527529259],
[-0.231610677329856, 0.314409560305622],
[0.463192794235131, 0.663523546243286],
[1.17416973448423, 1.13156902460931],
[1.74112278814906, 1.48967153067024],
[2.00320855757084, 1.42571085941843],
[1.8529912317336, 0.802460519079555],
[1.30747261947211, -0.169219078629572],
[0.540237070403222, -1.01621539672694],
[-0.177136817092375, -1.3130784867977],
[-0.611981468823591, -0.982477824460773],
[-0.700240028737747, -0.344919609255406],
[-0.572396497740112, 0.125083535035390],
[-0.450934466600975, 0.142553112732280],
[-0.494020014254326, -0.211429053871656],
[-0.701707589094918, -0.599602868825992],
[-0.94721339346157, -0.710669870591623],
[-1.09297139748946, -0.47846194092245],
[-1.08850658866583, -0.082258450179988],
[-0.976082880696692, 0.235758921309309],
[-0.81885695346771, 0.365298185204303],
[-0.63165529525553, 0.384725179378064],
[-0.37983149226421, 0.460240196164378],
[-0.0375551354277652, 0.68580913832794],
[0.361996927427804, 0.984470835955107],
[0.739920615366072, 1.13195975020298],
[1.03583478061534, 0.88812510421667],
[1.25614938962160, 0.172561520611839],
[1.45295030231799, -0.804979390544485],
[1.64887158748426, -1.55662011197859],
[1.78022721495313, -1.52921975346218],
[1.71945683859668, -0.462240366424548],
[1.36728880239190, 1.31213774341268],
[0.740173894315912, 2.88362740582926],
[-0.0205364331835904, 3.20319080963167],
[-0.725643970956428, 1.75222466531151],
[-1.23900506689782, -0.998432917440275],
[-1.52651897508678, -3.72752870885448],
[-1.62857516631435, -5.00551707196292],
[-1.59657420180451, -4.18499132634584],
[-1.45489013276495, -1.81759097305637],
[-1.21309542313047, 0.722029457352468]])
dta = macrodata.load_pandas().data[['tbilrate', 'infl']].values[1:]
cyc, trend = cffilter(dta)
assert_almost_equal(cyc, cfilt_res, 8)
# do 1d
cyc, trend = cffilter(dta[:, 1])
assert_almost_equal(cyc, cfilt_res[:, 1], 8)
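# cffilter likewise returns a (cycle, trend) pair and defaults to the same 6-32
# period band (with drift removal); passing a single column, as in the last two
# lines, exercises the 1d code path.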
def test_bking_pandas():
# 1d
dta = macrodata.load_pandas().data
index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')
dta.index = index
filtered = bkfilter(dta["infl"])
nd_filtered = bkfilter(dta['infl'].values)
assert_equal(filtered.values, nd_filtered)
assert_equal(filtered.index[0], datetime(1962, 3, 31))
assert_equal(filtered.index[-1], datetime(2006, 9, 30))
assert_equal(filtered.name, "infl_cycle")
# 2d
filtered = bkfilter(dta[["infl", "unemp"]])
nd_filtered = bkfilter(dta[['infl', 'unemp']].values)
assert_equal(filtered.values, nd_filtered)
assert_equal(filtered.index[0], datetime(1962, 3, 31))
assert_equal(filtered.index[-1], datetime(2006, 9, 30))
assert_equal(filtered.columns.values, ["infl_cycle", "unemp_cycle"])
def test_cfitz_pandas():
# 1d
dta = macrodata.load_pandas().data
index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')
dta.index = index
cycle, trend = cffilter(dta["infl"])
ndcycle, ndtrend = cffilter(dta['infl'].values)
assert_allclose(cycle.values, ndcycle, rtol=1e-14)
assert_equal(cycle.index[0], datetime(1959, 3, 31))
assert_equal(cycle.index[-1], datetime(2009, 9, 30))
assert_equal(cycle.name, "infl_cycle")
# 2d
cycle, trend = cffilter(dta[["infl", "unemp"]])
ndcycle, ndtrend = cffilter(dta[['infl', 'unemp']].values)
assert_allclose(cycle.values, ndcycle, rtol=1e-14)
assert_equal(cycle.index[0], datetime(1959, 3, 31))
assert_equal(cycle.index[-1], datetime(2009, 9, 30))
assert_equal(cycle.columns.values, ["infl_cycle", "unemp_cycle"])
def test_hpfilter_pandas():
dta = macrodata.load_pandas().data
index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')
dta.index = index
cycle, trend = hpfilter(dta["realgdp"])
ndcycle, ndtrend = hpfilter(dta['realgdp'].values)
assert_equal(cycle.values, ndcycle)
assert_equal(cycle.index[0], datetime(1959, 3, 31))
assert_equal(cycle.index[-1], datetime(2009, 9, 30))
assert_equal(cycle.name, "realgdp_cycle")
class TestFilters(object):
@classmethod
def setup_class(cls):
# even
data = [-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
530, 489, 540, 457, 195, 176, 337, 239, 128, 102,
232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]
cls.data = DataFrame(data, date_range(start='1/1/1951',
periods=len(data),
freq='Q'))
data[9] = np.nan
cls.datana = DataFrame(data, date_range(start='1/1/1951',
periods=len(data),
freq='Q'))
from .results import filter_results
cls.expected = filter_results
def test_convolution(self):
x = self.data.values.squeeze()
res = convolution_filter(x, [.75, .25])
expected = self.expected.conv2
np.testing.assert_almost_equal(res, expected)
res = convolution_filter(x, [.75, .25], nsides=1)
expected = self.expected.conv1
np.testing.assert_almost_equal(res, expected)
x = self.datana.values.squeeze()
res = convolution_filter(x, [.75, .25])
expected = self.expected.conv2_na
np.testing.assert_almost_equal(res, expected)
res = convolution_filter(x, [.75, .25], nsides=1)
expected = self.expected.conv1_na
np.testing.assert_almost_equal(res, expected)
def test_convolution2d(self):
x = self.data.values
res = convolution_filter(x, [[.75], [.25]])
expected = self.expected.conv2
np.testing.assert_almost_equal(res, expected[:, None])
res = convolution_filter(np.c_[x, x], [[.75, .75], [.25, .25]])
np.testing.assert_almost_equal(res, np.c_[expected, expected])
res = convolution_filter(x, [[.75], [.25]], nsides=1)
expected = self.expected.conv1
np.testing.assert_almost_equal(res, expected[:, None])
x = self.datana.values
res = convolution_filter(x, [[.75], [.25]])
expected = self.expected.conv2_na
np.testing.assert_almost_equal(res, expected[:, None])
res = convolution_filter(x, [[.75], [.25]], nsides=1)
expected = self.expected.conv1_na
np.testing.assert_almost_equal(res, expected[:, None])
def test_recursive(self):
x = self.data.values.squeeze()
res = recursive_filter(x, [.75, .25])
expected = self.expected.recurse
np.testing.assert_almost_equal(res, expected)
res = recursive_filter(x, [.75, .25], init=[150, 100])
expected = self.expected.recurse_init
np.testing.assert_almost_equal(res, expected)
x = self.datana.values.squeeze()
res = recursive_filter(x, [.75, .25])
expected = self.expected.recurse_na
np.testing.assert_almost_equal(res, expected)
res = recursive_filter(x, [.75, .25], init=[150, 100])
expected = self.expected.recurse_init_na
np.testing.assert_almost_equal(res, expected)
assert_raises(ValueError, recursive_filter, x,
[.75, .25, .5], [150, 100])
def test_pandas(self):
start = datetime(1951, 3, 31)
end = datetime(1958, 12, 31)
x = self.data[0]
res = convolution_filter(x, [.75, .25])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
res = convolution_filter(x, [.75, .25], nsides=1)
assert_(res.index[0] == start)
        # the one-sided filter adds no NaN padding, so the index still ends at 1958Q4
assert_(res.index[-1] == end)
res = recursive_filter(x, [.75, .25])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
x = self.datana
res = recursive_filter(x, [.75, .25])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
def test_pandas2d(self):
start = datetime(1951, 3, 31)
end = datetime(1958, 12, 31)
x = concat((self.data[0], self.data[0]), axis=1)
res = convolution_filter(x, [[.75, .75], [.25, .25]])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
def test_odd_length_filter(self):
start = datetime(1951, 3, 31)
end = datetime(1958, 12, 31)
x = self.data[0]
res = convolution_filter(x, [.75, .5, .3, .2, .1])
expected = self.expected.conv2_odd
np.testing.assert_almost_equal(res.values.squeeze(), expected)
np.testing.assert_(res.index[0] == start)
np.testing.assert_(res.index[-1] == end)
res = convolution_filter(x, [.75, .5, .3, .2, .1], nsides=1)
expected = self.expected.conv1_odd
np.testing.assert_almost_equal(res.values.squeeze(), expected)
np.testing.assert_(res.index[0] == start)
np.testing.assert_(res.index[-1] == end)
# with no NAs
# not a stable filter
res = recursive_filter(x, [.75, .5, .3, .2, .1], init=[150, 100,
125, 135,
145])
expected = self.expected.recurse_odd
        # the R reference values only carry about 12 significant digits and this unstable
        # recursion gets large, so only compare to 4 decimals
np.testing.assert_almost_equal(res.values.squeeze(), expected, 4)
np.testing.assert_(res.index[0] == start)
np.testing.assert_(res.index[-1] == end)
def dummy_func(x):
return x
def dummy_func_array(x):
return x.values
def dummy_func_pandas_columns(x):
return x.values
def dummy_func_pandas_series(x):
return x['A']
def test_pandas_freq_decorator():
x = make_dataframe()
# in x, get a function back that returns an x with the same columns
func = pandas_wrapper(dummy_func)
    np.testing.assert_equal(func(x.values), x.values)
func = pandas_wrapper(dummy_func_array)
assert_frame_equal(func(x), x)
expected = x.rename(columns=dict(zip('ABCD', 'EFGH')))
func = pandas_wrapper(dummy_func_array, names=list('EFGH'))
assert_frame_equal(func(x), expected)
|
#!/usr/bin/env python
from __future__ import division, print_function
import os
import sys
from io import BytesIO, IOBase
if sys.version_info[0] < 3:
from __builtin__ import xrange as range
from future_builtins import ascii, filter, hex, map, oct, zip
def main():
pass
# region fastio
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._file = file
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
def print(*args, **kwargs):
"""Prints the values to a stream, or to sys.stdout by default."""
sep, file = kwargs.pop("sep", " "), kwargs.pop("file", sys.stdout)
at_start = True
for x in args:
if not at_start:
file.write(sep)
file.write(str(x))
at_start = False
file.write(kwargs.pop("end", "\n"))
if kwargs.pop("flush", False):
file.flush()
if sys.version_info[0] < 3:
sys.stdin, sys.stdout = FastIO(sys.stdin), FastIO(sys.stdout)
else:
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
# endregion
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
import xmlrpc.client
import os, sys, shutil, json, subprocess, time, yara, hashlib, datetime, requests, magic, redis, socket, pefile
from pathlib import Path
from pymongo import MongoClient
from rq import get_current_job, Queue
from read_avclass_report import run_avclass
from redis import Redis
with open("tknk.conf", 'r') as f:
tknk_conf = json.load(f)
VM_NAME=tknk_conf['vm_name']
VM_URL=tknk_conf['vm_url']
def download():
proxy = xmlrpc.client.ServerProxy(VM_URL)
with open("dump.zip", "wb") as handle:
try:
handle.write(proxy.download_file().data)
return True
except xmlrpc.client.Fault:
print(sys.exc_info())
return sys.exc_info()
def upload(filename):
proxy = xmlrpc.client.ServerProxy(VM_URL)
with open(filename, "rb") as handle:
binary_data = xmlrpc.client.Binary(handle.read())
if "/" in filename:
filename = filename.rsplit("/", 1)[1]
print("upload..." + filename)
proxy.upload_file(binary_data, filename)
def dump(config):
proxy = xmlrpc.client.ServerProxy(VM_URL)
try:
proxy.dump(config)
return True
except:
return False
def vm_down():
print(subprocess.call(['virsh', "destroy", VM_NAME]))
def current_job_init(r):
q = Queue(connection=Redis())# Getting the number of jobs in the queue
queued_job_ids = q.job_ids # Gets a list of job IDs from the queue
if len(queued_job_ids) == 0:
r.set('current_job_id', None)
return
def size_fmt(num, suffix='B'):
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1000.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1000.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def analyze(uid):
#db connect
client = MongoClient('localhost', 27017)
db = client.scan_database
collection = db.scan_collection
#redis connect
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
r = redis.StrictRedis(connection_pool=pool)
#update current_job
job=get_current_job()
r.set('current_job_id', job.id)
#config read & write
config = eval(r.get(uid).decode('utf-8'))
pe = pefile.PE(config['path'])
config['entrypoint'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
#make report format
result = {"result":{"detail":"", "is_success":False},
"run_time":str(config['time']),
"mode":config['mode'],
"timestamp":str(datetime.datetime.today().isoformat()),
"scans":[],
"UUID":uid,
"magic":magic.from_file(config['path']),
"virus_total":0,
"avclass":{"flag":None, "data":[]}
}
with open(config['path'],'rb')as f:
d = f.read()
file_md5 = str(hashlib.md5(d).hexdigest())
file_sha1 = str(hashlib.sha1(d).hexdigest())
file_sha256 = str(hashlib.sha256(d).hexdigest())
#avclass
if tknk_conf['virus_total'] == 1:
result['virus_total'] = 1
result['avclass'] = run_avclass(tknk_conf['vt_key'], file_sha256)
#Detect it easy
cmd=["die/diec.sh", config['path']]
p = subprocess.run(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
result['die'] = p.stdout.decode("utf8").split("\n")
if result['die'] != []:
result['die'].pop()
#read yara rules
rules = yara.compile('index.yar')
matches = rules.match(config['path'])
result['target_scan']=({"md5":file_md5, "sha1":file_sha1, "sha256":file_sha256, "detect_rule":list(map(str,matches)), "file_name":config['target_file'], "size":size_fmt(os.path.getsize(config['path']))})
cmd=['virsh', 'snapshot-revert', VM_NAME, '--current']
p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.stderr.decode('utf-8')
print(output)
if "busy" in output:
print("failed to initialize KVM: Device or resource busy")
result["result"]["is_success"] = False
result["result"]["detail"] = "failed to initialize KVM: Device or resource busy"
collection.update({u'UUID':uid},result)
current_job_init(r)
os._exit(0)
elif "Domain" in output:
print("Domain snapshot not found: the domain does not have a current snapshot")
result["result"]["is_success"] = False
result["result"]["detail"] = "Domain snapshot not found: the domain does not have a current snapshot"
collection.update({u'UUID':uid},result)
current_job_init(r)
os._exit(0)
c=0
while(1):
vm_state = subprocess.check_output(["virsh", "domstate", VM_NAME])
time.sleep(1)
c+=1
if "running" in str(vm_state.decode('utf-8')):
break
if c == 60:
current_job_init(r)
os._exit(0)
if config['mode'] == "hollows_hunter":
tools = ["tools/hollows_hunter.exe", "tools/pe-sieve.dll", "tools/mouse_emu.pyw"]
elif config['mode'] == "procdump":
tools = ["tools/procdump.exe", "tools/mouse_emu.pyw"]
elif config['mode'] == "scylla":
tools = ["tools/Scylla.dll", "tools/mouse_emu.pyw"]
elif config['mode'] == "diff":
tools = ["tools/procdump.exe", "tools/mouse_emu.pyw"]
for tool_name in tools:
upload(tool_name)
upload("target/" + config['target_file'])
ret = dump(config)
if ret == False:
print("Connection error\n")
is_success = False
result["result"]["detail"] = "Connection error"
else:
ret = download()
if ret == True:
print("dump finish")
is_success = True
else:
is_success = False
if result["mode"] == "procdump":
result["result"]["detail"] = "Process does not exist"
else:
result["result"]["detail"] = "Dump file does not exist"
vm_down()
if is_success == False:
for scan in result["scans"]:
if scan["detect_rule"] != []:
result["result"]["is_success"] = True
result["result"]["detail"] = "Detected with yara rule!"
break
os.mkdir("result/" + str(uid))
with open("result/"+ str(uid) + "/" +file_sha256+'.json', 'w') as outfile:
json.dump(result, outfile, indent=4)
shutil.copyfile(config['path'], "result/"+str(uid)+"/"+config['target_file'])
print (json.dumps(result, indent=4))
collection.update({u'UUID':uid},result)
current_job_init(r)
os._exit(0)
elif is_success == True:
p = Path("result/dump.zip")
if p.exists():
p.unlink()
print("remove")
shutil.move("dump.zip", "result/")
subprocess.run(['unzip', "dump.zip"], cwd="result")
p = Path("result/dump/")
for f in p.glob("**/*"):
if (".exe" == f.suffix) or (".dll" == f.suffix) or (".dmp" == f.suffix):
size = os.path.getsize(str(f))
matches = rules.match(str(f.resolve()))
result['scans'].append({"detect_rule":list(map(str,matches)), "file_name":f.name, "size":size_fmt(size)})
for scan in result["scans"]:
if scan["detect_rule"] != []:
result["result"]["is_success"] = True
result["result"]["detail"] = "Detected with yara rule!"
break
print (json.dumps(result, indent=4))
with open("result/dump/"+file_sha256+'.json', 'w') as outfile:
json.dump(result, outfile, indent=4)
shutil.copyfile(config['path'], "result/dump/"+config['target_file'])
os.rename("result/dump/", "result/"+str(uid))
os.remove("result/dump.zip")
collection.update({u'UUID':uid},result)
current_job_init(r)
return
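# A minimal invocation sketch (assumptions: the queue wiring below is illustrative;
# analyze() expects the sample's config dict to already be stored in redis under the
# job's uid, matching the r.get(uid) lookup above):
#
#     from redis import Redis
#     from rq import Queue
#
#     q = Queue(connection=Redis())
#     q.enqueue(analyze, uid, job_timeout=3600)   # run the analysis as a background RQ job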
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Ported for Lord-Userbot By liualvinas/Alvin
from telethon import events
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_or_reply, cilik_cmd
PRINTABLE_ASCII = range(0x21, 0x7F)
def aesthetify(string):
for c in string:
c = ord(c)
if c in PRINTABLE_ASCII:
c += 0xFF00 - 0x20
elif c == ord(" "):
c = 0x3000
yield chr(c)
@cilik_cmd(pattern="ae(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
text = event.pattern_match.group(1)
text = "".join(aesthetify(text))
await edit_or_reply(event, text=text, parse_mode=None, link_preview=False)
raise events.StopPropagation
CMD_HELP.update(
{
"aeshtetic": f"➢ **Plugin : **`aeshtetic`\
\n\n ┌✪ **Command :** `{cmd}ae <teks>`\
\n └✪ **Function : **Mengubah font teks Menjadi aeshtetic.\
"
}
)
|
# -*- coding: utf-8 -*-
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, as long as
# any reuse or further development of the software attributes the
# National Geospatial-Intelligence Agency (NGA) authorship as follows:
# 'This software (django-gamification)
# is provided to the public as a courtesy of the National
# Geospatial-Intelligence Agency.
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.db.models.signals import post_save
from django.db import models
from gamification.badges.models import ProjectBadge, ProjectBadgeToUser
from jsonfield import JSONField
TRUE_FALSE = [(0, 'False'), (1, 'True')]
class ProjectBase(models.Model):
"""
A generic model for GeoQ objects.
"""
active = models.BooleanField(default=True, help_text='If checked, this project will be listed in the active list.')
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=200, help_text='Name of the project.')
description = models.TextField(help_text='Details of this project that will be listed on the viewing page.')
updated_at = models.DateTimeField(auto_now=True)
url = models.TextField(help_text='Project Information URL', null=True)
def __unicode__(self):
return self.name
class Meta:
abstract = True
ordering = ('-created_at',)
class Team(models.Model):
name = models.CharField(max_length=50)
description = models.TextField(null=True, blank=True)
members = models.ManyToManyField(User, null=True, blank=True)
order = models.IntegerField(default=0, null=True, blank=True, help_text='Optionally specify the order teams should appear. Lower numbers appear sooner. By default, teams appear in the order they were created.')
date_created = models.DateTimeField(auto_now_add=True)
background_color = models.CharField(max_length=50, null=True, blank=True, help_text='Optional - Color to use for background of all team badges')
icon = models.ImageField(upload_to='badge_images', null=True, blank=True, help_text='Optional - Image to show next to team names')
def __str__(self):
return "%s (%s)" % (self.name, str(len(self.members.all())))
class Meta:
ordering = ['-order', '-date_created', 'id']
class Project(ProjectBase):
"""
Top-level organizational object.
"""
THEMES = (
("", "None"),
("camping", "Camping"),
("camping2", "Camping Theme 2"),
("map", "Geospatial"),
)
private = models.BooleanField(default=False, help_text='If checked, hide this project from the list of projects and public badge APIs.')
supervisors = models.ManyToManyField(User, blank=True, null=True, related_name="supervisors", help_text='Anyone other than site administrators that can add badges and update the site')
teams = models.ManyToManyField(Team, blank=True, null=True)
viewing_pass_phrase = models.CharField(max_length=200, null=True, blank=True, help_text='Phrase that must be entered to view this page.')
project_closing_date = models.DateTimeField(null=True, blank=True, help_text='Date that project "closes" with countdown shown on project page. Badges can still be added after this.')
visual_theme = models.CharField(max_length=20, default="none", choices=THEMES, help_text='Visual Theme used to style the project page')
background_image = models.ImageField(upload_to='badge_images', null=True, blank=True, help_text='Optional - Override theme background with this image')
properties = JSONField(null=True, blank=True, help_text='JSON key/value pairs associated with this object, e.g. {"badges_mode":"blue"}')
query_token = models.CharField(max_length=200, null=True, blank=True, help_text='Token that must be entered by any server requesting data - not implemented yet.')
allowed_api_hosts = models.TextField(null=True, blank=True, help_text='Comma-separated list of hosts (IPs or Hostnames) that can access this project via data requests - not implemented yet')
@property
def user_count(self):
return User.objects.filter(projectbadgetouser__projectbadge__project=self).distinct().count()
@property
def badge_count(self):
return ProjectBadgeToUser.objects.filter(projectbadge__project=self).count()
def get_absolute_url(self):
return reverse('project-list', args=[self.name])
class Points(models.Model):
user = models.ForeignKey(User)
projectbadge = models.ForeignKey(ProjectBadge)
value = models.IntegerField(default=0)
date_awarded = models.DateTimeField('date awarded',auto_now=True)
description = models.CharField(max_length=200)
def get_absolute_url(self):
return reverse('points-list', args=[self.id])
class Meta:
verbose_name_plural = "Points"
class UserProfile(models.Model):
""" from http://stackoverflow.com/questions/44109/extending-the-user-model-with-custom-fields-in-django; this is one mechanism for adding extra details (currently score for badges) to the User model """
defaultScore = 1
user = models.OneToOneField(User)
score = models.IntegerField(default=defaultScore)
def __str__(self):
return "%s's profile" % self.user
def create_user_profile(sender, instance, created, **kwargs):
if created:
profile, created = UserProfile.objects.get_or_create(user=instance)
post_save.connect(create_user_profile, sender=User)
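# A minimal usage sketch (not executed here; the user below is hypothetical): thanks to
# the post_save hook above, every newly created User gets a UserProfile, so the badge
# score is available through the reverse one-to-one accessor.
#
#     from django.contrib.auth.models import User
#
#     user = User.objects.create_user(username="demo")   # hypothetical user
#     profile = user.userprofile                          # created by the signal above
#     profile.score += 10
#     profile.save()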
import sys
if not 'syncdb' in sys.argv[1:2] and not 'migrate' in sys.argv[1:2]:
from meta_badges import *
|
"""Warn about binary operations used as exceptions."""
from __future__ import print_function
try:
pass
except Exception or BaseException: # [binary-op-exception]
print("caught1")
except Exception and BaseException: # [binary-op-exception]
print("caught2")
except Exception or BaseException: # [binary-op-exception]
print("caught3")
except (Exception or BaseException) as exc: # [binary-op-exception]
print("caught4")
|
import importlib
from collections import OrderedDict
from django.conf import settings
class ProviderRegistry(object):
def __init__(self):
self.provider_map = OrderedDict()
self.loaded = False
def get_list(self, request=None):
self.load()
return [provider_cls(request) for provider_cls in self.provider_map.values()]
def register(self, cls):
self.provider_map[cls.id] = cls
def by_id(self, id, request=None):
self.load()
return self.provider_map[id](request=request)
def as_choices(self):
self.load()
for provider_cls in self.provider_map.values():
yield (provider_cls.id, provider_cls.name)
def load(self):
# TODO: Providers register with the provider registry when
# loaded. Here, we build the URLs for all registered providers. So, we
# really need to be sure all providers did register, which is why we're
# forcefully importing the `provider` modules here. The overall
        # mechanism is way too magical and depends on the import order et al., so
# all of this really needs to be revisited.
if not self.loaded:
for app in settings.INSTALLED_APPS:
try:
provider_module = importlib.import_module(app + ".provider")
except ImportError:
pass
else:
for cls in getattr(provider_module, "provider_classes", []):
self.register(cls)
self.loaded = True
registry = ProviderRegistry()
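# A minimal sketch of the convention load() relies on: an installed app ships a
# `provider` module exposing a `provider_classes` list, and each class in that list is
# registered here. The app and class names below are hypothetical.
#
#     # myapp/provider.py
#     class MyProvider(object):
#         id = "myapp"
#         name = "My Provider"
#         def __init__(self, request=None):
#             self.request = request
#     provider_classes = [MyProvider]
#
# After load(), `registry.by_id("myapp")` would return a MyProvider instance.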
|
#!/usr/bin/env python
"""Module to deal with halos, to be used with HaloMaker.
This module is heavily inspired by the set of IDL routines originally
found in the Ramses Analysis ToolSuite (RATS).
TODO: Some more documentation
"""
import numpy as np
import pandas as pd
import yt
from yt.utilities.logger import ytLogger as mylog
import yt.utilities.fortran_utils as fpu
from yt.funcs import get_pbar
import os
class HaloList(object):
def __init__(self, ds, folder='.', contam=False):
"""
PandaList with halos and their properties
"""
self.folder = folder
self.iout = int(str(ds).split('_')[1])
if os.path.exists(
'{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
s=self)):
self.halos = pd.read_hdf(
'{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
s=self))
else:
self.halos = self._read_halos(data_set=ds, with_contam_option=contam)
if self.halos.index.size > 0:
self.halos.to_hdf(
'{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
s=self), 'hdf')
self.ds = ds
self.halos['bhid'] = -1 ; self.halos['galID'] = -1
self.halos['mgal'] = 0 ; self.halos['msink'] = 0
# read purity of halos
self.halos['pollution'] = 0
contam_file_path = '{s.folder}/Halos/{s.iout}/contam_halos{s.iout:03d}'.format(
s=self)
if os.path.exists(contam_file_path):
p = np.loadtxt(contam_file_path)
if len(p) > 0:
p = p.T
self.halos.loc[p[0], 'pollution'] = p[1]/p[2]
def get_halo(self, hid, fname=None):
halo = self.halos.loc[hid]
scale_mpc = float(self.ds.length_unit.in_units('Mpc'))
halostr = ("Halo {hid:.0f} (level {h.level:.0f}):\n"
"\tContains {h.nbpart:.0f} particles and {h.nbsub:.0f} subhalo(s)\n"
"\tCenter:\t\t ({h.x}, {h.y}, {h.z}) box units\n"
"\tVelocity:\t ({h.vx}, {h.vy}, {h.vz}) km/s\n"
"\tL:\t\t ({h.Lx}, {h.Ly}, {h.Lz}) ToCheck\n"
"\tMass:\t\t {h.m:.3e} Msun\n"
"\tMvir:\t\t {h.mvir:.3e} Msun\n"
"\tRadius:\t\t {h.r:.3e} Mpc ({rcodeunits:.3e} box units)\n"
"\tRvir:\t\t {h.rvir:.3e} Mpc ({rvcodeunits:.3e} box units)\n"
"\tTvir:\t\t {h.tvir:.3e} K".format(hid=hid,
h=halo,
rcodeunits=halo.r / scale_mpc,
rvcodeunits=halo.rvir / scale_mpc))
if fname is not None:
with open(fname, 'w') as f:
f.write(halostr)
return halostr
def get_halo_sphere(self, hid, rvir_factor=5):
halo_spheres = getattr(self, '_halo_spheres', {})
if (hid, rvir_factor) in halo_spheres:
return halo_spheres[hid, rvir_factor]
tmp = self.halos.loc[hid, ['x', 'y', 'z', 'rvir', 'vx', 'vy', 'vz']]\
.values
center = self.ds.arr(tmp[:3], 'code_length')
radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')
vel = self.ds.arr(tmp[4:7], 'km/s')
# Get a sphere centered on the halo
sphere = self.ds.sphere(center, radius)
sphere.set_field_parameter('bulk_velocity', vel)
halo_spheres[(hid, rvir_factor)] = sphere
self._halo_spheres = halo_spheres
return sphere
def plot_halo(self, hid, rvir_factor=5, field=('deposit', 'all_density'), folder='./',
weight_field=('index', 'ones'), cmap='viridis', slice=False,
axis='z', **kwargs):
'''Plot a given halo.
Parameters
----------
* hid, integer
The halo id to plot
* rvir_factor, float, default=5
Size of the region to plot in unit of Rvir
* field, tuple
The yt field to plot
* folder, string
The folder where to save the data
* weight_field, tuple
The field to weight the projection by.
* cmap, string
The colormap to use
* slice, boolean
If true, do a slice plot instead of a projection plot
* axis, 'x', 'y' or 'z'
The axis to project onto
'''
for k, v in kwargs.items():
print('%s: %s not supported' % (k, v))
if hid not in self.halos.index:
mylog.error('%s not found.' % hid)
return
# Get position
tmp = np.array(self.halos.loc[hid, ['x', 'y', 'z', 'rvir']])
center = self.ds.arr(tmp[:3], 'code_length')
radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')
# Get a sphere centered on the halo
sphere = self.ds.sphere(center, radius)
# Make a projection plot
p = yt.ProjectionPlot(self.ds, axis, field, data_source=sphere,
weight_field=weight_field)
p.set_cmap(field=field, cmap=cmap)
p.annotate_timestamp(corner='upper_left', time=True, redshift=True)
p.annotate_scale(corner='upper_right')
# TODO: annotate halos
# TODO: better name
p.save(folder)
# Accessors
def __getitem__(self, item):
if str(item) in self.halos:
return self.halos[item]
else:
return self.halos.ix[item]
# def __getattr__(self, name):
# return self.halos.__getattr__(name) # self.halos[name]
def __len__(self):
return len(self.halos)
def __iter__(self):
return self.halos.iterrows()
# Printing functions
def __str__(self):
return self.halos.__str__()
# Convenience functions
def _read_halos(self, data_set, with_contam_option=False):
halo_keys = ('ID', 'nbpart', 'level', 'min_part_id',
'host', 'hostsub', 'nbsub', 'nextsub',
'x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',
'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',
'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel')
filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(
s=self)
data = np.empty(shape=(0, len(halo_keys)), dtype=object)
yt.funcs.mylog.debug('Reading halo catalog %s (ds=%s)' % (filename, data_set))
offsets = {}
if os.path.exists(filename):
with open(filename, 'rb') as f:
[npart] = fpu.read_vector(f, 'i')
[massp] = fpu.read_vector(f, 'f')
[aexp] = fpu.read_vector(f, 'f')
[omega_t] = fpu.read_vector(f, 'f')
[age] = fpu.read_vector(f, 'f')
[nhalos, nsubs] = fpu.read_vector(f, 'i')
# Save the age/aexp, the mass of the particle,
# as well as the number of (sub)halos
self.nhalos = nhalos
self.nsubs = nsubs
self.aexp = aexp
self.age = age
self.massp = massp
data = np.empty(shape=(nhalos + nsubs, len(halo_keys)), dtype=object)
mylog.info('Brick: halos : %s' % nhalos)
mylog.info('Brick: sub halos : %s' % nsubs)
mylog.info('Brick: aexp : %s' % aexp)
#pbar = get_pbar('', nhalos+nsubs)
for ihalo in range(nhalos + nsubs):
pos = f.tell()
[nbpart] = fpu.read_vector(f, 'i') # Number of particles
listp = fpu.read_vector(f, 'i') # List of the particles IDs
[ID] = fpu.read_vector(f, 'i') # Halo ID
fpu.skip(f, 1) # Skip timestep
[level, host, hostsub, nbsub, nextsub] = fpu.read_vector(f, 'i')
[m] = fpu.read_vector(f, 'f') # Total mass
[x, y, z] = fpu.read_vector(f, 'f') # Center
[vx, vy, vz] = fpu.read_vector(f, 'f') # Velocity
[Lx, Ly, Lz] = fpu.read_vector(f, 'f') # Angular momentum
[r, a, b, c] = fpu.read_vector(f, 'f') # Shape (ellipticity)
[ek, ep, et] = fpu.read_vector(f, 'f') # Energetics
[spin] = fpu.read_vector(f, 'f') # Total angular momentum
[rvir, mvir, tvir, cvel] = fpu.read_vector(f, 'f') # Virial parameters
[rho0, r_c] = fpu.read_vector(f, 'f') # NFW params
if with_contam_option:
[contam] = fpu.read_vector(f, 'i') # Contamination
# Add the halo to the list
# halos.loc[ihalo] = [ID, nbpart, level, listp.min(),
# host, hostsub, nbsub, nextsub,
# x, y, z, vx, vy, vz, Lx, Ly, Lz,
# a, b, c, ek, ep, et, rho0, r_c,
# spin, m, r, mvir, rvir, tvir, cvel]
data[ihalo] = [ID, nbpart, level, listp.min(),
host, hostsub, nbsub, nextsub,
x, y, z, vx, vy, vz, Lx, Ly, Lz,
a, b, c, ek, ep, et, rho0, r_c,
spin, m, r, mvir, rvir, tvir, cvel]
#pbar.update()
offsets[ID] = pos
print('')
types = {}
for k in ('ID', 'nbpart', 'level', 'min_part_id',
'host', 'hostsub', 'nbsub', 'nextsub'):
types[k] = np.int64
for k in ('x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',
'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',
'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel'):
types[k] = np.float64
dd = {k: data[:, i].astype(types[k])
for i, k in enumerate(halo_keys)}
halos = pd.DataFrame(dd)
# Get properties in the right units
# Masses
halos.m *= 1e11
halos.mvir *= 1e11
# Positions and distances
scale_mpc = float(data_set.length_unit.in_units('cm') / 3.08e24)
halos.x = halos.x / scale_mpc + .5
halos.y = halos.y / scale_mpc + .5
halos.z = halos.z / scale_mpc + .5
self.offsets = offsets
return halos.set_index('ID')
def get_halo_parts(self, hid):
filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(
s=self)
with open(filename, 'br') as fd:
fd.seek(self.offsets[hid])
fpu.skip(fd, 1)
listp = fpu.read_vector(fd, 'i')
return listp
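# A minimal usage sketch (assumptions: the RAMSES snapshot path and halo id below are
# placeholders, and HaloMaker tree_bricks files exist under <folder>/Halos/<iout>/):
#
#     import yt
#
#     ds = yt.load("output_00080/info_00080.txt")    # hypothetical RAMSES output
#     halos = HaloList(ds, folder='.', contam=False)
#     print(halos.get_halo(1))                       # text summary of halo 1
#     sphere = halos.get_halo_sphere(1, rvir_factor=5)
#     halos.plot_halo(1, folder='./plots/')          # projection plot around the halo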
|
from PIL import Image
from django.conf import settings
from . import forms, recognition
from . import utils
from . import models
from django.shortcuts import render, redirect
from django.contrib import admin
from django.core.mail import send_mail
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
import json
def signup(request):
if request.method == 'POST':
form = forms.UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('../admin/')
else:
form = forms.UserCreationForm()
return render(request, 'admin/logon.html',
{'form': form, 'site_header': admin.site.site_header, 'site_title': admin.site.site_title})
@method_decorator(csrf_exempt, name='dispatch')
def hello(request) -> JsonResponse:
"""hello API endpoint, clients request for access tokens through this api by their device_id"""
data = json.loads(request.body)
try:
device_id = data['device_id']
if (device := models.Device.objects.filter(id=device_id)).count():
device = device[0]
else:
# registering newly connected device (waiting for user to claim)
device = models.Device(id=data['device_id'])
device.save()
if not device.user:
return JsonResponse(data=utils.base_response(ok=False, message='Device is yet to be claimed by a user'))
tokens = models.AccessToken.objects.filter(device=device)
if tokens.count():
# request for new token -> invalidate old token
last_token = tokens.latest('time')
last_token.valid = False
last_token.save()
# create new access token
token = models.AccessToken(
device=device, ip=utils.get_client_ip(request))
token.save()
return JsonResponse(data=utils.base_response(response=dict(token=token.token)))
except KeyError:
return JsonResponse(data=utils.base_response(ok=False, message='No `device_id` specified'))
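# A minimal client-side sketch of the token exchange above (assumptions: the URL path
# and the exact JSON shape of utils.base_response -- 'ok' and 'response' keys -- are
# inferred from this module and are illustrative only):
#
#     import json, requests
#
#     resp = requests.post("http://<server>/hello/",
#                          data=json.dumps({"device_id": "my-device-id"}))
#     body = resp.json()
#     if body.get("ok"):
#         token = body["response"]["token"]   # used by fetch/introduce/log below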
def authenticate_device(funct):
@method_decorator(csrf_exempt, name='dispatch')
def view_wrapper(request, *args, **kwargs):
if request.POST:
data = dict(request.POST)
file = request.FILES.get('image', None)
else:
data = json.loads(request.body)
file = None
try:
token = data['token']
if isinstance(token, list):
token = token[0]
access_token = models.AccessToken.objects.get(token=token)
if not access_token.is_valid(request):
return JsonResponse(data=utils.base_response(message='This token is no longer valid.', ok=False))
auth_res = dict(user=access_token.device.user,
device=access_token.device)
except KeyError:
return JsonResponse(data=utils.base_response(message='No `token` was specified.', ok=False))
except (models.models.ObjectDoesNotExist, Exception):
return JsonResponse(data=utils.base_response(message='Invalid `token` was specified.', ok=False))
return funct(request, *args, data=data, file=file, auth_res=auth_res, **kwargs)
return view_wrapper
@authenticate_device
def fetch(request, data: dict = None, file=None, auth_res=None):
return JsonResponse(
data=utils.base_response(
response=dict(faces=[
dict(embedding=face.embedding, face_id=face.id) for face in
models.Face.objects.filter(user=auth_res['user'])
],
in_count=auth_res['device'].inside_count(),
)
)
)
@authenticate_device
def introduce(request, data: dict = None, file=None, auth_res=None):
try:
embedding = data['embedding']
embedding = json.loads(embedding if not isinstance(
embedding, list) else embedding[0])
image = Image.open(file).convert('RGB')
face = recognition.find_face(
auth_res['user'], image=image, embedding=embedding)
if isinstance(face, bool):
face = models.Face.save_pil(
user=auth_res['user'], image=image, embedding=embedding)
return JsonResponse(data=utils.base_response(response=dict(face_id=face.id)))
except KeyError:
return JsonResponse(data=utils.base_response(message='Embedding was not mentioned', ok=False))
def mail_message(log):
device = f'{log.device.name if log.device.name else log.device.id}'
face = f'{log.face.name if log.face.name else log.face.id}'
kind = f'{"enter" if log.kind == "E" else "exit"}'
num_in = log.device.inside_count()
return f'Your device "{device}", saw "{face}" {kind}.\nThere are currently {num_in} people' \
f' inside this property.'
@authenticate_device
def log(request, data: dict = None, file=None, auth_res=None):
try:
face_id = data['face_id'] if not isinstance(
data['face_id'], list) else data['face_id'][0]
face = models.Face.objects.get(id=face_id)
kind = data['kind'] if not isinstance(
data['kind'], list) else data['kind'][0]
device = auth_res['device']
image = Image.open(file).convert('RGB') if file is not None else None
log = models.Log.save_pil(
face=face, device=device, kind=kind, image=image)
if settings.GMAIL:
send_mail(subject='Surveillance Log',
message=mail_message(log),
from_email=settings.GMAIL,
recipient_list=[device.user.email],
fail_silently=True)
return JsonResponse(data=utils.base_response(
ok=True, message='Logged successfully', response=dict(
in_count=log.device.inside_count(), name='Unknown' if not log.face.name else log.face.name)
))
except KeyError:
return JsonResponse(
data=utils.base_response(message='Both `face_id` and `kind` are expected to be specified', ok=False))
except (models.models.ObjectDoesNotExist,):
return JsonResponse(data=utils.base_response(message='Invalid `face_id` is specified', ok=False))
|
from setuptools import setup
import sys
import os
if sys.version_info.major < 3:
raise Exception("python3 is required to run this script")
# also cleanup the info.json file before building
if os.path.exists('totpauth/database/info.json'):
os.remove('totpauth/database/info.json')
open('totpauth/database/info.json', 'w')
setup(
name='totp-cli',
version='1.0',
description='A CLI tool to generate Time-Based One Time Passwords (TOTP)',
author='Haider Ali Khichi',
author_email='khichihaider@gmail.com',
license='MIT',
url='https://github.com/candh/totp-cli',
keywords='totp otp 2fa cli tools two factor authentication google authenticator',
install_requires=['termcolor', 'tinydb', 'keyring', 'pyotp'],
packages=['totpauth'],
entry_points = {
'console_scripts': [
'totp=totpauth.totp:main'
]
},
package_data = {
'totpauth': ['database/info.json']
},
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Security',
'Topic :: Security :: Cryptography'
]
)
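# A minimal usage sketch (assumption: run from the repository root):
#
#     pip3 install .   # installs the package plus the `totp` console script
#     totp             # runs totpauth.totp:main via the entry point declared above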
|
from . import *
class Album(Base):
__tablename__ = "album_view"
id = Column(Integer, primary_key=True)
fullname = Column(Text)
name = Column(Text)
prefix = Column(Text)
|
class Solution:
def invalidTransactions(self, transactions):
helper = []
helper_back = []
for transaction in transactions:
tmp_transaction = transaction.split(",")
helper.append(tmp_transaction)
for record in helper:
if int(record[2]) > 1000:
helper_back.append(",".join(record))
else:
for record2 in helper:
if record[0] == record2[0] and record[3] != record2[3] and abs(int(record[1])-int(record2[1])) <= 60:
helper_back.append(",".join(record))
break
return helper_back
slu = Solution()
print(slu.invalidTransactions(["alice,20,800,mtv","alice,50,1200,mtv"]))
|
# flake8: noqa
import os
from tempfile import TemporaryDirectory
from pytest import mark
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.contrib.datasets import MNIST
from catalyst.data import ToTensor
from catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS
class DistilRunner(dl.Runner):
def handle_batch(self, batch):
x, y = batch
self.model["teacher"].eval() # let's manually set teacher model to eval mode
with torch.no_grad():
t_logits = self.model["teacher"](x)
s_logits = self.model["student"](x)
self.batch = {
"t_logits": t_logits,
"s_logits": s_logits,
"targets": y,
"s_logprobs": F.log_softmax(s_logits, dim=-1),
"t_probs": F.softmax(t_logits, dim=-1),
}
def train_experiment(device, engine=None):
with TemporaryDirectory() as logdir:
teacher = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
student = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
model = {"teacher": teacher, "student": student}
criterion = {"cls": nn.CrossEntropyLoss(), "kl": nn.KLDivLoss(reduction="batchmean")}
optimizer = optim.Adam(student.parameters(), lr=0.02)
loaders = {
"train": DataLoader(
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
),
"valid": DataLoader(
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
),
}
runner = DistilRunner()
# model training
runner.train(
engine=engine or dl.DeviceEngine(device),
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
num_epochs=1,
logdir=logdir,
verbose=False,
callbacks=[
dl.AccuracyCallback(
input_key="t_logits", target_key="targets", num_classes=2, prefix="teacher_"
),
dl.AccuracyCallback(
input_key="s_logits", target_key="targets", num_classes=2, prefix="student_"
),
dl.CriterionCallback(
input_key="s_logits",
target_key="targets",
metric_key="cls_loss",
criterion_key="cls",
),
dl.CriterionCallback(
input_key="s_logprobs",
target_key="t_probs",
metric_key="kl_div_loss",
criterion_key="kl",
),
dl.MetricAggregationCallback(
metric_key="loss", metrics=["kl_div_loss", "cls_loss"], mode="mean"
),
dl.OptimizerCallback(metric_key="loss", model_key="student"),
dl.CheckpointCallback(
logdir=logdir,
loader_key="valid",
metric_key="loss",
minimize=True,
save_n_best=3,
),
],
)
# Torch
def test_distillation_on_cpu():
train_experiment("cpu")
@mark.skipif(not IS_CUDA_AVAILABLE, reason="CUDA device is not available")
def test_distillation_on_torch_cuda0():
train_experiment("cuda:0")
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_cuda1():
train_experiment("cuda:1")
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_dp():
train_experiment(None, dl.DataParallelEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason="No CUDA>=2 found",
)
def test_distillation_on_torch_ddp():
train_experiment(None, dl.DistributedDataParallelEngine())
# AMP
@mark.skipif(
not (IS_CUDA_AVAILABLE and SETTINGS.amp_required), reason="No CUDA or AMP found",
)
def test_distillation_on_amp():
train_experiment(None, dl.AMPEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),
reason="No CUDA>=2 or AMP found",
)
def test_distillation_on_amp_dp():
train_experiment(None, dl.DataParallelAMPEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),
reason="No CUDA>=2 or AMP found",
)
def test_distillation_on_amp_ddp():
train_experiment(None, dl.DistributedDataParallelAMPEngine())
# APEX
@mark.skipif(
not (IS_CUDA_AVAILABLE and SETTINGS.apex_required), reason="No CUDA or Apex found",
)
def test_distillation_on_apex():
train_experiment(None, dl.APEXEngine())
@mark.skipif(
not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),
reason="No CUDA>=2 or Apex found",
)
def test_distillation_on_apex_dp():
train_experiment(None, dl.DataParallelAPEXEngine())
# @mark.skipif(
# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),
# reason="No CUDA>=2 or Apex found",
# )
# def test_distillation_on_apex_ddp():
# train_experiment(None, dl.DistributedDataParallelApexEngine())
|
import json
import requests
import pprint
def api_get_request(url):
# In this exercise, you want to call the last.fm API to get a list of the
# top artists in Spain.
#
# Once you've done this, return the name of the number 1 top artist in Spain.
data = requests.get(url).text
data = json.loads(data)
#country_data = data['country']
pp = pprint.PrettyPrinter(depth = 4)
#pp.pprint(data)
top_artists = data['topartists']['artist']
#[e['name'] for e in top_artists]
return top_artists[0]['name'] # return the top artist in Spain
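# A minimal usage sketch. Assumptions: the last.fm geo.gettopartists endpoint shown
# here is the one this exercise targets, and the API key is a placeholder that must be
# replaced with a real key for a live call.
if __name__ == '__main__':
    API_KEY = 'YOUR_LASTFM_API_KEY'  # placeholder, not a real key
    url = ('http://ws.audioscrobbler.com/2.0/'
           '?method=geo.gettopartists&country=spain'
           '&api_key=' + API_KEY + '&format=json')
    print(api_get_request(url))  # prints the name of the top artist in Spain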
|
from infynipy.models.group import ReferrerGroup
from .. import IntegrationTest, vcrr
class TestReferrerGroup(IntegrationTest):
@vcrr.use_cassette
def test_referrer_group_get_multiple(self):
for group in self.infynity.referrer_groups:
assert isinstance(group, ReferrerGroup)
@vcrr.use_cassette
def test_referrer_group_create(self):
data = {
"group_name": "Test3 Group",
"broker_id": 20041
}
group_id = self.infynity.referrer_group(data=data).create()
assert isinstance(group_id, str)
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ALBERT transformer-based text encoder network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.networks import albert_encoder
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class AlbertEncoderTest(keras_parameterized.TestCase):
def tearDown(self):
super(AlbertEncoderTest, self).tearDown()
tf.keras.mixed_precision.set_global_policy("float32")
@parameterized.named_parameters(
dict(testcase_name="default", expected_dtype=tf.float32),
dict(testcase_name="with_float16_dtype", expected_dtype=tf.float16),
)
def test_network_creation(self, expected_dtype):
hidden_size = 32
sequence_length = 21
kwargs = dict(
vocab_size=100,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=3)
if expected_dtype == tf.float16:
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(**kwargs)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
expected_data_shape = [None, sequence_length, hidden_size]
expected_pooled_shape = [None, hidden_size]
self.assertAllEqual(expected_data_shape, data.shape.as_list())
self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())
# If float_dtype is set to float16, the data output is float32 (from a layer
# norm) and pool output should be float16.
self.assertEqual(tf.float32, data.dtype)
self.assertEqual(expected_dtype, pooled.dtype)
    # ALBERT has additional 'embedding_hidden_mapping_in' weights and
# it shares transformer weights.
self.assertNotEmpty(
[x for x in test_network.weights if "embedding_projection/" in x.name])
self.assertNotEmpty(
[x for x in test_network.weights if "transformer/" in x.name])
self.assertEmpty(
[x for x in test_network.weights if "transformer/layer" in x.name])
def test_network_invocation(self):
hidden_size = 32
sequence_length = 21
vocab_size = 57
num_types = 7
num_layers = 3
# Create a small TransformerEncoder for testing.
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
# Create the inputs (note that the first dimension is implicit).
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
data, pooled = test_network([word_ids, mask, type_ids])
# Create a model based off of this network:
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
# Invoke the model. We can't validate the output data here (the model is too
# complex) but this will catch structural runtime errors.
batch_size = 3
word_id_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(
num_types, size=(batch_size, sequence_length))
list_outputs = model.predict([word_id_data, mask_data, type_id_data])
# Creates a TransformerEncoder with max_sequence_length != sequence_length
max_sequence_length = 128
test_network = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types)
model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
_ = model.predict([word_id_data, mask_data, type_id_data])
# Tests dictionary outputs.
test_network_dict = albert_encoder.AlbertEncoder(
vocab_size=vocab_size,
embedding_width=8,
hidden_size=hidden_size,
max_sequence_length=max_sequence_length,
num_attention_heads=2,
num_layers=num_layers,
type_vocab_size=num_types,
dict_outputs=True)
_ = test_network_dict([word_ids, mask, type_ids])
test_network_dict.set_weights(test_network.get_weights())
list_outputs = test_network([word_id_data, mask_data, type_id_data])
dict_outputs = test_network_dict(
dict(
input_word_ids=word_id_data,
input_mask=mask_data,
input_type_ids=type_id_data))
self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"])
self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"])
self.assertLen(dict_outputs["pooled_output"], num_layers)
def test_serialize_deserialize(self):
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Create a network object that sets all of its config options.
kwargs = dict(
vocab_size=100,
embedding_width=8,
hidden_size=32,
num_layers=3,
num_attention_heads=2,
max_sequence_length=21,
type_vocab_size=12,
intermediate_size=1223,
activation="relu",
dropout_rate=0.05,
attention_dropout_rate=0.22,
initializer="glorot_uniform")
network = albert_encoder.AlbertEncoder(**kwargs)
expected_config = dict(kwargs)
expected_config["activation"] = tf.keras.activations.serialize(
tf.keras.activations.get(expected_config["activation"]))
expected_config["initializer"] = tf.keras.initializers.serialize(
tf.keras.initializers.get(expected_config["initializer"]))
self.assertEqual(network.get_config(), expected_config)
# Create another network object from the first object's config.
new_network = (
albert_encoder.AlbertEncoder.from_config(
network.get_config()))
# Validate that the config can be forced to JSON.
_ = new_network.to_json()
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(network.get_config(), new_network.get_config())
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/python3
# ------------------------------------------------------------------------------
"""@package ble_lights.py
Sets the attached light values and notifies when they change.
"""
# ------------------------------------------------------------------------------
# Kris Dunning ippie52@gmail.com 2020.
# ------------------------------------------------------------------------------
import pybleno as ble
import wiringpi
from typing import Dict
from sys import exit
from time import sleep
from colour_printer import ColourPrinter
class KrisCharacteristic(ColourPrinter, ble.Characteristic):
"""
Provides the base for debugging a characteristic
"""
def __init__(self, settings: Dict[str, any], name: str, colour: str) -> None:
"""
Initialises the object
"""
ble.Characteristic.__init__(self, settings)
        ColourPrinter.__init__(self, colour, name)
class UnlockChar(KrisCharacteristic):
"""
Provides the characteristic for the UnlockChar
"""
def __init__(self, uuid: str) -> None:
"""
        Constructs the UnlockChar
"""
self._changeObservers = {}
KrisCharacteristic.__init__(self, {
'uuid': uuid,
'properties': ['write'],
'value': ''
},
'UnlockChar',
ColourPrinter.GREEN
)
self._value = ''
    def addObserver(self, name: str, observer) -> None:
        """
        Adds an observer that is called when the written value changes
        """
        self.print(f'Adding observer for {name}.')
        self._changeObservers[name] = observer
    def removeObserver(self, name: str) -> None:
        """
        Removes a previously added observer
        """
        if name in self._changeObservers.keys():
            self.print(f'Removing observer {name}.')
            del self._changeObservers[name]
        else:
            self.print(f'Could not find observer {name} to remove.')
# def onReadRequest(self, offset, callback):
# """
# Handles the read request for this characteristic
# """
# self.print(f'Read request received, offset {offset}')
# callback(ble.Characteristic.RESULT_SUCCESS, self._value)
    def onWriteRequest(self, data, offset, withoutResponse, callback):
        """
        Handles the write request
        """
        self.print(f'Write request received, data: {data}, offset: {offset}')
        if data != self._value:
            self.print('The value has changed - Signal any listeners')
            for key, observer in self._changeObservers.items():
                self.print(f'Signalling observer {key}')
                observer(data)
            self._value = data
        callback(ble.Characteristic.RESULT_SUCCESS)
class StatusChar(KrisCharacteristic):
"""
Provides the characteristic for an LED
"""
def __init__(self, uuid: str, led: int) -> None:
"""
Constructs the StatusChar
"""
self._led = led
self._value = wiringpi.digitalRead(self._led)
KrisCharacteristic.__init__(self, {
'uuid': uuid,
'properties': ['notify'],
'value': self._value
},
'StatusChar',
ColourPrinter.GOLD
)
        self._updateValueCallback = None
def onSubscribe(self, maxValueSize: int, updateValueCallback) -> None:
"""
Sets the update value callback
"""
self.print('New subscriber added.')
self._updateValueCallback = updateValueCallback
def onUnsubscribe(self) -> None:
"""
Removes the update value callback
"""
self.print('Subscriber removed')
self._updateValueCallback = None
    def set(self, new_value: int) -> None:
        """
        Sets the value of the LED
        """
        new_value = 0 if new_value == 0 else 1
        wiringpi.digitalWrite(self._led, new_value)
        self._value = new_value
def onStateChange(state: str) -> None:
"""
The state change handler function
"""
global server
print(f'on -> State Change: {state}')
if state == 'poweredOn':
server.startAdvertising('Kris Service?', ['FF10'])
else:
server.stopAdvertising()
def onAdvertisingStart(error: bool) -> None:
"""
The advertising handler function
"""
print(f'on -> Advertising Start: {error}')
if not error:
global server
        status = StatusChar('FF12', RED_GPIO)  # assumption: the status LED is the red GPIO pin
switch = UnlockChar('FF11')
switch.addObserver('FF12', status.set)
server.setServices([
ble.BlenoPrimaryService({
'uuid': 'FF10',
'characteristics': [status, switch]
})
]
)
RED_GPIO = 0
GRN_GPIO = 2
BLU_GPIO = 3
LED_SEQUENCE = [RED_GPIO, GRN_GPIO, BLU_GPIO]
BTN_GPIO = 1
wiringpi.wiringPiSetup() # For GPIO pin numbering
for led in LED_SEQUENCE:
wiringpi.pinMode(led, 1)
wiringpi.digitalWrite(led, 0)
wiringpi.pinMode(BTN_GPIO, 0)
cp = ColourPrinter(ColourPrinter.SILVER, 'Script')
cp.print('Creating the server...')
server = ble.Bleno()
cp.print('Binding the onStateChange handler')
server.on('stateChange', onStateChange)
cp.print('Binding the onAdvertisingStart handler')
server.on('advertisingStart', onAdvertisingStart)
cp.print('Starting the server...')
server.start()
running = True
while running:
try:
sleep(0.1)
except KeyboardInterrupt:
cp.print('Polite exit.')
running = False
server.stopAdvertising()
server.disconnect()
|
# Copyright 2018-present Kensho Technologies, LLC.
import six
from ..blocks import Filter, GlobalOperationsStart
from ..ir_lowering_common import (extract_optional_location_root_info,
extract_simple_optional_location_info,
lower_context_field_existence, merge_consecutive_filter_clauses,
optimize_boolean_expression_comparisons, remove_end_optionals)
from .ir_lowering import (lower_backtrack_blocks,
lower_folded_coerce_types_into_filter_blocks,
lower_has_substring_binary_compositions,
remove_backtrack_blocks_from_fold,
rewrite_binary_composition_inside_ternary_conditional,
truncate_repeated_single_step_traversals,
truncate_repeated_single_step_traversals_in_sub_queries)
from ..ir_sanity_checks import sanity_check_ir_blocks_from_frontend
from .between_lowering import lower_comparisons_to_between
from .optional_traversal import (collect_filters_to_first_location_occurrence,
convert_optional_traversals_to_compound_match_query,
lower_context_field_expressions, prune_non_existent_outputs)
from ..match_query import convert_to_match_query
from ..workarounds import (orientdb_class_with_while, orientdb_eval_scheduling,
orientdb_query_execution)
from .utils import construct_where_filter_predicate
##############
# Public API #
##############
def lower_ir(ir_blocks, query_metadata_table, type_equivalence_hints=None):
"""Lower the IR into an IR form that can be represented in MATCH queries.
Args:
ir_blocks: list of IR blocks to lower into MATCH-compatible form
query_metadata_table: QueryMetadataTable object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
MatchQuery object containing the IR blocks organized in a MATCH-like structure
"""
sanity_check_ir_blocks_from_frontend(ir_blocks, query_metadata_table)
# Construct the mapping of each location to its corresponding GraphQL type.
location_types = {
location: location_info.type
for location, location_info in query_metadata_table.registered_locations
}
# Compute the set of all locations that have associated type coercions.
coerced_locations = {
location
for location, location_info in query_metadata_table.registered_locations
if location_info.coerced_from_type is not None
}
# Extract information for both simple and complex @optional traverses
location_to_optional_results = extract_optional_location_root_info(ir_blocks)
complex_optional_roots, location_to_optional_roots = location_to_optional_results
simple_optional_root_info = extract_simple_optional_location_info(
ir_blocks, complex_optional_roots, location_to_optional_roots)
ir_blocks = remove_end_optionals(ir_blocks)
# Append global operation block(s) to filter out incorrect results
# from simple optional match traverses (using a WHERE statement)
if len(simple_optional_root_info) > 0:
where_filter_predicate = construct_where_filter_predicate(
query_metadata_table, simple_optional_root_info)
ir_blocks.insert(-1, GlobalOperationsStart())
ir_blocks.insert(-1, Filter(where_filter_predicate))
# These lowering / optimization passes work on IR blocks.
ir_blocks = lower_context_field_existence(ir_blocks, query_metadata_table)
ir_blocks = optimize_boolean_expression_comparisons(ir_blocks)
ir_blocks = rewrite_binary_composition_inside_ternary_conditional(ir_blocks)
ir_blocks = merge_consecutive_filter_clauses(ir_blocks)
ir_blocks = lower_has_substring_binary_compositions(ir_blocks)
ir_blocks = orientdb_eval_scheduling.workaround_lowering_pass(ir_blocks, query_metadata_table)
# Here, we lower from raw IR blocks into a MatchQuery object.
# From this point on, the lowering / optimization passes work on the MatchQuery representation.
match_query = convert_to_match_query(ir_blocks)
match_query = lower_comparisons_to_between(match_query)
match_query = lower_backtrack_blocks(match_query, location_types)
match_query = truncate_repeated_single_step_traversals(match_query)
match_query = orientdb_class_with_while.workaround_type_coercions_in_recursions(match_query)
# Optimize and lower the IR blocks inside @fold scopes.
new_folds = {
key: merge_consecutive_filter_clauses(
remove_backtrack_blocks_from_fold(
lower_folded_coerce_types_into_filter_blocks(folded_ir_blocks)
)
)
for key, folded_ir_blocks in six.iteritems(match_query.folds)
}
match_query = match_query._replace(folds=new_folds)
compound_match_query = convert_optional_traversals_to_compound_match_query(
match_query, complex_optional_roots, location_to_optional_roots)
compound_match_query = prune_non_existent_outputs(compound_match_query)
compound_match_query = collect_filters_to_first_location_occurrence(compound_match_query)
compound_match_query = lower_context_field_expressions(compound_match_query)
compound_match_query = truncate_repeated_single_step_traversals_in_sub_queries(
compound_match_query)
compound_match_query = orientdb_query_execution.expose_ideal_query_execution_start_points(
compound_match_query, location_types, coerced_locations)
return compound_match_query
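# A minimal sketch of where lower_ir() sits in the compilation pipeline (assumption:
# the frontend entry point and attribute names below follow this package's
# compiler_frontend module and are illustrative, not an exact API reference):
#
#     from ..compiler_frontend import graphql_to_ir
#
#     ir_and_metadata = graphql_to_ir(schema, graphql_query_string)
#     compound_match_query = lower_ir(
#         ir_and_metadata.ir_blocks,
#         ir_and_metadata.query_metadata_table,
#         type_equivalence_hints=None)
#     # the emitter then turns compound_match_query into a MATCH query string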
|
# -*- coding: utf-8 -*-
r"""
`p`-adic `L`-functions of elliptic curves
To an elliptic curve `E` over the rational numbers and a prime `p`, one
can associate a `p`-adic L-function; at least if `E` does not have additive
reduction at `p`. This function is defined by interpolation of L-values of `E`
at twists. Through the main conjecture of Iwasawa theory it should also be
equal to a characteristic series of a certain Selmer group.
If `E` is ordinary, then it is an element of the Iwasawa algebra
`\Lambda(\ZZ_p^\times) = \ZZ_p[\Delta][\![T]\!]`, where `\Delta` is the group
of `(p-1)`-st roots of unity in `\ZZ_p^\times`, and `T = [\gamma] - 1` where
`\gamma = 1 + p` is a generator of `1 + p\ZZ_p`. (There is a slightly different
description for `p = 2`.)
One can decompose this algebra as the direct product of the subalgebras
corresponding to the characters of `\Delta`, which are simply the powers
`\tau^\eta` (`0 \le \eta \le p-2`) of the Teichmueller character `\tau: \Delta
\to \ZZ_p^\times`. Projecting the L-function into these components gives `p-1`
power series in `T`, each with coefficients in `\ZZ_p`.
If `E` is supersingular, the series will have coefficients in a quadratic
extension of `\QQ_p`, and the coefficients will be unbounded. In this case we
have only implemented the series for `\eta = 0`. We have also implemented the
`p`-adic L-series as formulated by Perrin-Riou [BP]_, which has coefficients in
the Dieudonné module `D_pE = H^1_{dR}(E/\QQ_p)` of `E`. There is a different
description by Pollack [Po]_ which is not available here.
According to the `p`-adic version of the Birch and Swinnerton-Dyer conjecture
[MTT]_, the order of vanishing of the `L`-function at the trivial character
(i.e. of the series for `\eta = 0` at `T = 0`) is just the rank of `E(\QQ)`, or
this rank plus one if the reduction at `p` is split multiplicative.
See [SW]_ for more details.
REFERENCES:
- [MTT]_
- [BP]_
.. [Po] Robert Pollack, *On the `p`-adic `L`-function of a modular form
at a supersingular prime*, Duke Math. J. 118 (2003), no. 3, 523-558.
- [SW]_
AUTHORS:
- William Stein (2007-01-01): first version
- Chris Wuthrich (22/05/2007): changed minor issues and added supersingular things
- Chris Wuthrich (11/2008): added quadratic_twists
- David Loeffler (01/2011): added nontrivial Teichmueller components
"""
######################################################################
# Copyright (C) 2007 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
######################################################################
from __future__ import print_function
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.padics.factory import Qp
from sage.rings.infinity import infinity
from sage.rings.all import LaurentSeriesRing, PowerSeriesRing, PolynomialRing, Integers
from sage.rings.integer import Integer
from sage.arith.all import valuation, binomial, kronecker_symbol, gcd, prime_divisors
from sage.structure.sage_object import SageObject
from sage.structure.richcmp import richcmp_method, richcmp
from sage.misc.all import verbose, denominator, get_verbose
import sage.arith.all as arith
from sage.modules.free_module_element import vector
import sage.matrix.all as matrix
import sage.schemes.hyperelliptic_curves.monsky_washnitzer
from sage.functions.log import log
from sage.functions.other import floor
from sage.misc.decorators import rename_keyword
@richcmp_method
class pAdicLseries(SageObject):
r"""
The `p`-adic L-series of an elliptic curve.
EXAMPLES:
An ordinary example::
sage: e = EllipticCurve('389a')
sage: L = e.padic_lseries(5)
sage: L.series(0)
Traceback (most recent call last):
...
ValueError: n (=0) must be a positive integer
sage: L.series(1)
O(T^1)
sage: L.series(2)
O(5^4) + O(5)*T + (4 + O(5))*T^2 + (2 + O(5))*T^3 + (3 + O(5))*T^4 + O(T^5)
sage: L.series(3, prec=10)
O(5^5) + O(5^2)*T + (4 + 4*5 + O(5^2))*T^2 + (2 + 4*5 + O(5^2))*T^3 + (3 + O(5^2))*T^4 + (1 + O(5))*T^5 + O(5)*T^6 + (4 + O(5))*T^7 + (2 + O(5))*T^8 + O(5)*T^9 + O(T^10)
sage: L.series(2,quadratic_twist=-3)
2 + 4*5 + 4*5^2 + O(5^4) + O(5)*T + (1 + O(5))*T^2 + (4 + O(5))*T^3 + O(5)*T^4 + O(T^5)
A prime p such that E[p] is reducible::
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.series(1)
5 + O(5^2) + O(T)
sage: L.series(2)
5 + 4*5^2 + O(5^3) + O(5^0)*T + O(5^0)*T^2 + O(5^0)*T^3 + O(5^0)*T^4 + O(T^5)
sage: L.series(3)
5 + 4*5^2 + 4*5^3 + O(5^4) + O(5)*T + O(5)*T^2 + O(5)*T^3 + O(5)*T^4 + O(T^5)
An example showing the calculation of nontrivial Teichmueller twists::
sage: E = EllipticCurve('11a1')
sage: lp = E.padic_lseries(7)
sage: lp.series(4,eta=1)
3 + 7^3 + 6*7^4 + 3*7^5 + O(7^6) + (2*7 + 7^2 + O(7^3))*T + (1 + 5*7^2 + O(7^3))*T^2 + (4 + 4*7 + 4*7^2 + O(7^3))*T^3 + (4 + 3*7 + 7^2 + O(7^3))*T^4 + O(T^5)
sage: lp.series(4,eta=2)
5 + 6*7 + 4*7^2 + 2*7^3 + 3*7^4 + 2*7^5 + O(7^6) + (6 + 4*7 + 7^2 + O(7^3))*T + (3 + 2*7^2 + O(7^3))*T^2 + (1 + 4*7 + 7^2 + O(7^3))*T^3 + (6 + 6*7 + 6*7^2 + O(7^3))*T^4 + O(T^5)
sage: lp.series(4,eta=3)
O(7^6) + (5 + 4*7 + 2*7^2 + O(7^3))*T + (6 + 5*7 + 2*7^2 + O(7^3))*T^2 + (5*7 + O(7^3))*T^3 + (7 + 4*7^2 + O(7^3))*T^4 + O(T^5)
(Note that the last series vanishes at `T = 0`, which is consistent with ::
sage: E.quadratic_twist(-7).rank()
1
This proves that `E` has rank 1 over `\QQ(\zeta_7)`.)
TESTS:
The load-dumps test::
sage: lp = EllipticCurve('11a').padic_lseries(5)
sage: lp == loads(dumps(lp))
True
"""
def __init__(self, E, p, implementation = 'eclib', normalize='L_ratio'):
r"""
INPUT:
- ``E`` -- an elliptic curve
- ``p`` -- a prime of good reduction
- ``implementation`` -- string (default:'eclib'); either 'eclib' to use
John Cremona's ``eclib`` for the computation of modular
symbols or 'sage' to use Sage's own implementation
- ``normalize`` -- ``'L_ratio'`` (default), ``'period'`` or ``'none'``;
this describes the way the modular symbols
are normalized. See ``modular_symbol`` of
an elliptic curve over Q for more details.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(3)
sage: Lp.series(2,prec=3)
2 + 3 + 3^2 + 2*3^3 + O(3^4) + (1 + O(3))*T + (1 + O(3))*T^2 + O(T^3)
"""
self._E = E
self._p = ZZ(p)
self._normalize = normalize
if implementation not in ['eclib', 'sage']:
raise ValueError("Implementation should be one of 'eclib' or 'sage'")
self._implementation = implementation
if not self._p.is_prime():
raise ValueError("p (=%s) must be a prime" % p)
if E.conductor() % (self._p)**2 == 0:
raise NotImplementedError("p (=%s) must be a prime of semi-stable reduction" % p)
try :
E.label()
except RuntimeError :
print("Warning : Curve outside Cremona's table. Computations of modular symbol space might take very long !")
self._modular_symbol = E.modular_symbol(sign=+1,
implementation=implementation,
normalize=normalize)
def __add_negative_space(self):
r"""
A helper function not designed for direct use.
This function adds the attribute ``_negative_modular_symbol`` to the class. This may take time
and will only be needed when twisting with negative fundamental discriminants.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: lp = E.padic_lseries(5)
sage: lp.modular_symbol(1/7,sign=-1) #indirect doctest
-1/2
"""
self._negative_modular_symbol = self._E.modular_symbol(sign=-1, implementation="sage", normalize=self._normalize)
def __richcmp__(self, other, op):
r"""
Compare ``self`` and ``other``.
TESTS::
sage: lp1 = EllipticCurve('11a1').padic_lseries(5)
sage: lp2 = EllipticCurve('11a1').padic_lseries(7)
sage: lp3 = EllipticCurve('11a2').padic_lseries(5)
sage: lp1 == lp1
True
sage: lp1 == lp2
False
sage: lp1 == lp3
False
"""
if type(self) != type(other):
return NotImplemented
return richcmp((self._E, self._p), (other._E, other._p), op)
def elliptic_curve(self):
r"""
Return the elliptic curve to which this `p`-adic L-series is associated.
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.elliptic_curve()
Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field
"""
return self._E
def prime(self):
r"""
Return the prime `p` as in 'p-adic L-function'.
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.prime()
5
"""
return self._p
def _repr_(self):
r"""
Return print representation.
EXAMPLES::
sage: e = EllipticCurve('37a')
sage: e.padic_lseries(3)._repr_()
'3-adic L-series of Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field'
sage: e.padic_lseries(3,normalize='none')
3-adic L-series of Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field (not normalized)
sage: L = e.padic_lseries(3,normalize='none')
sage: L.rename('(factor)*L_3(T)')
sage: L
(factor)*L_3(T)
"""
s = "%s-adic L-series of %s" % (self._p, self._E)
if not self._normalize == 'L_ratio':
s += ' (not normalized)'
return s
def modular_symbol(self, r, sign=+1, quadratic_twist=+1):
r"""
Return the modular symbol evaluated at `r`.
This is used to compute this `p`-adic L-series.
Note that the normalization is not correct at this
stage: use ``_quotient_of_periods_to_twist`` to correct.
Note also that this function does not check if the condition
on the quadratic_twist=D is satisfied. So the result will only
be correct if for each prime `\ell` dividing `D`, we have
`ord_{\ell}(N)<= ord_{\ell}(D)`, where `N` is the conductor of the curve.
INPUT:
- ``r`` -- a cusp given as either a rational number or oo
- ``sign`` -- +1 (default) or -1 (only implemented without twists)
- ``quadratic_twist`` -- a fundamental discriminant of a quadratic field or +1 (default)
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: lp = E.padic_lseries(5)
sage: [lp.modular_symbol(r) for r in [0,1/5,oo,1/11]]
[1/5, 6/5, 0, 0]
sage: [lp.modular_symbol(r,sign=-1) for r in [0,1/3,oo,1/7]]
[0, 1/2, 0, -1/2]
sage: [lp.modular_symbol(r,quadratic_twist=-20) for r in [0,1/5,oo,1/11]]
[1, 1, 0, 1/2]
sage: E = EllipticCurve('20a1')
sage: Et = E.quadratic_twist(-4)
sage: lpt = Et.padic_lseries(5)
sage: eta = lpt._quotient_of_periods_to_twist(-4)
sage: lpt.modular_symbol(0) == lp.modular_symbol(0,quadratic_twist=-4) / eta
True
"""
if quadratic_twist == +1 :
if sign == +1 :
return self._modular_symbol(r)
elif sign == -1:
try:
m = self._negative_modular_symbol
except (KeyError, AttributeError):
if not hasattr(self, '_modular_symbol_negative'):
self.__add_negative_space()
m = self._negative_modular_symbol
return m(r)
else :
D = quadratic_twist
if sign == -1:
raise NotImplementedError("Quadratic twists for negative modular symbols are not yet implemented.")
if D > 0:
m = self._modular_symbol
return sum([kronecker_symbol(D, u) * m(r + ZZ(u) / D)
for u in range(1, D)])
else:
try:
m = self._negative_modular_symbol
except (KeyError, AttributeError):
if not hasattr(self, '_modular_symbol_negative'):
self.__add_negative_space()
m = self._negative_modular_symbol
return -sum([kronecker_symbol(D, u) * m(r + ZZ(u) / D)
for u in range(1, -D)])
def measure(self, a, n, prec, quadratic_twist=+1, sign = +1):
r"""
Return the measure on `\ZZ_p^{\times}` defined by
`\mu_{E,\alpha}^+ ( a + p^n \ZZ_p ) =
\frac{1}{\alpha^n} \left [\frac{a}{p^n}\right]^{+} -
\frac{1}{\alpha^{n+1}} \left[\frac{a}{p^{n-1}}\right]^{+}`
where `[\cdot]^{+}` is the modular symbol. This is used to define
this `p`-adic L-function (at least when the reduction is good).
The optional argument ``sign`` allows the minus symbol `[\cdot]^{-}` to
be substituted for the plus symbol.
The optional argument ``quadratic_twist`` replaces `E` by the twist in
the above formula, but the twisted modular symbol is computed using a
sum over modular symbols of `E` rather than finding the modular symbols
for the twist. Quadratic twists are only implemented if the sign is
`+1`.
Note that the normalization is not correct at this
stage: use ``_quotient_of_periods`` and ``_quotient_of_periods_to_twist``
to correct.
Note also that this function does not check if the condition
on the ``quadratic_twist=D`` is satisfied. So the result will only
be correct if for each prime `\ell` dividing `D`, we have
`ord_{\ell}(N)<= ord_{\ell}(D)`, where `N` is the conductor of the curve.
INPUT:
- ``a`` -- an integer
- ``n`` -- a non-negative integer
- ``prec`` -- an integer
- ``quadratic_twist`` (default = 1) -- a fundamental discriminant of a quadratic field,
should be coprime to the conductor of `E`
- ``sign`` (default = 1) -- an integer, which should be `\pm 1`.
EXAMPLES::
sage: E = EllipticCurve('37a')
sage: L = E.padic_lseries(5)
sage: L.measure(1,2, prec=9)
2 + 3*5 + 4*5^3 + 2*5^4 + 3*5^5 + 3*5^6 + 4*5^7 + 4*5^8 + O(5^9)
sage: L.measure(1,2, quadratic_twist=8,prec=15)
O(5^15)
sage: L.measure(1,2, quadratic_twist=-4,prec=15)
4 + 4*5 + 4*5^2 + 3*5^3 + 2*5^4 + 5^5 + 3*5^6 + 5^8 + 2*5^9 + 3*5^12 + 2*5^13 + 4*5^14 + O(5^15)
sage: E = EllipticCurve('11a1')
sage: a = E.quadratic_twist(-3).padic_lseries(5).measure(1,2,prec=15)
sage: b = E.padic_lseries(5).measure(1,2, quadratic_twist=-3,prec=15)
sage: a == b * E.padic_lseries(5)._quotient_of_periods_to_twist(-3)
True
"""
s = ZZ(sign)
if s not in [1, -1]:
raise ValueError("Sign must be +- 1")
if quadratic_twist != 1 and s != 1:
raise NotImplementedError("Quadratic twists not implemented for sign -1")
if quadratic_twist < 0:
s = ZZ(-1)
try:
p, alpha, z, w, f = self.__measure_data[(n, prec, s)]
except (KeyError, AttributeError):
if not hasattr(self, '__measure_data'):
self.__measure_data = {}
p = self._p
alpha = self.alpha(prec=prec)
z = 1/(alpha**n)
w = p**(n-1)
if s == +1 :
f = self._modular_symbol
else :
try :
f = self._negative_modular_symbol
except (KeyError, AttributeError):
if not hasattr(self, '_modular_symbol_negative'):
self.__add_negative_space()
f = self._negative_modular_symbol
self.__measure_data[(n, prec, s)] = (p, alpha, z, w, f)
if quadratic_twist == 1:
if self._E.conductor() % p == 0:
return z * f(a/(p*w))
return z * ( f(a/(p*w)) - f(a/w) / alpha)
else:
D = quadratic_twist
if self.is_ordinary():
chip = kronecker_symbol(D,p)
else:
chip = 1 # alpha is +- sqrt(-p) anyway
if self._E.conductor() % p == 0:
mu = chip**n * z * sum([kronecker_symbol(D,u) * f(a/(p*w)+ZZ(u)/D) for u in range(1,D.abs())])
else:
mu = chip**n * z * sum([kronecker_symbol(D,u) *( f(a/(p*w)+ZZ(u)/D) - chip /alpha * f(a/w+ZZ(u)/D) ) for u in range(1,D.abs())])
return s*mu
def alpha(self, prec=20):
r"""
Return a `p`-adic root `\alpha` of the polynomial `x^2 - a_p x
+ p` with `ord_p(\alpha) < 1`. In the ordinary case this is
just the unit root.
INPUT:
- ``prec`` -- positive integer, the `p`-adic precision of the root.
EXAMPLES:
Consider the elliptic curve 37a::
sage: E = EllipticCurve('37a')
An ordinary prime::
sage: L = E.padic_lseries(5)
sage: alpha = L.alpha(10); alpha
3 + 2*5 + 4*5^2 + 2*5^3 + 5^4 + 4*5^5 + 2*5^7 + 5^8 + 5^9 + O(5^10)
sage: alpha^2 - E.ap(5)*alpha + 5
O(5^10)
A supersingular prime::
sage: L = E.padic_lseries(3)
sage: alpha = L.alpha(10); alpha
alpha + O(alpha^21)
sage: alpha^2 - E.ap(3)*alpha + 3
O(alpha^22)
A reducible prime::
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.alpha(5)
1 + 4*5 + 3*5^2 + 2*5^3 + 4*5^4 + O(5^5)
"""
try:
return self._alpha[prec]
except AttributeError:
self._alpha = {}
except KeyError:
pass
E = self._E
p = self._p
a_p = E.ap(p)
K = Qp(p, prec, print_mode='series')
if E.conductor() % p == 0:
self._alpha[prec] = K(a_p)
return K(a_p)
R = ZZ['x']
f = R([p, -a_p, 1])
if E.is_ordinary(p):
G = f.factor_padic(p, prec + 5)
for pr, e in G:
a = -pr[0]
if a.valuation() < 1:
self._alpha[prec] = K(a)
return K(a)
raise RuntimeError("bug in p-adic L-function alpha")
else: # supersingular case
f = f.change_ring(K)
A = K.extension(f, names="alpha")
a = A.gen()
self._alpha[prec] = a
return a
def order_of_vanishing(self):
r"""
Return the order of vanishing of this `p`-adic L-series.
The output of this function is provably correct, due to a
theorem of Kato [Ka]_.
.. NOTE:: currently `p` must be a prime of good ordinary reduction.
REFERENCES:
- [MTT]_
- [Ka]_
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(3)
sage: L.order_of_vanishing()
0
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.order_of_vanishing()
0
sage: L = EllipticCurve('37a').padic_lseries(5)
sage: L.order_of_vanishing()
1
sage: L = EllipticCurve('43a').padic_lseries(3)
sage: L.order_of_vanishing()
1
sage: L = EllipticCurve('37b').padic_lseries(3)
sage: L.order_of_vanishing()
0
sage: L = EllipticCurve('389a').padic_lseries(3)
sage: L.order_of_vanishing()
2
sage: L = EllipticCurve('389a').padic_lseries(5)
sage: L.order_of_vanishing()
2
sage: L = EllipticCurve('5077a').padic_lseries(5, implementation = 'eclib')
sage: L.order_of_vanishing()
3
"""
try:
return self.__ord
except AttributeError:
pass
if not self.is_ordinary():
raise NotImplementedError
E = self.elliptic_curve()
if not E.is_good(self.prime()):
raise ValueError("prime must be of good reduction")
r = E.rank()
n = 1
while True:
f = self.series(n)
v = f.valuation()
if v < n and v < r:
raise RuntimeError("while computing p-adic order of vanishing, got a contradiction: the curve is %s, the curve has rank %s, but the p-adic L-series vanishes to order <= %s" % (E, r, v))
if v == r:
self.__ord = v
return v
n += 1
def teichmuller(self, prec):
r"""
Return Teichmuller lifts to the given precision.
INPUT:
- ``prec`` - a positive integer.
OUTPUT:
- a list of `p`-adic numbers, the cached Teichmuller lifts
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(7)
sage: L.teichmuller(1)
[0, 1, 2, 3, 4, 5, 6]
sage: L.teichmuller(2)
[0, 1, 30, 31, 18, 19, 48]
"""
p = self._p
K = Qp(p, prec, print_mode='series')
return [Integer(0)] + \
[a.residue(prec).lift() for a in K.teichmuller_system()]
def _e_bounds(self, n, prec):
r"""
A helper function not designed for direct use.
It computes the valuations of the coefficients of `\omega_n = (1+T)^{p^n}-1`.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(2)
sage: Lp._e_bounds(1,10)
[+Infinity, 1, 0, 0, 0, 0, 0, 0, 0, 0]
sage: Lp._e_bounds(2,10)
[+Infinity, 2, 1, 1, 0, 0, 0, 0, 0, 0]
sage: Lp._e_bounds(3,10)
[+Infinity, 3, 2, 2, 1, 1, 1, 1, 0, 0]
sage: Lp._e_bounds(4,10)
[+Infinity, 4, 3, 3, 2, 2, 2, 2, 1, 1]
"""
# trac 10280: replace with new corrected code, note that the sequence has to be decreasing.
pn = self._p**n
enj = infinity
res = [enj]
for j in range(1,prec):
bino = valuation(binomial(pn,j),self._p)
if bino < enj:
enj = bino
res.append(enj)
return res
def _get_series_from_cache(self, n, prec, D, eta):
r"""
A helper function not designed for direct use.
It picks up the series in the cache if it has been previously computed.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(5)
sage: Lp._pAdicLseries__series = {} # clear cached series
sage: Lp._get_series_from_cache(3,5,1,0)
sage: Lp.series(3,prec=5)
5 + 4*5^2 + 4*5^3 + O(5^4) + O(5)*T + O(5)*T^2 + O(5)*T^3 + O(5)*T^4 + O(T^5)
sage: Lp._get_series_from_cache(3,5,1,0)
5 + 4*5^2 + 4*5^3 + O(5^4) + O(5)*T + O(5)*T^2 + O(5)*T^3 + O(5)*T^4 + O(T^5)
"""
try:
return self.__series[(n,prec,D,eta)]
except AttributeError:
self.__series = {}
except KeyError:
for _n, _prec, _D, _eta in self.__series:
if _n == n and _D == D and _eta == eta and _prec >= prec:
return self.__series[(_n,_prec,_D,_eta)].add_bigoh(prec)
return None
def _set_series_in_cache(self, n, prec, D, eta, f):
r"""
A helper function not designed for direct use.
It stores the series in the cache once it has been computed.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(5)
sage: Lp.series(3,prec=5)
5 + 4*5^2 + 4*5^3 + O(5^4) + O(5)*T + O(5)*T^2 + O(5)*T^3 + O(5)*T^4 + O(T^5)
sage: Lp._set_series_in_cache(3,5,1,0,0)
sage: Lp.series(3,prec=5)
0
"""
self.__series[(n, prec, D, eta)] = f
def _quotient_of_periods_to_twist(self, D):
r"""
For a fundamental discriminant `D` of a quadratic number field this
computes the constant `\eta` such that
`\sqrt{\vert D\vert }\cdot\Omega_{E_D}^{+} =\eta\cdot \Omega_E^{sign(D)}`.
As in [MTT]_ page 40. This is either 1 or 2 unless the condition
on the twist is not satisfied, e.g. if we are 'twisting back' to a
semi-stable curve.
.. NOTE::
No check on precision is made, so this may fail for huge `D`.
EXAMPLES::
sage: E = EllipticCurve('37b1')
sage: lp = E.padic_lseries(3)
sage: lp._quotient_of_periods_to_twist(-20)
1
sage: lp._quotient_of_periods_to_twist(-4)
1
sage: lp._quotient_of_periods_to_twist(-3)
1
sage: lp._quotient_of_periods_to_twist(-8)
2
sage: lp._quotient_of_periods_to_twist(8)
2
sage: lp._quotient_of_periods_to_twist(5)
1
sage: lp._quotient_of_periods_to_twist(12)
1
sage: E = EllipticCurve('11a1')
sage: Et = E.quadratic_twist(-3)
sage: lpt = Et.padic_lseries(5)
sage: lpt._quotient_of_periods_to_twist(-3)
3
"""
from sage.functions.all import sqrt
# This function does not depend on p and could be moved out of this file but it is needed only here
# Note that the number of real components does not change by twisting.
if D == 1:
return 1
Et = self._E.quadratic_twist(D)
if D > 1:
qt = Et.period_lattice().basis()[0]/self._E.period_lattice().basis()[0]
qt *= sqrt(qt.parent()(D))
else:
qt = Et.period_lattice().basis()[1].imag()/self._E.period_lattice().basis()[0]
if Et.real_components() == 1:
qt *= 2
qt *= sqrt(qt.parent()(-D))
verbose('the real approximation is %s'%qt)
# we know from MTT that the result has a denominator 1
return QQ(int(round(8*qt)))/8
class pAdicLseriesOrdinary(pAdicLseries):
def series(self, n=2, quadratic_twist=+1, prec=5, eta=0):
r"""
Return the `n`-th approximation to the `p`-adic L-series, in the
component corresponding to the `\eta`-th power of the Teichmueller
character, as a power series in `T` (corresponding to `\gamma-1` with
`\gamma=1+p` as a generator of `1+p\ZZ_p`). Each coefficient is a
`p`-adic number whose precision is provably correct.
Here the normalization of the `p`-adic L-series is chosen
such that `L_p(E,1) = (1-1/\alpha)^2 L(E,1)/\Omega_E`
where `\alpha` is the unit root of the characteristic
polynomial of Frobenius on `T_pE` and `\Omega_E` is the
Néron period of `E`.
INPUT:
- ``n`` - (default: 2) a positive integer
- ``quadratic_twist`` - (default: +1) a fundamental discriminant of a
quadratic field, coprime to the conductor of the curve
- ``prec`` - (default: 5) maximal number of terms of the series to
compute; to compute as many as possible just give a very large
number for ``prec``; the result will still be correct.
- ``eta`` (default: 0) an integer (specifying the power of the
Teichmueller character on the group of roots of unity in
`\ZZ_p^\times`)
:meth:`power_series` is identical to ``series``.
EXAMPLES:
We compute some `p`-adic L-functions associated to the elliptic
curve 11a::
sage: E = EllipticCurve('11a')
sage: p = 3
sage: E.is_ordinary(p)
True
sage: L = E.padic_lseries(p)
sage: L.series(3)
2 + 3 + 3^2 + 2*3^3 + O(3^5) + (1 + 3 + O(3^2))*T + (1 + 2*3 + O(3^2))*T^2 + O(3)*T^3 + O(3)*T^4 + O(T^5)
Another example at a prime of bad reduction, where the
`p`-adic L-function has an extra 0 (compared to the non
`p`-adic L-function)::
sage: E = EllipticCurve('11a')
sage: p = 11
sage: E.is_ordinary(p)
True
sage: L = E.padic_lseries(p)
sage: L.series(2)
O(11^4) + (10 + O(11))*T + (6 + O(11))*T^2 + (2 + O(11))*T^3 + (5 + O(11))*T^4 + O(T^5)
We compute a `p`-adic L-function that vanishes to order 2::
sage: E = EllipticCurve('389a')
sage: p = 3
sage: E.is_ordinary(p)
True
sage: L = E.padic_lseries(p)
sage: L.series(1)
O(T^1)
sage: L.series(2)
O(3^4) + O(3)*T + (2 + O(3))*T^2 + O(T^3)
sage: L.series(3)
O(3^5) + O(3^2)*T + (2 + 2*3 + O(3^2))*T^2 + (2 + O(3))*T^3 + (1 + O(3))*T^4 + O(T^5)
Checks if the precision can be changed (:trac:`5846`)::
sage: L.series(3,prec=4)
O(3^5) + O(3^2)*T + (2 + 2*3 + O(3^2))*T^2 + (2 + O(3))*T^3 + O(T^4)
sage: L.series(3,prec=6)
O(3^5) + O(3^2)*T + (2 + 2*3 + O(3^2))*T^2 + (2 + O(3))*T^3 + (1 + O(3))*T^4 + (1 + O(3))*T^5 + O(T^6)
Rather than computing the `p`-adic L-function for the curve '15523a1', one can
compute it as a quadratic_twist::
sage: E = EllipticCurve('43a1')
sage: lp = E.padic_lseries(3)
sage: lp.series(2,quadratic_twist=-19)
2 + 2*3 + 2*3^2 + O(3^4) + (1 + O(3))*T + (1 + O(3))*T^2 + O(T^3)
sage: E.quadratic_twist(-19).label() # optional -- database_cremona_ellcurve
'15523a1'
This proves that the rank of '15523a1' is zero, even if ``mwrank`` can not determine this.
We calculate the `L`-series in the nontrivial Teichmueller components::
sage: L = EllipticCurve('110a1').padic_lseries(5)
sage: for j in [0..3]: print(L.series(4, eta=j))
O(5^6) + (2 + 2*5 + 2*5^2 + O(5^3))*T + (5 + 5^2 + O(5^3))*T^2 + (4 + 4*5 + 2*5^2 + O(5^3))*T^3 + (1 + 5 + 3*5^2 + O(5^3))*T^4 + O(T^5)
4 + 3*5 + 2*5^2 + 3*5^3 + 5^4 + O(5^6) + (1 + 3*5 + 4*5^2 + O(5^3))*T + (3 + 4*5 + 3*5^2 + O(5^3))*T^2 + (3 + 3*5^2 + O(5^3))*T^3 + (1 + 2*5 + 2*5^2 + O(5^3))*T^4 + O(T^5)
2 + O(5^6) + (1 + 5 + O(5^3))*T + (2 + 4*5 + 3*5^2 + O(5^3))*T^2 + (4 + 5 + 2*5^2 + O(5^3))*T^3 + (4 + O(5^3))*T^4 + O(T^5)
3 + 5 + 2*5^2 + 5^3 + 3*5^4 + 4*5^5 + O(5^6) + (1 + 2*5 + 4*5^2 + O(5^3))*T + (1 + 4*5 + O(5^3))*T^2 + (3 + 2*5 + 2*5^2 + O(5^3))*T^3 + (5 + 5^2 + O(5^3))*T^4 + O(T^5)
It should now also work with `p=2` (:trac:`20798`)::
sage: E = EllipticCurve("53a1")
sage: lp = E.padic_lseries(2)
sage: lp.series(7)
O(2^8) + (1 + 2^2 + 2^3 + O(2^5))*T + (1 + 2^3 + O(2^4))*T^2 + (2^2 + 2^3 + O(2^4))*T^3 + (2 + 2^2 + O(2^3))*T^4 + O(T^5)
sage: E = EllipticCurve("109a1")
sage: lp = E.padic_lseries(2)
sage: lp.series(6)
2^2 + 2^6 + O(2^7) + (2 + O(2^4))*T + O(2^3)*T^2 + (2^2 + O(2^3))*T^3 + (2 + O(2^2))*T^4 + O(T^5)
"""
n = ZZ(n)
if n < 1:
raise ValueError("n (=%s) must be a positive integer" % n)
if self._p == 2 and n == 1:
raise ValueError("n (=%s) must be a at least 2 if p is 2" % n)
if prec < 1:
raise ValueError("Insufficient precision (%s)" % prec)
# check if the conditions on quadratic_twist are satisfied
eta = ZZ(eta) % (self._p - 1)
D = ZZ(quadratic_twist)
if D != 1:
if eta != 0: raise NotImplementedError("quadratic twists only implemented for the 0th Teichmueller component")
if D % 4 == 0:
d = D//4
if not d.is_squarefree() or d % 4 == 1:
raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field"%D)
else:
if not D.is_squarefree() or D % 4 != 1:
raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field"%D)
if gcd(D,self._p) != 1:
raise ValueError("quadratic twist (=%s) must be coprime to p (=%s) "%(D,self._p))
if gcd(D,self._E.conductor())!= 1:
for ell in prime_divisors(D):
if valuation(self._E.conductor(),ell) > valuation(D,ell) :
raise ValueError("can not twist a curve of conductor (=%s) by the quadratic twist (=%s)."%(self._E.conductor(),D))
p = self._p
#verbose("computing L-series for p=%s, n=%s, and prec=%s"%(p,n,prec))
if prec == 1:
if eta == 0:
# trac 15737: if we only ask for the leading term we don't
# need to do any sum as L_p(E,0) = (1-1/alpha)^2 * m(0) (good case)
# set prec arbitrarily to 20.
K = Qp(p, 20, print_mode='series')
R = PowerSeriesRing(K,'T',1)
L = self.modular_symbol(0, sign=+1, quadratic_twist=D)
chip = kronecker_symbol(D,p)
if self._E.conductor() % p == 0:
L *= 1 - chip/self.alpha()
else:
L *= (1-chip/self.alpha())**2
L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
L = R(L, 1)
return L
else:
# here we need some sums anyway
bounds = self._prec_bounds(n,prec)
padic_prec = 20
else:
bounds = self._prec_bounds(n,prec)
padic_prec = max(bounds[1:]) + 5
verbose("using p-adic precision of %s"%padic_prec)
if p == 2:
res_series_prec = min(p**(n-2), prec)
else:
res_series_prec = min(p**(n-1), prec)
verbose("using series precision of %s"%res_series_prec)
ans = self._get_series_from_cache(n, res_series_prec,D,eta)
if ans is not None:
verbose("found series in cache")
return ans
K = QQ
R = PowerSeriesRing(K,'T',res_series_prec)
T = R(R.gen(),res_series_prec )
L = R(0)
one_plus_T_factor = R(1)
gamma_power = K(1)
if p == 2:
teich = [0, 1,-1]
gamma = K(5)
p_power = 2**(n-2)
a_range = 3
else:
teich = self.teichmuller(padic_prec)
gamma = K(1+ p)
p_power = p**(n-1)
a_range = p
si = 1-2*(eta % 2)
verbose("Now iterating over %s summands"%((p-1)*p_power))
verbose_level = get_verbose()
count_verb = 0
for j in range(p_power):
s = K(0)
if verbose_level >= 2 and j/p_power*100 > count_verb + 3:
verbose("%.2f percent done"%(float(j)/p_power*100))
count_verb += 3
for a in range(1,a_range):
b = teich[a] * gamma_power
s += teich[a]**eta * self.measure(b, n, padic_prec, quadratic_twist=D, sign=si).lift()
L += s * one_plus_T_factor
one_plus_T_factor *= 1+T
gamma_power *= gamma
verbose("the series before adjusting the precision is %s"%L)
# Now create series but with each coefficient truncated
# so it is proven correct:
K = Qp(p, padic_prec, print_mode='series')
R = PowerSeriesRing(K,'T',res_series_prec)
L = R(L,res_series_prec)
aj = L.list()
if len(aj) > 0:
aj = [aj[0].add_bigoh(padic_prec-2)] + \
[aj[j].add_bigoh(bounds[j]) for j in range(1,len(aj))]
L = R(aj,res_series_prec )
L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
self._set_series_in_cache(n, res_series_prec, D, eta, L)
return L
power_series = series
def is_ordinary(self):
r"""
Return ``True`` if the elliptic curve that this L-function is attached
to is ordinary.
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.is_ordinary()
True
"""
return True
def is_supersingular(self):
r"""
Return ``True`` if the elliptic curve that this L-function is attached
to is supersingular.
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(5)
sage: L.is_supersingular()
False
"""
return False
def _c_bound(self):
r"""
A helper function not designed for direct use.
It returns the maximal `p`-adic valuation of the possible denominators
of the modular symbols.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(5)
sage: Lp._c_bound()
1
sage: Lp = E.padic_lseries(17)
sage: Lp._c_bound()
0
"""
try:
return self.__c_bound
except AttributeError:
pass
E = self._E
p = self._p
if E.galois_representation().is_irreducible(p):
ans = 0
else:
m = E.modular_symbol_space(sign=1)
b = m.boundary_map().codomain()
C = b._known_cusps() # all known, since computed the boundary map
ans = max([valuation(self.modular_symbol(a).denominator(), p)
for a in C])
self.__c_bound = ans
return ans
def _prec_bounds(self, n, prec):
r"""
A helper function not designed for direct use.
It returns the `p`-adic precisions of the approximation
to the `p`-adic L-function.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(5)
sage: Lp._prec_bounds(3,10)
[+Infinity, 1, 1, 1, 1, 0, 0, 0, 0, 0]
sage: Lp._prec_bounds(3,12)
[+Infinity, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
sage: Lp._prec_bounds(4,5)
[+Infinity, 2, 2, 2, 2]
sage: Lp._prec_bounds(15,10)
[+Infinity, 13, 13, 13, 13, 12, 12, 12, 12, 12]
sage: Lp = E.padic_lseries(3)
sage: Lp._prec_bounds(15,10)
[+Infinity, 14, 14, 13, 13, 13, 13, 13, 13, 12]
"""
if self._p == 2:
e = self._e_bounds(n - 2, prec)
else:
e = self._e_bounds(n - 1, prec)
c = self._c_bound()
return [e[j] - c for j in range(len(e))]
class pAdicLseriesSupersingular(pAdicLseries):
def series(self, n=3, quadratic_twist=+1, prec=5, eta=0):
r"""
Return the `n`-th approximation to the `p`-adic L-series as a
power series in `T` (corresponding to `\gamma-1` with
`\gamma=1+p` as a generator of `1+p\ZZ_p`). Each
coefficient is an element of a quadratic extension of the `p`-adic
numbers, whose precision is probably, but not provably, correct.
Here the normalization of the `p`-adic L-series is chosen
such that `L_p(E,1) = (1-1/\alpha)^2 L(E,1)/\Omega_E`
where `\alpha` is a root of the characteristic
polynomial of Frobenius on `T_pE` and `\Omega_E` is the
Néron period of `E`.
INPUT:
- ``n`` - (default: 3) a positive integer
- ``quadratic_twist`` - (default: +1) a fundamental discriminant of a
quadratic field, coprime to the conductor of the curve
- ``prec`` - (default: 5) maximal number of terms of the series to
compute; to compute as many as possible just give a very large
number for ``prec``; the result will still be correct.
- ``eta`` (default: 0) an integer (specifying the power of the
Teichmueller character on the group of roots of unity in
`\ZZ_p^\times`)
OUTPUT:
a power series with coefficients in a quadratic ramified extension of
the `p`-adic numbers generated by a root `alpha` of the characteristic
polynomial of Frobenius on `T_pE`.
ALIAS: power_series is identical to series.
EXAMPLES:
A supersingular example, where we must compute to higher precision to see anything::
sage: e = EllipticCurve('37a')
sage: L = e.padic_lseries(3); L
3-adic L-series of Elliptic Curve defined by y^2 + y = x^3 - x over Rational Field
sage: L.series(2)
O(T^3)
sage: L.series(4) # takes a long time (several seconds)
O(alpha) + (alpha^-2 + O(alpha^0))*T + (alpha^-2 + O(alpha^0))*T^2 + O(T^5)
sage: L.alpha(2).parent()
3-adic Eisenstein Extension Field in alpha defined by x^2 + 3*x + 3
An example where we only compute the leading term (:trac:`15737`)::
sage: E = EllipticCurve("17a1")
sage: L = E.padic_lseries(3)
sage: L.series(4,prec=1)
alpha^-2 + alpha^-1 + 2 + 2*alpha + ... + O(alpha^38) + O(T)
It works also for `p=2`::
sage: E = EllipticCurve("11a1")
sage: lp = E.padic_lseries(2)
sage: lp.series(10)
O(alpha^-3) + (alpha^-4 + O(alpha^-3))*T + (alpha^-4 + O(alpha^-3))*T^2 + (alpha^-5 + alpha^-4 + O(alpha^-3))*T^3 + (alpha^-4 + O(alpha^-3))*T^4 + O(T^5)
"""
n = ZZ(n)
if n < 1:
raise ValueError("n (=%s) must be a positive integer" % n)
if self._p == 2 and n == 1:
raise ValueError("n (=%s) must be at least 2 when p=2" % n)
if prec < 1:
raise ValueError("Insufficient precision (%s)" % prec)
# check if the conditions on quadratic_twist are satisfied
D = ZZ(quadratic_twist)
if D != 1:
if eta != 0: raise NotImplementedError("quadratic twists only implemented for the 0th Teichmueller component")
if D % 4 == 0:
d = D//4
if not d.is_squarefree() or d % 4 == 1:
raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field"%D)
else:
if not D.is_squarefree() or D % 4 != 1:
raise ValueError("quadratic_twist (=%s) must be a fundamental discriminant of a quadratic field" % D)
if gcd(D,self._E.conductor()) != 1:
for ell in prime_divisors(D):
if valuation(self._E.conductor(), ell) > valuation(D,ell) :
raise ValueError("can not twist a curve of conductor (=%s) by the quadratic twist (=%s)." % (self._E.conductor(), D))
p = self._p
eta = ZZ(eta) % (p - 1)
#if p == 2 and self._normalize :
#print('Warning : for p = 2 the normalization might not be correct !')
if prec == 1:
if eta == 0:
# trac 15737: if we only ask for the leading term we don't
# need to do any sum as L_p(E,0) = (1-1/alpha)^2 * m(0) (good case)
# set prec arbitrarily to 20.
alpha = self.alpha(prec=20)
K = alpha.parent()
R = PowerSeriesRing(K,'T',1)
L = self.modular_symbol(0, sign=+1, quadratic_twist=D)
L *= (1-1/self.alpha())**2
L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
L = R(L, 1)
return L
else:
# here we need some sums anyway
bounds = self._prec_bounds(n,prec)
alphaadic_prec = 20
else:
prec = min(p**(n-1), prec)
bounds = self._prec_bounds(n,prec)
alphaadic_prec = max(bounds[1:]) + 5
padic_prec = alphaadic_prec//2+1
verbose("using alpha-adic precision of %s"%padic_prec)
ans = self._get_series_from_cache(n, prec, quadratic_twist,eta)
if ans is not None:
verbose("found series in cache")
return ans
alpha = self.alpha(prec=padic_prec)
K = alpha.parent()
R = PowerSeriesRing(K,'T',prec)
T = R(R.gen(), prec)
L = R(0)
one_plus_T_factor = R(1)
gamma_power = 1
if p == 2:
teich = [0, 1,-1]
gamma = 5
p_power = 2**(n-2)
a_range = 3
else:
teich = self.teichmuller(padic_prec)
gamma = 1+ p
p_power = p**(n-1)
a_range = p
si = 1-2*(eta % 2)
verbose("Now iterating over %s summands"%((p-1)*p_power))
verbose_level = get_verbose()
count_verb = 0
for j in range(p_power):
s = K(0)
if verbose_level >= 2 and j/p_power*100 > count_verb + 3:
verbose("%.2f percent done"%(float(j)/p_power*100))
count_verb += 3
for a in range(1,a_range):
b = teich[a] * gamma_power
s += teich[a]**eta * self.measure(b, n, padic_prec, quadratic_twist=D, sign=si)
L += s * one_plus_T_factor
one_plus_T_factor *= 1+T
gamma_power *= gamma
# Now create series but with each coefficient truncated
# so it is proven correct:
# the coefficients are now treated as alpha-adic numbers (trac 20254)
L = R(L,prec)
aj = L.list()
if len(aj) > 0:
bj = [aj[0].add_bigoh(2*(padic_prec-2))]
j = 1
while j < len(aj):
bj.append( aj[j].add_bigoh(bounds[j]) )
j += 1
L = R(bj, prec)
L /= self._quotient_of_periods_to_twist(D)*self._E.real_components()
self._set_series_in_cache(n, prec, quadratic_twist, eta, L)
return L
power_series = series
def is_ordinary(self):
r"""
Return ``True`` if the elliptic curve that this L-function is attached
to is ordinary.
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(19)
sage: L.is_ordinary()
False
"""
return False
def is_supersingular(self):
r"""
Return ``True`` if the elliptic curve that this L-function is attached
to is supersingular.
EXAMPLES::
sage: L = EllipticCurve('11a').padic_lseries(19)
sage: L.is_supersingular()
True
"""
return True
def _prec_bounds(self, n, prec):
r"""
A helper function not designed for direct use.
It returns the `\alpha`-adic precisions of the approximation
to the `p`-adic L-function.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: Lp = E.padic_lseries(19)
sage: Lp._prec_bounds(3,5)
[+Infinity, -1, -1, -1, -1]
sage: Lp._prec_bounds(2,5)
[+Infinity, -2, -2, -2, -2]
sage: Lp._prec_bounds(10,5)
[+Infinity, 6, 6, 6, 6]
"""
if self._p == 2:
e = self._e_bounds(n - 2, prec)
else:
e = self._e_bounds(n - 1, prec)
c0 = ZZ(n + 2)
return [infinity] + [2 * e[j] - c0 for j in range(1, len(e))]
def _poly(self, a):
"""
Given an element ``a`` in Qp[alpha], return the list
containing its two coordinates in Qp.
EXAMPLES::
sage: E = EllipticCurve("14a1")
sage: lp = E.padic_lseries(5)
sage: K = lp.alpha().parent()
sage: a = K(5)
sage: a
4*alpha^2 + alpha^4 + O(alpha^42)
sage: lp._poly(a)
[5 + O(5^21), O(5^21)]
"""
# this should be implemented in elements of Eisenstein rings at some point trac 20248
if a.is_zero():
return [0,0]
v, k = a._ntl_rep_abs()
K = a.base_ring()
pi = K.uniformiser()
v0 = K(v[0]._sage_()) * pi**k
v1 = K(v[1]._sage_()) * pi**k
alpha = a.parent().gen()
assert v0 + v1*alpha == a
return [ v0, v1 ]
def Dp_valued_series(self, n=3, quadratic_twist=+1, prec=5):
r"""
Return a vector of two components which are p-adic power series.
The answer v is such that
`(1-\varphi)^{-2}\cdot L_p(E,T) =` ``v[1]`` `\cdot \omega +` ``v[2]`` `\cdot \varphi(\omega)`
as an element of the Dieudonné module `D_p(E) = H^1_{dR}(E/\QQ_p)` where
`\omega` is the invariant differential and `\varphi` is the Frobenius on `D_p(E)`.
According to the `p`-adic Birch and Swinnerton-Dyer
conjecture [BP]_, this function has a zero of order equal to the
rank of `E(\QQ)`, and its leading term contains the order of
the Tate-Shafarevich group, the Tamagawa numbers, the order of the
torsion subgroup and the `D_p`-valued `p`-adic regulator.
INPUT:
- ``n`` -- (default: 3) a positive integer
- ``quadratic_twist`` -- (default: +1) a fundamental discriminant of a quadratic field
- ``prec`` -- (default: 5) a positive integer
EXAMPLES::
sage: E = EllipticCurve('14a')
sage: L = E.padic_lseries(5)
sage: L.Dp_valued_series(4) # long time (9s on sage.math, 2011)
(1 + 4*5 + O(5^2) + (4 + O(5))*T + (1 + O(5))*T^2 + (4 + O(5))*T^3 + (2 + O(5))*T^4 + O(T^5), 5^2 + O(5^3) + O(5^2)*T + (4*5 + O(5^2))*T^2 + (2*5 + O(5^2))*T^3 + (2 + 2*5 + O(5^2))*T^4 + O(T^5))
"""
E = self._E
p = self._p
lps = self.series(n, quadratic_twist=quadratic_twist, prec=prec)
# now split up the series in two lps = G + H * alpha
R = lps.base_ring().base_ring() # Qp
QpT , T = PowerSeriesRing(R, 'T', prec).objgen()
Gli = []
Hli = []
for n in range(lps.prec()):
v = self._poly(lps[n])
Gli.append(v[0])
Hli.append(v[1])
G = QpT(Gli, prec)
H = QpT(Hli, prec)
# now compute phi
phi = matrix.matrix([[0,-1/p],[1,E.ap(p)/p]])
lpv = vector([G + (E.ap(p))*H , - R(p) * H ]) # this is L_p
eps = (1-phi)**(-2)
resu = lpv*eps.transpose()
return resu
@rename_keyword(deprecation=6094, method="algorithm")
def frobenius(self, prec=20, algorithm = "mw"):
r"""
Return a geometric Frobenius `\varphi` on the Dieudonné module `D_p(E)`
with respect to the basis `\omega`, the invariant differential, and `\eta=x\omega`.
It satisfies `\varphi^2 - a_p/p\, \varphi + 1/p = 0`.
INPUT:
- ``prec`` - (default: 20) a positive integer
- ``algorithm`` - either 'mw' (default) for Monsky-Washnitzer
or 'approx' for the algorithm described by Bernardi and Perrin-Riou
(much slower and not fully tested)
EXAMPLES::
sage: E = EllipticCurve('14a')
sage: L = E.padic_lseries(5)
sage: phi = L.frobenius(5)
sage: phi
[ 2 + 5^2 + 5^4 + O(5^5) 3*5^-1 + 3 + 5 + 4*5^2 + 5^3 + O(5^4)]
[ 3 + 3*5^2 + 4*5^3 + 3*5^4 + O(5^5) 3 + 4*5 + 3*5^2 + 4*5^3 + 3*5^4 + O(5^5)]
sage: -phi^2
[5^-1 + O(5^4) O(5^4)]
[ O(5^5) 5^-1 + O(5^4)]
"""
E = self._E
p = self._p
if algorithm != "mw" and algorithm !="approx":
raise ValueError("Unknown algorithm %s."%algorithm)
if algorithm == "approx":
return self.__phi_bpr(prec=prec)
if p < 4 and algorithm == "mw":
print("Warning: If this fails try again using algorithm=\"approx\"")
Ew = E.integral_short_weierstrass_model()
adjusted_prec = sage.schemes.hyperelliptic_curves.monsky_washnitzer.adjusted_prec(p, prec)
modprecring = Integers(p**adjusted_prec)
output_ring = Qp(p, prec)
R, x = PolynomialRing(modprecring, 'x').objgen()
Q = x**3 + modprecring(Ew.a4()) * x + modprecring(Ew.a6())
trace = Ew.ap(p)
fr = sage.schemes.hyperelliptic_curves.monsky_washnitzer.matrix_of_frobenius(Q, p, adjusted_prec, trace)
fr = matrix.matrix(output_ring,2,2,fr)
# return a vector for PARI's ellchangecurve to pass from e1 to e2
def isom(e1,e2):
if not e1.is_isomorphic(e2):
raise ValueError("Curves must be isomorphic.")
usq = (e1.discriminant()/e2.discriminant()).nth_root(6)
u = usq.sqrt()
s = (u * e2.a1() - e1.a1() )/ZZ(2)
r = (usq * e2.a2() - e1.a2() + s**2 + e1.a1()*s)/ZZ(3)
t = (u**3 * e2.a3() - e1.a3() - e1.a1()*r)/ZZ(2)
return [u,r,s,t]
v = isom(E,Ew)
u = v[0]
r = v[1]
# change basis
A = matrix.matrix([[u,-r/u],[0,1/u]])
frn = A * fr * A**(-1)
return 1/p*frn
def __phi_bpr(self, prec=0):
r"""
This returns a geometric Frobenius `\varphi` on the Dieudonné module `D_p(E)`
with respect to the basis `\omega`, the invariant differential, and `\eta=x\omega`.
It satisfies `\varphi^2 - a_p/p\, \varphi + 1/p = 0`.
The algorithm used here is described in Bernardi-Perrin-Riou [BP]_, page 232.
.. WARNING::
This function has not been sufficiently tested. It is very slow.
EXAMPLES::
sage: E = EllipticCurve('11a1')
sage: lp = E.padic_lseries(19)
sage: lp.frobenius(prec=1,algorithm="approx") #indirect doctest
[ O(19^0) 4*19^-1 + O(19^0)]
[ 14 + O(19) O(19^0)]
sage: E = EllipticCurve('17a1')
sage: lp = E.padic_lseries(3)
sage: lp.frobenius(prec=3,algorithm="approx")
[ O(3) 2*3^-1 + 2 + O(3)]
[ 1 + O(3^2) O(3)]
sage: lp.frobenius(prec=5,algorithm="approx")
[ 3 + O(3^2) 2*3^-1 + 2 + 3 + O(3^2)]
[ 1 + 2*3^2 + O(3^3) 2*3 + O(3^2)]
"""
E = self._E
p = self._p
if prec > 10:
print("Warning: Very large value for the precision.")
if prec == 0:
prec = floor((log(10000)/log(p)))
verbose("prec set to %s"%prec)
eh = E.formal()
om = eh.differential(prec = p**prec+3)
verbose("differential computed")
xt = eh.x(prec=p**prec + 3)
et = xt*om
# c_(p^k) = cs[k] and d_(p^k) = ds[k], the coefficients of t^(p^k - 1) in om and et
cs = [om[p**k-1] for k in range(prec + 1)]
ds = [et[p**k-1] for k in range(prec + 1)]
delta = 0
dpr = 0
gamma = 0
dga = 0
for k in range(1,prec+1):
# this is the equation eq[0]*x+eq[1]*y+eq[2] == 0
# such that delta_new = delta + p^dpr*x (and gamma_new = gamma + p^dga*y)
eq = [(p**dpr*cs[k]) % p**k,(-p**dga*ds[k]) % p**k , (delta*cs[k]-gamma*ds[k]-cs[k-1]) % p**k ]
verbose("valuations : %s"%([x.valuation(p) for x in eq]))
v = min([x.valuation(p) for x in eq])
if v == infinity:
verbose("no new information at step k=%s"%k)
else:
eq = [ZZ(x/p**v) for x in eq]
verbose("renormalised eq mod p^%s is now %s"%(k-v,eq))
if eq[0].valuation(p) == 0:
l = min(eq[1].valuation(p),k-v)
if l == 0:
verbose("not uniquely determined at step k=%s"%k)
else:
ainv = eq[0].inverse_mod(p**l)
delta = delta - eq[2]*ainv*p**dpr
dpr = dpr + l
delta = delta % p**dpr
verbose("delta_prec increased to %s\n delta is now %s"%(dpr,delta))
elif eq[1].valuation(p) == 0:
l = min(eq[0].valuation(p),k-v)
ainv = eq[1].inverse_mod(p**l)
gamma = gamma - eq[2]*ainv*p**dga
dga = dga + l
gamma = gamma % p**dga
verbose("gamma_prec increased to %s\n gamma is now %s"%(dga,gamma))
else:
raise RuntimeError("Bug: no delta or gamma can exist")
# end of approximation of delta and gamma
R = Qp(p,max(dpr,dga)+1)
delta = R(delta,absprec=dpr)
gamma = R(gamma,absprec=dga)
verbose("result delta = %s\n gamma = %s\n check : %s"%(delta,gamma, [Qp(p,k)(delta * cs[k] - gamma * ds[k] - cs[k-1]) for k in range(1,prec+1)] ))
a = delta
c = -gamma
d = E.ap(p) - a
b = (-1/p+a*d)/c
phi = matrix.matrix([[a,b],[c,d]])
return phi
def bernardi_sigma_function(self, prec=20):
r"""
Return the `p`-adic sigma function of Bernardi in terms of `z = log(t)`.
This is the same as ``padic_sigma`` with ``E2 = 0``.
EXAMPLES::
sage: E = EllipticCurve('14a')
sage: L = E.padic_lseries(5)
sage: L.bernardi_sigma_function(prec=5) # Todo: some sort of consistency check!?
z + 1/24*z^3 + 29/384*z^5 - 8399/322560*z^7 - 291743/92897280*z^9 + O(z^10)
"""
E = self._E
Eh = E.formal()
lo = Eh.log(prec + 5)
F = lo.reverse()
S = LaurentSeriesRing(QQ,'z')
z = S.gen()
F = F(z)
xofF = Eh.x(prec + 2)(F)
#r = ( E.a1()**2 + 4*E.a2() ) / ZZ(12)
g = (1/z**2 - xofF ).power_series()
h = g.integral().integral()
sigma_of_z = z.power_series() * h.exp()
return sigma_of_z
def Dp_valued_height(self,prec=20):
r"""
Return the canonical `p`-adic height with values in the Dieudonné module `D_p(E)`.
It is defined to be
`h_{\eta} \cdot \omega - h_{\omega} \cdot \eta`
where `h_{\eta}` is made out of the sigma function of Bernardi and
`h_{\omega}` is `log_E^2`.
The answer ``v`` is given as ``v[1]*omega + v[2]*eta``.
The coordinates of ``v`` depend on the chosen Weierstrass equation.
EXAMPLES::
sage: E = EllipticCurve('53a')
sage: L = E.padic_lseries(5)
sage: h = L.Dp_valued_height(7)
sage: h(E.gens()[0])
(3*5 + 5^2 + 2*5^3 + 3*5^4 + 4*5^5 + 5^6 + 5^7 + O(5^8), 5^2 + 4*5^4 + 2*5^7 + 3*5^8 + O(5^9))
"""
E = self._E
p = self._p
Ehat = E.formal()
elog = Ehat.log(prec + Integer(3))
# we will have to do it properly with David Harvey's _multiply_point()
n = arith.LCM(E.tamagawa_numbers())
n = arith.LCM(n, E.Np(p)) # allowed here because E has good reduction at p
def height(P,check=True):
if P.is_finite_order():
return Qp(p,prec)(0)
if check:
assert P.curve() == E, 'the point P must lie on the curve from which the height function was created'
Q = n * P
tt = - Q[0]/Q[1]
R = Qp(p,prec+5)
tt = R(tt)
zz = elog(tt)
homega = -zz**2/n**2
eQ = denominator(Q[1])/denominator(Q[0])
si = self.bernardi_sigma_function(prec=prec+4)
heta = 2 * log(si(zz)/eQ) / n**2
R = Qp(p,prec)
return vector([-R(heta),R(homega)])
return height
def Dp_valued_regulator(self, prec=20, v1=0, v2=0):
r"""
Return the canonical `p`-adic regulator with values in the Dieudonné module `D_p(E)`
as defined by Perrin-Riou using the `p`-adic height with values in `D_p(E)`.
The result is written in the basis `\omega`, `\varphi(\omega)`, and hence the
coordinates of the result are independent of the chosen Weierstrass equation.
.. NOTE::
The definition here is corrected with respect to
Perrin-Riou's article [PR]_. See [SW]_.
REFERENCES:
.. [PR] Perrin-Riou, *Arithmétique des courbes elliptiques à
réduction supersingulière en `p`*,
Experiment. Math. 12 (2003), no. 2, 155-186.
EXAMPLES::
sage: E = EllipticCurve('43a')
sage: L = E.padic_lseries(7)
sage: L.Dp_valued_regulator(7)
(5*7 + 6*7^2 + 4*7^3 + 4*7^4 + 7^5 + 4*7^7 + O(7^8), 4*7^2 + 2*7^3 + 3*7^4 + 7^5 + 6*7^6 + 4*7^7 + O(7^8))
"""
p = self._p
E = self._E
h = self.Dp_valued_height(prec=prec)
# this is the height_{v} (P) for a v in D_p
def hv(vec,P):
hP = h(P)
return - vec[0]*hP[1] +vec[1]*hP[0]
# def hvpairing(vec,P,Q):
# return (hv(vec, P+Q) - hv(vec,P)-hv(vec,Q))/2
K = Qp(p, prec)
if v1 == 0 and v2 == 0:
v1 = vector([K(0), K(1)]) # that is eta
v2 = vector([K(-1), K(1)]) # and this is eta-omega.
# the rest should not depend on this choice
# as long as it is outside Q_p * omega
rk = E.rank()
if rk == 0:
return vector([K(1), K(0)])
basis = E.gens()
def regv(vec):
M = matrix.matrix(K, rk, rk, 0)
point_height = [hv(vec, P) for P in basis]
for i in range(rk):
for j in range(i+1, rk):
M[i, j] = M[j, i] = (hv(vec,basis[i] + basis[j])- point_height[i] - point_height[j] )/2
for i in range(rk):
M[i, i] = point_height[i]
return M.determinant()
def Dp_pairing(vec1,vec2):
return (vec1[0]*vec2[1]-vec1[1]*vec2[0])
omega_vec = vector([K(1),K(0)])
# note the correction here with respect to Perrin-Riou's definition.
# only this way the result will be independent of the choice of v1 and v2.
reg1 = regv(v1) / Dp_pairing(omega_vec, v1)**(rk - 1)
reg2 = regv(v2) / Dp_pairing(omega_vec, v2)**(rk - 1)
# the regulator in the basis omega,eta
reg_oe = (reg1 * v2 - reg2 * v1 ) / Dp_pairing(v2, v1)
if p < 5:
phi = self.frobenius(min(6, prec), algorithm="approx")
else:
phi = self.frobenius(prec + 2, algorithm="mw")
c = phi[1, 0] # this is the 'period' [omega,phi(omega)]
a = phi[0, 0]
return vector([reg_oe[0] - a/c*reg_oe[1],reg_oe[1]/c])
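# ----------------------------------------------------------------------
# Illustration only (not part of Sage): a pure-Python sketch of the idea behind
# pAdicLseries._e_bounds above. The j-th coefficient of omega_n = (1+T)^(p^n) - 1
# is binomial(p^n, j), and the running minimum of its p-adic valuations bounds the
# precision of the approximation. The helper below is hypothetical, not Sage API.
def _e_bounds_sketch(p, n, prec):
    from math import comb

    def val(m):
        # p-adic valuation of a non-negative integer (infinity for 0)
        if m == 0:
            return float('inf')
        v = 0
        while m % p == 0:
            m //= p
            v += 1
        return v

    pn = p ** n
    bounds = [float('inf')]   # the constant coefficient of omega_n is zero
    best = float('inf')
    for j in range(1, prec):
        best = min(best, val(comb(pn, j)))
        bounds.append(best)
    return bounds

# _e_bounds_sketch(2, 3, 10) reproduces the decreasing shape shown in the
# Lp._e_bounds(3, 10) doctest above: [inf, 3, 2, 2, 1, 1, 1, 1, 0, 0].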
|
#!/usr/bin/python
import monkDebug as debug
import sys
import monkTools
import re
##
## @brief Transcode wiki-style links into HTML:
## [[http://votre_site.con]] => <a href="http://votre_site.con">http://votre_site.con</a>
## [[http://votre_site.con | text displayed]] => <a href="http://votre_site.con">text displayed</a>
## [lib[...]], [doc[...]], [tutorial[...]] and [class[...]] become local HTML links.
##
## @param[in] value String to transform.
## @return Transformed string.
##
def transcode(value):
# named link : [[http://plop.html | link name]]
value = re.sub(r'\[\[http://(.*?) \| (.*?)\]\]',
r'<a href="http://\1">\2</a>',
value)
# direct link : [[http://plop.html]]
value = re.sub(r'\[\[http://(.*?)\]\]',
r'<a href="http://\1">http://\1</a>',
value)
# named lib link : [lib[libname | text displayed]]
value = re.sub(r'\[lib\[(.*?) \| (.*?)\]\]',
r'<a href="../\1">\2</a>',
value)
value = re.sub(r'\[doc\[(.*?) \| (.*?)\]\]',
r'<a href="\1.html">\2</a>',
value)
value = re.sub(r'\[tutorial\[(.*?) \| (.*?)\]\]',
r'<a href="tutorial_\1.html">\2</a>',
value)
value = re.sub(r'\[(lib|class|methode)\[(.*?)\]\]',
replace_link_class,
value)
"""
p = re.compile('\[\[(.*?)(|(.*?))\]\])',
flags=re.DOTALL)
value = p.sub(replace_link,
value)
"""
return value
"""
def replace_link(match):
if match.group() == "":
return ""
#debug.verbose("plop: " + str(match.group()))
value = "<ul>"
value += re.sub(r':INDENT:',
r'',
match.group())
value += "</ul>"
return transcode(value)
"""
def replace_link_class(match):
if match.group() == "":
return ""
#debug.info("plop: " + str(match.group()))
if match.groups()[0] == 'class':
className = match.groups()[1]
value = re.sub(':', '_', className)
return '<a href="class_' + value + '.html">' + className + '</a>'
elif match.groups()[0] == 'lib':
return match.groups()[1]
elif match.groups()[0] == 'methode':
return match.groups()[1]
else:
return match.groups()[1]
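# A minimal, self-contained sketch (illustration only, not part of the monk tools)
# of the named-link rule used in transcode() above; it relies only on the standard
# re module already imported at the top of this file.
def _transcode_named_links(text):
    # [[http://site | label]] -> <a href="http://site">label</a>
    return re.sub(r'\[\[http://(.*?) \| (.*?)\]\]',
                  r'<a href="http://\1">\2</a>',
                  text)

# _transcode_named_links("see [[http://votre_site.con | the docs]]")
# -> 'see <a href="http://votre_site.con">the docs</a>'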
|
#!/usr/bin/python
import numpy as np
from scipy import signal
class RandomMapGenerator():
def __init__(self,size=(5,6)):
self.initialize_thresh = 0.5
self.size = size
def initialize(self):
self.map = np.random.random(self.size)
super_thresh_indices = self.map >= self.initialize_thresh
sub_thresh_indices = self.map < self.initialize_thresh
self.map[super_thresh_indices] = 1
self.map[sub_thresh_indices] = -1
def apply_rules(self):
# pad the map with -1's around the outside (i.e. a row of non-road around the outside)
map_padded = np.lib.pad(self.map,((1,1),(1,1)),'constant',constant_values=((-1,-1),(-1,-1)))
# TODO read from yaml and put the masks into a loop
mask = np.array([[1,1],[1,1]])
if not self.check_mask(mask,map_padded):
return False
mask = np.array([[0,-1,0],[-1,1,-1]])
if not self.check_mask(mask,map_padded):
return False
mask = np.array([[-1,0],[1,-1],[-1,0]])
if not self.check_mask(mask,map_padded):
return False
mask = np.array([[-1,1,-1],[0,-1,0]])
if not self.check_mask(mask,map_padded):
return False
mask = np.array([[0,-1],[-1,1],[0,-1]])
if not self.check_mask(mask,map_padded):
return False
return True
def check_mask(self,mask,map_padded):
# for a mask of 0/+1/-1 entries, the correlation equals np.count_nonzero(mask)
# exactly where the masked pattern occurs, so a peak at that value means the
# forbidden pattern is present in the map
check_val = np.count_nonzero(mask)
masked = signal.correlate(map_padded,mask)
if (np.amax(masked) == check_val):
return False
return True
def generate(self):
while True:
self.initialize()
if self.apply_rules():
break
return self.map
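# Illustration only (hypothetical helper, not part of the class above): the core
# trick in check_mask() is that correlating a +/-1 grid with a 0/+1/-1 mask peaks
# at exactly np.count_nonzero(mask) wherever the masked pattern occurs.
def _demo_mask_detection():
    grid = np.array([[ 1,  1, -1],
                     [ 1,  1, -1],
                     [-1, -1, -1]])
    mask = np.array([[1, 1],
                     [1, 1]])
    peak = np.amax(signal.correlate(grid, mask))
    # True: a 2x2 block of road (all 1's) is present in the grid
    return peak == np.count_nonzero(mask)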
def main():
size=(4,4)
rmg = RandomMapGenerator(size)
map = rmg.generate()
print(map)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui Core.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import re
import sys
import glob
from karesansui.lib.dict_op import DictOp
from karesansui.lib.parser.base.xml_like_conf_parser import xmlLikeConfParser as Parser
from karesansui.lib.utils import preprint_r, r_chgrp, r_chmod
from karesansui.lib.const import VENDOR_SYSCONF_DIR, \
COLLECTD_DATA_DIR, KARESANSUI_GROUP
"""
Define Variables for This Parser
"""
PARSER_COLLECTD_PLUGIN_DIR = "%s/collectd.d" % VENDOR_SYSCONF_DIR
class collectdpluginParser:
_module = "collectdplugin"
def __init__(self):
self.dop = DictOp()
self.dop.addconf(self._module,{})
self.parser = Parser()
self.parser.set_delim("[ \t]+")
self.parser.set_new_delim("\t")
self.parser.set_comment("#")
self.base_parser_name = self.parser.__class__.__name__
from karesansui.lib.parser.collectd import collectdParser
collectdp = collectdParser()
self.parser.set_opt_uni(collectdp.opt_uni)
self.parser.set_opt_multi(collectdp.opt_multi)
self.parser.set_opt_sect(collectdp.opt_sect)
pass
def set_footer(self, footer=""):
self.parser.set_footer(footer)
def source_file(self):
retval = []
glob_str = "%s/*.conf" % (PARSER_COLLECTD_PLUGIN_DIR,)
for _afile in glob.glob(glob_str):
retval.append(_afile)
return retval
def read_conf(self,extra_args=None):
retval = {}
for _afile in self.source_file():
plugin_name = re.sub("\.conf$","",os.path.basename(_afile))
try:
extra_args['include']
if not re.search(extra_args['include'],plugin_name):
continue
except:
pass
self.parser.set_source_file([_afile])
conf_arr = self.parser.read_conf()
try:
self.dop.set(self._module,[plugin_name],conf_arr[_afile]['value'])
except:
pass
self.dop.set(self._module,['@BASE_PARSER'],self.base_parser_name)
#self.dop.preprint_r(self._module)
return self.dop.getconf(self._module)
def _pre_write_conf(self,conf_arr={}):
# Change permissions so that the kss group can read/write the collectd data.
if os.path.exists(COLLECTD_DATA_DIR):
if os.getuid() == 0:
r_chgrp(COLLECTD_DATA_DIR,KARESANSUI_GROUP)
r_chmod(COLLECTD_DATA_DIR,"g+rwx")
r_chmod(COLLECTD_DATA_DIR,"o-rwx")
dop = DictOp()
dop.addconf("__",conf_arr)
if dop.isset("__",["python"]) is True:
dop.cdp_unset("__",["python","Plugin","python","@ORDERS"],multiple_file=True)
orders = []
orders.append(['Encoding'])
orders.append(['LogTraces'])
orders.append(['Interactive'])
orders.append(['ModulePath'])
orders.append(['Import'])
orders.append(['Module'])
dop.cdp_set("__",["python","Plugin","python","@ORDERS"],orders,is_opt_multi=True,multiple_file=True)
return dop.getconf("__")
def write_conf(self,conf_arr={},extra_args=None,dryrun=False):
retval = True
conf_arr = self._pre_write_conf(conf_arr)
for plugin_name,_v in conf_arr.items():
_afile = "%s/%s.conf" % (PARSER_COLLECTD_PLUGIN_DIR,plugin_name,)
try:
_v['action']
if _v['action'] == "delete":
if os.path.exists(_afile):
os.unlink(_afile)
continue
except:
pass
#continue
try:
_v['value']
self.dop.addconf("parser",{})
self.dop.set("parser",[_afile],_v['value'])
#self.dop.preprint_r("parser")
arr = self.dop.getconf("parser")
self.parser.write_conf(arr,dryrun=dryrun)
except:
pass
return retval
"""
"""
if __name__ == '__main__':
"""Testing
"""
parser = collectdpluginParser()
# Read the existing configuration
dop = DictOp()
dop.addconf("dum",parser.read_conf())
new_plugin_name = "takuma"
##########################################################
# Add a "Uni" option (an option that can only be set in one place)
##########################################################
# Add 'Foo foo' (using the add method with list-style values)
dop.add("dum",[new_plugin_name,"Foo"],["foo",[["comment foo1","comment foo2"],"comment foo3"]])
# Add 'Bar bar' (using the cdp_set method with string-style values)
dop.cdp_set("dum",[new_plugin_name,"Bar"],"bar",multiple_file=True)
dop.cdp_set_pre_comment("dum",[new_plugin_name,"Bar"],["","comment bar1","comment bar2"],multiple_file=True)
dop.cdp_set_post_comment("dum",[new_plugin_name,"Bar"],"comment bar3",multiple_file=True)
##########################################################
# Add a "Multi" option (an option that can be set multiple times)
##########################################################
# Add 'LoadPlugin target_hoge'
dop.cdp_set("dum",[new_plugin_name,"LoadPlugin","target_hoge"],"target_hoge",multiple_file=True,is_opt_multi=True)
dop.cdp_set_pre_comment("dum",[new_plugin_name,"LoadPlugin","target_hoge"],["","Dis is target_hoge"],multiple_file=True)
##########################################################
    # Add a Sect option (a <bracket> directive option)
##########################################################
    # Add the following block
# <Plugin "foobar">
# <View "hoge">
# SubOpt1 gege # post
# </View>
# Option2 false
# Option1 true
# </Plugin>
dop.cdp_set("dum",[new_plugin_name,"Plugin","foobar","Option1"],"true",multiple_file=True)
dop.cdp_set("dum",[new_plugin_name,"Plugin","foobar","Option2"],"false",multiple_file=True)
dop.cdp_set_pre_comment("dum",[new_plugin_name,"Plugin","foobar","Option2"],"pre comment",multiple_file=True)
dop.cdp_set_post_comment("dum",[new_plugin_name,"Plugin","foobar","Option2"],"post comment",multiple_file=True)
dop.cdp_set("dum",[new_plugin_name,"Plugin","foobar","View","hoge","SubOpt1"],"gege",multiple_file=True)
dop.cdp_set_post_comment("dum",[new_plugin_name,"Plugin","foobar","View","hoge","SubOpt1"],"post",multiple_file=True)
print(dop.get("dum",["filter","@ORDERS"],multiple_file=True))
    # For parsers that read multiple files, pass is_parent_parser=True
    # Move '<Plugin foobar>' to the top
key = [new_plugin_name,"Plugin","foobar"]
dop.insert_order("dum",key,0,is_parent_parser=True)
    # Move 'LoadPlugin target_hoge' to the top => '<Plugin foobar>' becomes second
key = [new_plugin_name,"LoadPlugin","target_hoge"]
dop.insert_order("dum",key,0,is_parent_parser=True)
    # Move 'Foo foo' to the top => 'LoadPlugin target_hoge' becomes second
key = [new_plugin_name,"Foo"]
dop.insert_order("dum",key,0,is_parent_parser=True)
# work completely
#dop.cdp_comment("dum",["python","Plugin","python","Import"],multiple_file=True)
#dop.cdp_comment("dum",["python","Plugin","python","Module","notification"],multiple_file=True)
#dop.cdp_comment("dum",["python","Plugin","python","Module","notification","CountupDBPath"],multiple_file=True)
#dop.cdp_set("dum",["python","Plugin","python","Module","notification","@ORDERS"],[['Environ'],['CountupDBPath']],multiple_file=True,is_opt_multi=True)
# work completely, too.
#dop.cdp_comment("dum",["python","Plugin","python","ModulePath"],multiple_file=True)
# work completely, too. (but this is overwritten by _pre_write_conf() method)
#dop.cdp_set("dum",["python","Plugin","python","@ORDERS"],[['ModulePath'],['Encoding']],multiple_file=True,is_opt_multi=True)
#sys.exit()
    # Dump the resulting array for inspection
conf = dop.getconf("dum")
preprint_r(conf)
parser.write_conf(conf,dryrun=True)
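    # --- Hedged illustration added by the editor (not part of the original test) ---
    # write_conf() also honors a per-plugin 'action' key: a value of "delete"
    # unlinks that plugin's <name>.conf file (note that this branch runs even when
    # dryrun=True). Because the write above was only a dry run, takuma.conf was
    # never created, so nothing is actually removed here.
    delete_conf = {new_plugin_name: {'action': 'delete'}}
    parser.write_conf(delete_conf, dryrun=True)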
|
# ---------------------------------------------------------------------
# Interface Classification Rules models
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# Third-party modules
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import StringField, IntField, ListField, EmbeddedDocumentField, BooleanField
# NOC modules
from noc.core.mongo.fields import ForeignKeyField, PlainReferenceField
from noc.core.ip import IP
from noc.main.models.prefixtable import PrefixTable, PrefixTablePrefix
from noc.sa.models.managedobjectselector import ManagedObjectSelector
from noc.vc.models.vcfilter import VCFilter
from noc.core.comp import smart_text
from .interfaceprofile import InterfaceProfile
class InterfaceClassificationMatch(EmbeddedDocument):
# Field name
field = StringField(
choices=[
("name", "name"),
("description", "description"),
("ip", "ip"),
("tagged", "tagged vlan"),
("untagged", "untagged vlan"),
("hints", "hints"),
]
)
# Operation
op = StringField(choices=[("eq", "Equals"), ("regexp", "RegExp"), ("in", "in")])
#
value = StringField()
# "ip in"
prefix_table = ForeignKeyField(PrefixTable, required=False)
# *vlan in
vc_filter = ForeignKeyField(VCFilter, required=False)
description = StringField(required=False)
def __str__(self):
if self.prefix_table:
v = self.prefix_table.name
elif self.vc_filter:
v = self.vc_filter.name
else:
v = self.value
return "%s %s %s" % (self.field, self.op, v)
@property
def get_confdb_query(self):
query = ['Match("interfaces", ifname)']
if self.field == "name" and self.op == "eq":
query += ['Filter(ifname == "%s")' % self.value]
elif self.field == "name" and self.op == "regexp":
query += ['Re("%s", ifname, ignore_case=True)' % self.value]
if self.field == "description":
query += ['Match("interfaces", ifname, "description", ifdescr)']
if self.op == "eq":
query += ['Filter(ifdescr == "%s")' % self.value]
elif self.op == "regexp":
query += ['Re("%s", ifdescr, ignore_case=True)' % self.value]
if self.field == "hints" and self.op == "eq":
query += ['Match("interfaces", ifname, "hints", "%s")' % self.value]
if self.field == "ip" and self.op == "eq":
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "inet", "address", "%s")' % self.value
]
elif self.field == "ip" and self.op == "in" and self.prefix_table:
prefix_match = "( %s )" % " or ".join(
" MatchPrefix('%s', address)" % ptp.prefix
for ptp in PrefixTablePrefix.objects.filter(table=self.prefix_table)
)
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "inet", "address", address)'
" and %s and Del(vr, fi, address)" % prefix_match
]
if self.field == "untagged" and self.op == "eq" and self.value:
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "bridge", "switchport", "untagged", %s)' % self.value
]
elif self.field == "untagged" and self.op == "in" and self.vc_filter:
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "bridge", "switchport", "untagged", untagged)'
' and HasVLAN("%s", untagged) and Del(vr, fi, untagged)' % self.vc_filter.expression
]
if self.field == "tagged" and self.op == "eq" and (self.value or self.vc_filter):
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "bridge", "switchport", "tagged", tagged)'
' and MatchExactVLAN("%s", tagged) and Del(vr, fi, tagged)'
% (self.value or self.vc_filter.expression)
]
elif self.field == "tagged" and self.op == "in" and self.vc_filter:
query += [
'Match("virtual-router", vr, "forwarding-instance", fi, "interfaces",'
' ifname, "unit", ifname, "bridge", "switchport", "tagged", tagged)'
' and MatchAnyVLAN("%s", tagged) and Del(vr, fi, tagged)'
% self.vc_filter.expression
]
return " and ".join(query)
def compile(self, f_name):
a = getattr(self, "compile_%s_%s" % (self.field, self.op), None)
if a:
return a(f_name)
else:
raise SyntaxError("%s %s is not implemented" % (self.field, self.op))
# name
def compile_name_eq(self, f_name):
return "\n".join(
[
"def %s(iface):" % f_name,
" return iface.name.lower() == %s" % repr(self.value.lower()),
]
)
def compile_name_regexp(self, f_name):
return "\n".join(
[
"rx_%s = re.compile(%s, re.IGNORECASE)" % (f_name, repr(self.value)),
"def %s(iface):" % f_name,
" return bool(rx_%s.search(iface.name))" % f_name,
]
)
# description
def compile_description_eq(self, f_name):
return "\n".join(
[
"def %s(iface):" % f_name,
" return iface.description.lower() == %s" % repr(self.value.lower()),
]
)
def compile_description_regexp(self, f_name):
return "\n".join(
[
"rx_%s = re.compile(%s, re.IGNORECASE)" % (f_name, repr(self.value)),
"def %s(iface):" % f_name,
" return iface.description and bool(rx_%s.search(iface.description))" % f_name,
]
)
# IP
def compile_ip_eq(self, f_name):
v = IP.prefix(self.value)
r = [
"def %s(iface):" % f_name,
" a = [si.ipv%(afi)s_addresses for si in iface.subinterface_set.filter(enabled_afi='IPv%(afi)s')]"
% {"afi": v.afi},
" a = sum(a, [])",
]
if "/" in self.value:
# Compare prefixes
r += [" return any(x for x in a if x == %r)" % v.prefix]
else:
# Compare addresses
v = v.prefix.split("/")[0]
r += [" return any(x for x in a if x.split('/')[0] == %r)" % v]
return "\n".join(r)
def compile_ip_in(self, f_name):
r = [
"pt_%s = PrefixTable.objects.get(id=%s)" % (f_name, self.prefix_table.id),
"def %s(iface):" % f_name,
" for si in iface.subinterface_set.filter(enabled_afi='IPv4'):",
" for a in si.ipv4_addresses:",
" if a in pt_%s:" % f_name,
" return True",
" for si in iface.subinterface_set.filter(enabled_afi='IPv6'):",
" for a in si.ipv6_addresses:",
" if a in pt_%s:" % f_name,
" return True",
" return False",
]
return "\n".join(r)
# Untagged
def compile_untagged_eq(self, f_name):
vlan = int(self.value)
if vlan < 1 or vlan > 4096:
raise SyntaxError("Invalid VLAN")
r = [
"def %s(iface):" % f_name,
" return bool(iface.parent.subinterface_set.filter(enabled_afi='BRIDGE', untagged_vlan=%d).count())"
% vlan,
]
return "\n".join(r)
def compile_untagged_in(self, f_name):
r = [
"vcf_%s = VCFilter.get_by_id(id=%s)" % (f_name, self.vc_filter.id),
"if not vcf_%s:" % f_name,
" raise ValueError('Invalid VC Filter: %s')" % self.vc_filter.name,
"def %s(iface):" % f_name,
" for si in iface.parent.subinterface_set.filter(enabled_afi='BRIDGE'):",
" if si.untagged_vlan and vcf_%s.check(si.untagged_vlan):" % f_name,
" return True",
" return False",
]
return "\n".join(r)
# Tagged
def compile_tagged_eq(self, f_name):
vlan = int(self.value)
if vlan < 1 or vlan > 4096:
raise SyntaxError("Invalid VLAN")
r = [
"def %s(iface):" % f_name,
" return bool(iface.parent.subinterface_set.filter(enabled_afi='BRIDGE', tagged_vlans=%d).count())"
% vlan,
]
return "\n".join(r)
def compile_tagged_in(self, f_name):
r = [
"vcf_%s = VCFilter.get_by_id(id=%s)" % (f_name, self.vc_filter.id),
"if not vcf_%s:" % f_name,
" raise ValueError('Invalid VC Filter: %s')" % self.vc_filter.name,
"def %s(iface):" % f_name,
" for si in iface.parent.subinterface_set.filter(enabled_afi='BRIDGE'):",
" if si.tagged_vlans:",
" if any(vlan for vlan in si.tagged_vlans if vcf_%s.check(vlan)):" % f_name,
" return True",
" return False",
]
return "\n".join(r)
def compile_hints_eq(self, f_name):
r = ["def %s(iface):" % f_name, " return iface.hints and %r in iface.hints" % self.value]
return "\n".join(r)
class InterfaceClassificationRule(Document):
meta = {
"collection": "noc.inv.interfaceclassificationrules",
"strict": False,
"auto_create_index": False,
}
name = StringField(required=False)
is_active = BooleanField(default=True)
description = StringField(required=False)
order = IntField()
selector = ForeignKeyField(ManagedObjectSelector, required=False)
match = ListField(EmbeddedDocumentField(InterfaceClassificationMatch), required=False)
profile = PlainReferenceField(InterfaceProfile, default=InterfaceProfile.get_default_profile)
def __str__(self):
r = [smart_text(x) for x in self.match]
return "%s -> %s" % (", ".join(r), self.profile.name)
@property
def match_expr(self):
"""
Stringified match expression
"""
if not len(self.match):
return "any"
elif len(self.match) == 1:
return smart_text(self.match[0])
else:
return " AND ".join("(%s)" % smart_text(m) for m in self.match)
@property
def get_confdb_query(self):
if not len(self.match):
return 'Match("interfaces", ifname, "type", "physical")'
elif len(self.match) == 1:
return self.match[0].get_confdb_query
else:
return " and ".join("(%s)" % m.get_confdb_query for m in self.match)
@classmethod
def get_classificator_code(cls):
r = ["import re", "import bson", "from noc.sa.models.selectorcache import SelectorCache"]
mf = [
"gsc = {}",
"def classify(interface):",
" def in_selector(o, s):",
" if s in s_cache:",
" return s_cache[s]",
" if s in gsc:",
" selector = gsc[s]",
" else:",
" selector = ManagedObjectSelector.get_by_id(s)",
" gsc[s] = selector",
" r = SelectorCache.is_in_selector(o, selector)",
" # r = o in selector",
" s_cache[s] = r",
" return r",
" s_cache = {}",
" mo = interface.managed_object",
]
for rule in cls.objects.filter(is_active=True).order_by("order"):
rid = str(rule.id)
lmn = []
for i, m in enumerate(rule.match):
mn = "match_%s_%d" % (rid, i)
r += [m.compile(mn)]
lmn += ["%s(interface)" % mn]
if lmn:
mf += [
" if in_selector(mo, %d) and %s:" % (rule.selector.id, " and ".join(lmn)),
" return bson.ObjectId('%s')" % rule.profile.id,
]
else:
mf += [
" if in_selector(mo, %d):" % rule.selector.id,
" return bson.ObjectId('%s')" % rule.profile.id,
]
r += mf
return "\n".join(r)
@classmethod
def get_classificator(cls):
code = cls.get_classificator_code() + "\nhandlers[0] = classify\n"
# Hack to retrieve reference
handlers = {}
# Compile code
exec(
code,
{
"re": re,
"PrefixTable": PrefixTable,
"VCFilter": VCFilter,
"ManagedObjectSelector": ManagedObjectSelector,
"handlers": handlers,
},
)
return handlers[0]
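# --- Hedged sketch (added by the editor, not part of the NOC module) ---
# get_classificator() assembles Python source from the compile_*() helpers and
# exec()s it, pulling the generated classify() back out through the `handlers`
# dict. The helper below reproduces that exec/handlers pattern in isolation;
# the source string and all names in it are hypothetical.
def _demo_exec_handlers_pattern():
    src = "\n".join(
        [
            "import re",
            "rx_demo = re.compile('^Gi', re.IGNORECASE)",
            "def classify(name):",
            "    return bool(rx_demo.search(name))",
            "handlers[0] = classify",
        ]
    )
    handlers = {}
    exec(src, {"handlers": handlers})
    # the generated function is now callable like any other
    return handlers[0]("GigabitEthernet0/1")  # -> True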
|
import time
import hashlib
import torch
from torch_geometric.data import DataLoader
from cgl.utils.params import ParamDict
from cgl.data.graph_data import CircuitInMemDataset, CircuitGraphDataset
# from cgl.models.gnn import DeepGENNet
s = time.time()
print('Loading the dataset ...')
root = '/store/nosnap/results/ngspice_biased_pmos_gain/two_stage_biased_pmos'
cir_dset = CircuitGraphDataset(root=root, mode='train', circuit_type='opamp_biased_pmos')
node_output_idx = next(iter(cir_dset.graph_nodes.values()))['V_net6']
vout_idx = torch.where((torch.where(cir_dset[0].output_node_mask)[0] == node_output_idx))[0].item()
# gain mean and variance
gmean, gstd = -1.1057, 0.6559
def transform_fn(data):
data.gain = (data.vac_mag[vout_idx, 0].float() - gmean) / gstd
return data
dset = CircuitInMemDataset(root=root, mode='train', transform=transform_fn)
print(f'Dataset was loaded in {time.time() - s:.6f} seconds.')
sample_data = dset[0]
fract = 0.05
splits = dset.splits
train_idx = int(fract * len(splits['train']))
train_dset = dset[splits['train'][:train_idx]]
valid_dset = dset[splits['valid']]
test_dset = dset[splits['test']]
backbone_config = 'configs/opamp/dc/deep_gen_net/15-layer/config.py'
bb_id = hashlib.sha256(backbone_config.encode('utf-8')).hexdigest()[:6]
lr = 1e-3
activation = 'relu'
hidden_channels = 128
num_layers = 15
train_batch_size = min(256, len(train_dset))
valid_batch_size = min(256, len(valid_dset))
test_batch_size = min(256, len(test_dset))
exp_name = f'GAIN_PMOS_FT_Pool_{fract*10:.1f}_DeepGEN_h{hidden_channels}_nl{num_layers}_bs{train_batch_size}_lr{lr:.0e}_{activation}'
mdl_config = ParamDict(
exp_name=exp_name,
num_nodes=sample_data.vdc.shape[0],
in_channels=sample_data.x.shape[-1] + sample_data.type_tens.shape[-1],
hidden_channels=hidden_channels,
num_layers=num_layers,
dropout=0,
activation=activation,
bins=50,
lr=lr,
freeze_backbone=False,
use_pooling=True,
output_label='gain',
output_sigmoid=False,
lr_warmup={'peak_lr': lr, 'weight_decay': 0,
'warmup_updates': 50, 'tot_updates': 20000, 'end_lr': 5e-5},
)
train_dloader = DataLoader(train_dset, batch_size=train_batch_size, shuffle=True, num_workers=0)
valid_dloader = DataLoader(valid_dset, batch_size=valid_batch_size, num_workers=0)
test_dloader = DataLoader(test_dset, batch_size=test_batch_size, num_workers=0)
# .to converts the weight dtype to match input
# model = DeepGENNet(mdl_config).to(sample_data.x.dtype)
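# --- Hedged sketch (added by the editor; the original script stops at the config) ---
# One epoch of MSE training on the normalized 'gain' target, assuming a
# hypothetical regression model whose forward(batch) returns per-graph gain
# predictions. The real backbone (DeepGENNet) is commented out above, so this
# helper is illustrative only and is not called here.
def train_one_epoch(model, loader=train_dloader, lr=mdl_config.lr):
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.train()
    for batch in loader:
        optimizer.zero_grad()
        pred = model(batch)
        loss = torch.nn.functional.mse_loss(pred.view(-1), batch.gain.view(-1))
        loss.backward()
        optimizer.step()
    return model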
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import math
import numpy as np
import pandas as pd
from packaging import version
from collections.abc import Iterable
from pandapower.plotting.plotly.get_colors import get_plotly_color, get_plotly_cmap
from pandapower.plotting.plotly.mapbox_plot import _on_map_test, _get_mapbox_token, MapboxTokenMissing
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
try:
from plotly import __version__ as plotly_version
from plotly.graph_objs.scatter.marker import ColorBar
from plotly.graph_objs import Figure, Layout
from plotly.graph_objs.layout import XAxis, YAxis
from plotly.graph_objs.scatter import Line, Marker
from plotly.graph_objs.scattermapbox import Line as scmLine
from plotly.graph_objs.scattermapbox import Marker as scmMarker
except ImportError:
logger.info("Failed to import plotly - interactive plotting will not be available")
def version_check():
if version.parse(plotly_version) < version.parse("3.1.1"):
raise UserWarning("Your plotly version {} is no longer supported.\r\n"
"Please upgrade your python-plotly installation, "
"e.g., via pip install --upgrade plotly".format(__version__))
def _in_ipynb():
"""
an auxiliary function which checks if plot is called from a jupyter-notebook or not
"""
import __main__ as main
return not hasattr(main, '__file__')
def sum_line_length(pts):
pt_diff = lambda p: (p[0][0] - p[1][0], p[0][1] - p[1][1])
diffs = map(pt_diff, zip(pts[:-1], pts[1:]))
line_length = sum(math.hypot(d1, d2) for d1, d2 in diffs)
return line_length
def get_line_neutral(coord):
if len(coord) == 1:
return coord[0]
half_length = sum_line_length(coord) / 2.0
length = 0.0
ind = 0
while length < half_length:
ind += 1
length = sum_line_length(coord[:ind])
start_coord = coord[ind - 2]
end_coord = coord[ind - 1]
mid = [(a1 + a2) / 2.0 for a1, a2 in zip(start_coord, end_coord)]
return mid
def create_edge_center_trace(line_trace, size=1, patch_type="circle", color="white", infofunc=None,
trace_name='edge_center', use_line_geodata=False):
"""
    Creates a plotly trace of edge center markers (used for the hover info of lines and trafos).
    INPUT:
        **line_trace** - the already generated line traces with center geodata
    OPTIONAL:
        **size** (int, 1) - patch size
        **patch_type** (str, "circle") - patch type, can be
                - "circle" for a circle
                - "square" for a rectangle
                - "diamond" for a diamond
                - many more patch types at https://plot.ly/python/reference/#scatter-marker
        **infofunc** (pd.Series, None) - hoverinfo for each trace element. Indices should correspond to the pandapower element indices
        **trace_name** (String, "edge_center") - name of the trace which will appear in the legend
        **color** (String, "white") - color of the markers in the trace
"""
# color = get_plotly_color(color)
center_trace = dict(type='scatter', text=[], mode='markers', hoverinfo='text', name=trace_name,
marker=dict(color=color, size=size, symbol=patch_type))
if not use_line_geodata:
center_trace['x'], center_trace['y'] = (line_trace[0]["x"][1::4], line_trace[0]["y"][1::4])
else:
x, y = [], []
for trace in line_trace:
coord = list(zip(trace["x"], trace["y"]))
mid_coord = get_line_neutral(coord)
x.append(mid_coord[0])
y.append(mid_coord[1])
center_trace['x'], center_trace['y'] = (x, y)
center_trace['text'] = infofunc
return center_trace
def create_bus_trace(net, buses=None, size=5, patch_type="circle", color="blue", infofunc=None,
trace_name='buses', legendgroup=None, cmap=None, cmap_vals=None,
cbar_title=None, cmin=None, cmax=None, cpos=1.0, colormap_column="vm_pu"):
"""
Creates a plotly trace of pandapower buses.
INPUT:
**net** (pandapowerNet) - The pandapower network
OPTIONAL:
**buses** (list, None) - The buses for which the collections are created.
If None, all buses in the network are considered.
**size** (int, 5) - patch size
**patch_type** (str, "circle") - patch type, can be
- "circle" for a circle
- "square" for a rectangle
- "diamond" for a diamond
                - many more patch types at https://plot.ly/python/reference/#scatter-marker
**infofunc** (pd.Series, None) - hoverinfo for bus elements. Indices should correspond to the pandapower element indices
**trace_name** (String, "buses") - name of the trace which will appear in the legend
**color** (String, "blue") - color of buses in the trace
**cmap** (String, None) - name of a colormap which exists within plotly (Greys, YlGnBu, Greens, YlOrRd,
Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis)
alternatively a custom discrete colormap can be used
**cmap_vals** (list, None) - values used for coloring using colormap
**cbar_title** (String, None) - title for the colorbar
**cmin** (float, None) - colorbar range minimum
**cmax** (float, None) - colorbar range maximum
        **cpos** (float, 1.0) - position of the colorbar
**colormap_column** (str, "vm_pu") - set color of bus according to this variable
"""
color = get_plotly_color(color)
bus_trace = dict(type='scatter', text=[], mode='markers', hoverinfo='text', name=trace_name,
marker=dict(color=color, size=size, symbol=patch_type))
buses = net.bus.index.tolist() if buses is None else list(buses)
bus_plot_index = [b for b in buses if b in list(set(buses) & set(net.bus_geodata.index))]
bus_trace['x'], bus_trace['y'] = (net.bus_geodata.loc[bus_plot_index, 'x'].tolist(),
net.bus_geodata.loc[bus_plot_index, 'y'].tolist())
if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(buses):
infofunc = pd.Series(index=buses, data=infofunc)
bus_trace['text'] = net.bus.loc[bus_plot_index, 'name'] if infofunc is None else infofunc.loc[buses]
if legendgroup:
bus_trace['legendgroup'] = legendgroup
# if color map is set
if cmap is not None:
# TODO introduce discrete colormaps (see contour plots in plotly)
# if cmap_vals are not given
cmap = 'Jet' if cmap is True else cmap
if cmap_vals is not None:
cmap_vals = cmap_vals
else:
if net.res_line.shape[0] == 0:
logger.error("There are no power flow results for buses voltage magnitudes which are default for bus "
"colormap coloring..."
"set cmap_vals input argument if you want colormap according to some specific values...")
cmap_vals = net.res_bus.loc[bus_plot_index, colormap_column].values
cmap_vals = net.res_bus.loc[bus_plot_index, colormap_column] if cmap_vals is None else cmap_vals
cmin = cmin if cmin else cmap_vals.min()
cmax = cmax if cmax else cmap_vals.max()
bus_trace['marker'] = Marker(size=size,
color=cmap_vals, cmin=cmin, cmax=cmax,
colorscale=cmap,
colorbar=ColorBar(thickness=10,
x=cpos),
symbol=patch_type
)
if cbar_title:
bus_trace['marker']['colorbar']['title'] = cbar_title
bus_trace['marker']['colorbar']['title']['side'] = 'right'
return [bus_trace]
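# --- Hedged usage sketch (added by the editor, not part of pandapower) ---
# create_bus_trace() with cmap set colors buses by a result column; by default
# the voltage magnitudes in net.res_bus.vm_pu are used, so a power flow result
# must exist. `net` is an assumed pandapowerNet with filled bus_geodata.
def _demo_colored_bus_trace(net):
    return create_bus_trace(net, size=8, cmap="Viridis", cbar_title="bus voltage [pu]",
                            cmin=0.9, cmax=1.1)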
def _get_line_geodata_plotly(net, lines, use_line_geodata):
xs = []
ys = []
if use_line_geodata:
for line_ind, _ in lines.iterrows():
line_coords = net.line_geodata.loc[line_ind, 'coords']
linex, liney = list(zip(*line_coords))
xs += linex
xs += [None]
ys += liney
ys += [None]
else:
# getting x and y values from bus_geodata for from and to side of each line
from_bus = net.bus_geodata.loc[lines.from_bus, 'x'].tolist()
to_bus = net.bus_geodata.loc[lines.to_bus, 'x'].tolist()
# center point added because of the hovertool
center = (np.array(from_bus) + np.array(to_bus)) / 2
none_list = [None] * len(from_bus)
xs = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()
from_bus = net.bus_geodata.loc[lines.from_bus, 'y'].tolist()
to_bus = net.bus_geodata.loc[lines.to_bus, 'y'].tolist()
# center point added because of the hovertool
center = (np.array(from_bus) + np.array(to_bus)) / 2
none_list = [None] * len(from_bus)
ys = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()
# [:-1] is because the trace will not appear on maps if None is at the end
return xs[:-1], ys[:-1]
def create_line_trace(net, lines=None, use_line_geodata=True, respect_switches=False, width=1.0,
color='grey', infofunc=None, trace_name='lines', legendgroup=None,
cmap=None, cbar_title=None, show_colorbar=True, cmap_vals=None, cmin=None,
cmax=None, cpos=1.1):
"""
Creates a plotly trace of pandapower lines.
INPUT:
**net** (pandapowerNet) - The pandapower network
OPTIONAL:
**lines** (list, None) - The lines for which the collections are created.
If None, all lines in the network are considered.
**width** (int, 1) - line width
**respect_switches** (bool, False) - flag for consideration of disconnected lines
**infofunc** (pd.Series, None) - hoverinfo for line elements. Indices should correspond to the pandapower element indices
**trace_name** (String, "lines") - name of the trace which will appear in the legend
**color** (String, "grey") - color of lines in the trace
**legendgroup** (String, None) - defines groups of layers that will be displayed in a legend
e.g. groups according to voltage level (as used in `vlevel_plotly`)
**cmap** (String, None) - name of a colormap which exists within plotly if set to True default `Jet`
colormap is used, alternative colormaps : Greys, YlGnBu, Greens, YlOrRd,
Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis
**cmap_vals** (list, None) - values used for coloring using colormap
        **show_colorbar** (bool, True) - flag for showing the corresponding colorbar
**cbar_title** (String, None) - title for the colorbar
**cmin** (float, None) - colorbar range minimum
**cmax** (float, None) - colorbar range maximum
**cpos** (float, 1.1) - position of the colorbar
"""
color = get_plotly_color(color)
# defining lines to be plot
lines = net.line.index.tolist() if lines is None else list(lines)
if len(lines) == 0:
return []
if infofunc is not None:
if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(lines):
infofunc = pd.Series(index=lines, data=infofunc)
if len(infofunc) != len(lines) and len(infofunc) != len(net.line):
raise UserWarning("Different amount of hover info than lines to plot")
assert isinstance(infofunc, pd.Series), \
"infofunc should be a pandas series with the net.line.index to the infofunc contents"
no_go_lines = set()
if respect_switches:
no_go_lines = set(lines) & set(net.switch.element[(net.switch.et == "l") & (net.switch.closed == 0)])
lines_to_plot = net.line.loc[set(net.line.index) & (set(lines) - no_go_lines)]
no_go_lines_to_plot = None
use_line_geodata = use_line_geodata if net.line_geodata.shape[0] > 0 else False
if use_line_geodata:
lines_to_plot = lines_to_plot.loc[set(lines_to_plot.index) & set(net.line_geodata.index)]
else:
lines_with_geodata = lines_to_plot.from_bus.isin(net.bus_geodata.index) & \
lines_to_plot.to_bus.isin(net.bus_geodata.index)
lines_to_plot = lines_to_plot.loc[lines_with_geodata]
cmap_lines = None
if cmap is not None:
        # workaround: if a colormap is used, each line needs to be a separate scatter object,
        # because plotly still does not properly support colormaps for line objects
        # TODO correct this once plotly resolves its open GitHub issue about Line colorbars
cmap = 'jet' if cmap is True else cmap
if cmap_vals is not None:
if not isinstance(cmap_vals, np.ndarray):
cmap_vals = np.asarray(cmap_vals)
else:
if net.res_line.shape[0] == 0:
logger.error("There are no power flow results for lines which are default for line colormap coloring..."
"set cmap_vals input argument if you want colormap according to some specific values...")
cmap_vals = net.res_line.loc[lines_to_plot.index, 'loading_percent'].values
cmap_lines = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)
if len(cmap_lines) == len(net.line):
            # some lines are not plotted although cmap values were provided for all lines
line_idx_map = dict(zip(net.line.loc[lines].index.tolist(), range(len(lines))))
cmap_lines = [cmap_lines[line_idx_map[idx]] for idx in lines_to_plot.index]
else:
assert len(cmap_lines) == len(lines_to_plot), \
"Different amounts of cmap values and lines to plot were supplied"
line_traces = []
for col_i, (idx, line) in enumerate(lines_to_plot.iterrows()):
line_color = color
line_info = line['name']
if cmap is not None:
try:
line_color = cmap_lines[col_i]
line_info = line['name'] if infofunc is None else infofunc.loc[idx]
except IndexError:
logger.warning("No color and info for line {:d} (name: {}) available".format(idx, line['name']))
line_trace = dict(type='scatter', text=[], hoverinfo='text', mode='lines', name=trace_name,
line=Line(width=width, color=color))
line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, lines_to_plot.loc[idx:idx], use_line_geodata)
line_trace['line']['color'] = line_color
line_trace['text'] = line_info
line_traces.append(line_trace)
if show_colorbar and cmap is not None:
cmin = cmin if cmin else cmap_vals.min()
cmax = cmax if cmax else cmap_vals.max()
try:
# TODO for custom colormaps
            cbar_cmap_name = 'Jet' if cmap == 'jet' else cmap
            # workaround to get a colorbar for lines (an invisible node is added)
lines_cbar = dict(type='scatter', x=[net.bus_geodata.x[0]], y=[net.bus_geodata.y[0]], mode='markers',
marker=Marker(size=0, cmin=cmin, cmax=cmax,
color='rgb(255,255,255)',
colorscale=cbar_cmap_name,
colorbar=ColorBar(thickness=10,
x=cpos),
))
if cbar_title:
lines_cbar['marker']['colorbar']['title'] = cbar_title
lines_cbar['marker']['colorbar']['title']['side'] = 'right'
line_traces.append(lines_cbar)
except:
pass
if len(no_go_lines) > 0:
no_go_lines_to_plot = net.line.loc[no_go_lines]
for idx, line in no_go_lines_to_plot.iterrows():
line_color = color
line_trace = dict(type='scatter',
text=[], hoverinfo='text', mode='lines', name='disconnected lines',
line=Line(width=width / 2, color='grey', dash='dot'))
line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, no_go_lines_to_plot.loc[idx:idx], use_line_geodata)
line_trace['line']['color'] = line_color
try:
line_trace['text'] = infofunc.loc[idx]
except (KeyError, IndexError):
line_trace["text"] = line['name']
line_traces.append(line_trace)
if legendgroup:
line_trace['legendgroup'] = legendgroup
# sort infofunc so that it is the correct order lines_to_plot + no_go_lines_to_plot
if infofunc is not None:
if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(net.line):
infofunc = pd.Series(index=net.line.index, data=infofunc)
assert isinstance(infofunc, pd.Series), \
"infofunc should be a pandas series with the net.line.index to the infofunc contents"
sorted_idx = lines_to_plot.index.tolist()
if no_go_lines_to_plot is not None:
sorted_idx += no_go_lines_to_plot.index.tolist()
infofunc = infofunc.loc[sorted_idx]
center_trace = create_edge_center_trace(line_traces, color=color, infofunc=infofunc,
use_line_geodata=use_line_geodata)
line_traces.append(center_trace)
return line_traces
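# --- Hedged usage sketch (added by the editor, not part of pandapower) ---
# With respect_switches=True, lines behind open switches are drawn as dashed
# "disconnected lines"; cmap=True falls back to the 'jet' colorscale and colors
# lines by net.res_line.loading_percent, so a power flow result must exist.
# `net` is an assumed pandapowerNet with geodata.
def _demo_loading_line_trace(net):
    return create_line_trace(net, respect_switches=True, cmap=True,
                             cbar_title="line loading [%]", cmin=0, cmax=100)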
def create_trafo_trace(net, trafos=None, color='green', width=5, infofunc=None, cmap=None,
trace_name='trafos', cmin=None, cmax=None, cmap_vals=None, use_line_geodata=None):
"""
Creates a plotly trace of pandapower trafos.
INPUT:
**net** (pandapowerNet) - The pandapower network
OPTIONAL:
**trafos** (list, None) - The trafos for which the collections are created.
If None, all trafos in the network are considered.
**width** (int, 5) - line width
**infofunc** (pd.Series, None) - hoverinfo for trafo elements. Indices should correspond to the pandapower element indices
**trace_name** (String, "lines") - name of the trace which will appear in the legend
**color** (String, "green") - color of lines in the trace
**cmap** (bool, False) - name of a colormap which exists within plotly (Greys, YlGnBu, Greens, YlOrRd,
Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis)
**cmap_vals** (list, None) - values used for coloring using colormap
**cbar_title** (String, None) - title for the colorbar
**cmin** (float, None) - colorbar range minimum
**cmax** (float, None) - colorbar range maximum
"""
color = get_plotly_color(color)
# defining lines to be plot
trafos = net.trafo.index.tolist() if trafos is None else list(trafos)
if len(trafos) == 0:
return []
trafo_buses_with_geodata = net.trafo.hv_bus.isin(net.bus_geodata.index) & \
net.trafo.lv_bus.isin(net.bus_geodata.index)
trafos_mask = net.trafo.index.isin(trafos)
trafos_to_plot = net.trafo[trafo_buses_with_geodata & trafos_mask]
if infofunc is not None:
if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(trafos):
infofunc = pd.Series(index=trafos, data=infofunc)
assert isinstance(infofunc, pd.Series), \
"infofunc should be a pandas series with the net.trafo.index to the infofunc contents"
infofunc = infofunc.loc[trafos_to_plot.index]
cmap_colors = []
if cmap is not None:
        cmap = 'jet' if cmap is True else cmap
        cmin = 0 if cmin is None else cmin
        cmax = 100 if cmax is None else cmax
if cmap_vals is not None:
cmap_vals = cmap_vals
else:
if net.res_trafo.shape[0] == 0:
logger.error("There are no power flow results for lines which are default for line colormap coloring..."
"set cmap_vals input argument if you want colormap according to some specific values...")
cmap_vals = net.res_trafo.loc[trafos_to_plot.index, 'loading_percent'].values
cmap_colors = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)
trafo_traces = []
for col_i, (idx, trafo) in enumerate(trafos_to_plot.iterrows()):
if cmap is not None:
color = cmap_colors[col_i]
trafo_trace = dict(type='scatter', text=[], line=Line(width=width, color=color),
hoverinfo='text', mode='lines', name=trace_name)
trafo_trace['text'] = trafo['name'] if infofunc is None else infofunc.loc[idx]
from_bus = net.bus_geodata.loc[trafo.hv_bus, 'x']
to_bus = net.bus_geodata.loc[trafo.lv_bus, 'x']
trafo_trace['x'] = [from_bus, (from_bus + to_bus) / 2, to_bus]
from_bus = net.bus_geodata.loc[trafo.hv_bus, 'y']
to_bus = net.bus_geodata.loc[trafo.lv_bus, 'y']
trafo_trace['y'] = [from_bus, (from_bus + to_bus) / 2, to_bus]
trafo_traces.append(trafo_trace)
center_trace = create_edge_center_trace(trafo_traces, color=color, infofunc=infofunc,
use_line_geodata=use_line_geodata)
trafo_traces.append(center_trace)
return trafo_traces
def draw_traces(traces, on_map=False, map_style='basic', showlegend=True, figsize=1,
aspectratio='auto', filename="temp-plot.html"):
"""
plots all the traces (which can be created using :func:`create_bus_trace`, :func:`create_line_trace`,
:func:`create_trafo_trace`)
to PLOTLY (see https://plot.ly/python/)
INPUT:
**traces** - list of dicts which correspond to plotly traces
generated using: `create_bus_trace`, `create_line_trace`, `create_trafo_trace`
OPTIONAL:
**on_map** (bool, False) - enables using mapbox plot in plotly
        **map_style** (str, 'basic') - style of the mapbox map, can be
- 'streets'
- 'bright'
- 'light'
- 'dark'
- 'satellite'
        **showlegend** (bool, True) - enables legend display
**figsize** (float, 1) - aspectratio is multiplied by it in order to get final image size
        **aspectratio** (tuple, 'auto') - when 'auto' it preserves the original aspect ratio of the network geodata;
            any custom aspect ratio can be given as a tuple, e.g. (1.2, 1)
**filename** (str, "temp-plot.html") - plots to a html file called filename
"""
if on_map:
try:
on_map = _on_map_test(traces[0]['x'][0], traces[0]['y'][0])
except:
logger.warning("Test if geo-data are in lat/long cannot be performed using geopy -> "
"eventual plot errors are possible.")
if on_map is False:
logger.warning("Existing geodata are not real lat/lon geographical coordinates. -> "
"plot on maps is not possible.\n"
"Use geo_data_to_latlong(net, projection) to transform geodata from specific projection.")
if on_map:
# change traces for mapbox
# change trace_type to scattermapbox and rename x to lat and y to lon
for trace in traces:
trace['lat'] = trace.pop('x')
trace['lon'] = trace.pop('y')
trace['type'] = 'scattermapbox'
if "line" in trace and isinstance(trace["line"], Line):
# scattermapboxplot lines do not support dash for some reason, make it a red line instead
if "dash" in trace["line"]._props:
_prps = dict(trace["line"]._props)
_prps.pop("dash", None)
_prps["color"] = "red"
trace["line"] = scmLine(_prps)
else:
trace["line"] = scmLine(dict(trace["line"]._props))
elif "marker" in trace and isinstance(trace["marker"], Marker):
trace["marker"] = scmMarker(trace["marker"]._props)
# setting Figure object
fig = Figure(data=traces, # edge_trace
layout=Layout(
titlefont=dict(size=16),
showlegend=showlegend,
                     autosize=True if aspectratio == 'auto' else False,
hovermode='closest',
margin=dict(b=5, l=5, r=5, t=5),
# annotations=[dict(
# text="",
# showarrow=False,
# xref="paper", yref="paper",
# x=0.005, y=-0.002)],
xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False),
yaxis=YAxis(showgrid=False, zeroline=False, showticklabels=False),
# legend=dict(x=0, y=1.0)
), )
    # if plotting on a map, a mapbox access token is required for the mapbox layout
if on_map:
try:
mapbox_access_token = _get_mapbox_token()
except Exception:
logger.exception('mapbox token required for map plots. '
'Get Mapbox token by signing in to https://www.mapbox.com/.\n'
'After getting a token, set it to pandapower using:\n'
'pandapower.plotting.plotly.mapbox_plot.set_mapbox_token(\'<token>\')')
raise MapboxTokenMissing
fig['layout']['mapbox'] = dict(accesstoken=mapbox_access_token,
bearing=0,
center=dict(lat=pd.Series(traces[0]['lat']).dropna().mean(),
lon=pd.Series(traces[0]['lon']).dropna().mean()),
style=map_style,
pitch=0,
zoom=11)
# default aspectratio: if on_map use auto, else use 'original'
    aspectratio = 'original' if not on_map and aspectratio == 'auto' else aspectratio
    if aspectratio != 'auto':
        if aspectratio == 'original':
# TODO improve this workaround for getting original aspectratio
xs = []
ys = []
for trace in traces:
xs += trace['x']
ys += trace['y']
x_dropna = pd.Series(xs).dropna()
y_dropna = pd.Series(ys).dropna()
xrange = x_dropna.max() - x_dropna.min()
yrange = y_dropna.max() - y_dropna.min()
ratio = xrange / yrange
if ratio < 1:
aspectratio = (ratio, 1.)
else:
aspectratio = (1., 1 / ratio)
aspectratio = np.array(aspectratio) / max(aspectratio)
fig['layout']['width'], fig['layout']['height'] = ([ar * figsize * 700 for ar in aspectratio])
# check if called from ipynb or not in order to consider appropriate plot function
if _in_ipynb():
from plotly.offline import init_notebook_mode, iplot as plot
init_notebook_mode()
else:
from plotly.offline import plot as plot
plot(fig, filename=filename)
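# --- Hedged usage sketch (added by the editor, not part of pandapower) ---
# The trace builders above compose: build line, trafo and bus traces, concatenate
# the lists and hand them to draw_traces(). `net` is an assumed pandapowerNet
# with bus_geodata (e.g. one of the pandapower.networks examples).
def _demo_plot_network(net, filename="demo-plot.html"):
    traces = []
    traces += create_line_trace(net, color="grey", width=1.0)
    traces += create_trafo_trace(net, color="green", width=5)
    traces += create_bus_trace(net, color="blue", size=8)
    draw_traces(traces, on_map=False, aspectratio="auto", filename=filename)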
|
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
)
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df["sf"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"])
)
assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
assert utils.qth_survival_times(0.5, sf_multi_df["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
sf = pd.DataFrame(np.linspace(1, 0, 50))
q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
actual = utils.qth_survival_times(q, sf)
assert actual.shape[0] == len(q)
assert actual.index[0] == actual.index[1]
assert_series_equal(actual.iloc[0], actual.iloc[1])
npt.assert_almost_equal(actual.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
# days
start_date = ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date)
npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# years
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(T, np.array([0, 0, 1]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# hours
start_date = ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"]
end_date = ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="h")
npt.assert_almost_equal(T, np.array([1, 24, 3]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", "2013-10-12", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", fill_date="2013-10-12")
npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, None]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", None, ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "NaT", ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", na_values=["NaT", ""])
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal(T, T_)
npt.assert_array_equal(C, C_)
npt.assert_array_equal(W_, np.ones_like(T))
def test_survival_events_from_table_with_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal([1, 2, 3, 4, 5], T_)
npt.assert_array_equal([1, 0, 1, 1, 1], C_)
npt.assert_array_equal([1, 1, 1, 2, 1], W_)
def test_survival_table_from_events_with_non_trivial_censorship_column():
T = np.random.exponential(5, size=50)
    malformed_C = np.random.binomial(2, p=0.8, size=50)  # set to 2 on purpose!
proper_C = malformed_C > 0 # (proper "boolean" array)
table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
df = load_waltons()
first_obs = np.zeros(df.shape[0])
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"], first_obs)
assert len(g) == 2
assert all(removed.columns == ["removed:miR-137", "removed:control"])
assert all(removed.index == observed.index)
assert all(removed.index == censored.index)
def test_survival_table_from_events_binned_with_empty_bin():
df = load_waltons()
ix = df["group"] == "miR-137"
event_table = utils.survival_table_from_events(df.loc[ix]["T"], df.loc[ix]["E"], intervals=[0, 10, 20, 30, 40, 50])
assert not pd.isnull(event_table).any().any()
def test_survival_table_from_events_at_risk_column():
df = load_waltons()
# from R
expected = [
163.0,
162.0,
160.0,
157.0,
154.0,
152.0,
151.0,
148.0,
144.0,
139.0,
134.0,
133.0,
130.0,
128.0,
126.0,
119.0,
118.0,
108.0,
107.0,
99.0,
96.0,
89.0,
87.0,
69.0,
65.0,
49.0,
38.0,
36.0,
27.0,
24.0,
14.0,
1.0,
]
df = utils.survival_table_from_events(df["T"], df["E"])
assert list(df["at_risk"][1:]) == expected # skip the first event as that is the birth time, 0.
def test_survival_table_to_events_casts_to_float():
T, C = (np.array([1, 2, 3, 4, 4, 5]), np.array([True, False, True, True, True, True]))
d = utils.survival_table_from_events(T, C, np.zeros_like(T))
npt.assert_array_equal(d["censored"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))
npt.assert_array_equal(d["removed"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))
def test_group_survival_table_from_events_works_with_series():
df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=["duration", "E", "G"])
ug, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))
npt.assert_array_equal(ug, np.array([3, 2]))
def test_survival_table_from_events_will_collapse_if_asked():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True)
assert table.index.tolist() == [
pd.Interval(-0.001, 3.5089999999999999, closed="right"),
pd.Interval(3.5089999999999999, 7.0179999999999998, closed="right"),
]
def test_survival_table_from_events_will_collapse_to_desired_bins():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])
assert table.index.tolist() == [pd.Interval(-0.001, 4, closed="right"), pd.Interval(4, 8, closed="right")]
def test_cross_validator_returns_k_results():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 5
def test_cross_validator_returns_fitters_k_results():
cf = CoxPHFitter()
fitters = [cf, cf]
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 3
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 5
def test_cross_validator_with_predictor():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
def test_cross_validator_with_stratified_cox_model():
cf = CoxPHFitter(strata=["race"])
utils.k_fold_cross_validation(cf, load_rossi(), duration_col="week", event_col="arrest")
def test_cross_validator_with_specific_loss_function():
cf = CoxPHFitter()
results_sq = utils.k_fold_cross_validation(
cf, load_regression_dataset(), scoring_method="concordance_index", duration_col="T", event_col="E"
)
def test_concordance_index():
size = 1000
T = np.random.normal(size=size)
P = np.random.normal(size=size)
C = np.random.choice([0, 1], size=size)
Z = np.zeros_like(T)
# Zeros is exactly random
assert utils.concordance_index(T, Z) == 0.5
assert utils.concordance_index(T, Z, C) == 0.5
# Itself is 1
assert utils.concordance_index(T, T) == 1.0
assert utils.concordance_index(T, T, C) == 1.0
# Random is close to 0.5
assert abs(utils.concordance_index(T, P) - 0.5) < 0.05
assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05
def test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = [0] * n
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_no_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = None
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_non_negative_T_and_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = np.linspace(0, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = np.linspace(-n / 2, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_raises_value_error_if_too_early_births():
n = 10
T = np.arange(0, n)
C = [True] * n
min_obs = T.copy()
min_obs[1] = min_obs[1] + 10
with pytest.raises(ValueError):
utils.survival_table_from_events(T, C, min_obs)
class TestLongDataFrameUtils(object):
@pytest.fixture
def seed_df(self):
df = pd.DataFrame.from_records([{"id": 1, "var1": 0.1, "T": 10, "E": 1}, {"id": 2, "var1": 0.5, "T": 12, "E": 0}])
return utils.to_long_format(df, "T")
@pytest.fixture
def cv1(self):
return pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var2": 1.4},
{"id": 1, "t": 4, "var2": 1.2},
{"id": 1, "t": 8, "var2": 1.5},
{"id": 2, "t": 0, "var2": 1.6},
]
)
@pytest.fixture
def cv2(self):
return pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 6, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E"
)
assert_frame_equal(df21, df12, check_like=True)
def test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True
)
assert_frame_equal(df21, df12, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records([{"id": 1, "t": 1, "var1": 1.0}, {"id": 1, "t": 2, "var1": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
expected = pd.DataFrame.from_records(
[
{"E": False, "id": 1, "stop": 1.0, "start": 0, "var1": 0.1},
{"E": False, "id": 1, "stop": 2.0, "start": 1, "var1": 1.0},
{"E": True, "id": 1, "stop": 10.0, "start": 2, "var1": 2.0},
]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_sum_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
old_value_at_time_0 = seed_df["var1"].iloc[0]
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0, "var2": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=False)
expected = pd.DataFrame.from_records(
[{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0 + old_value_at_time_0, "var2": 2.0}]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_overwrite_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=True)
expected = pd.DataFrame.from_records([{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0}])
assert_frame_equal(df, expected, check_like=True)
def test_enum_flag(self, seed_df, cv1, cv2):
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", add_enum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", add_enum=True
)
idx = df["id"] == 1
n = idx.sum()
try:
assert_series_equal(df["enum"].loc[idx], pd.Series(np.arange(1, n + 1)), check_names=False)
except AssertionError as e:
# Windows Numpy and Pandas sometimes have int32 or int64 as default dtype
if os.name == "nt" and "int32" in str(e) and "int64" in str(e):
assert_series_equal(
df["enum"].loc[idx], pd.Series(np.arange(1, n + 1), dtype=df["enum"].loc[idx].dtypes), check_names=False
)
else:
raise e
def test_event_col_is_properly_inserted(self, seed_df, cv2):
df = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E")
assert df.groupby("id").last()["E"].tolist() == [1, 0]
def test_redundant_cv_columns_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0, "var4": 1},
{"id": 1, "t": 1, "var3": 0, "var4": 1}, # redundant, as nothing changed during the interval
{"id": 1, "t": 3, "var3": 0, "var4": 1}, # redundant, as nothing changed during the interval
{"id": 1, "t": 6, "var3": 1, "var4": 1},
{"id": 1, "t": 9, "var3": 1, "var4": 1}, # redundant, as nothing changed during the interval
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_will_convert_event_column_to_bools(self, seed_df, cv1):
seed_df["E"] = seed_df["E"].astype(int)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E")
assert df.dtypes["E"] == bool
def test_if_cvs_include_a_start_time_after_the_final_time_it_is_excluded(self, seed_df):
max_T = seed_df["stop"].max()
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0},
{"id": 1, "t": max_T + 10, "var3": 1}, # will be excluded
{"id": 2, "t": 0, "var3": 0},
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_if_cvs_include_a_start_time_before_it_is_included(self, seed_df):
min_T = seed_df["start"].min()
cv = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": min_T - 1, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 3
def test_cvs_with_null_values_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[{"id": None, "t": 0, "var3": 0}, {"id": 1, "t": None, "var3": 1}, {"id": 2, "t": 0, "var3": None}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 1
def test_a_new_row_is_not_created_if_start_times_are_the_same(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv1 = pd.DataFrame.from_records([{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 5, "var3": 1}])
cv2 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var4": 0}, {"id": 1, "t": 5, "var4": 1.5}, {"id": 1, "t": 6, "var4": 1.7}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
assert df.shape[0] == 3
def test_error_is_raised_if_columns_are_missing_in_seed_df(self, seed_df, cv1):
del seed_df["start"]
with pytest.raises(IndexError):
utils.add_covariate_to_timeline(seed_df, cv1, "id", "t", "E")
def test_cumulative_sum(self):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 5, "E": 1}])
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var4": 1}, {"id": 1, "t": 1, "var4": 1}, {"id": 1, "t": 3, "var4": 1}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", cumulative_sum=True)
expected = pd.DataFrame.from_records(
[
{"id": 1, "start": 0, "stop": 1.0, "cumsum_var4": 1, "E": False},
{"id": 1, "start": 1, "stop": 3.0, "cumsum_var4": 2, "E": False},
{"id": 1, "start": 3, "stop": 5.0, "cumsum_var4": 3, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_delay(self, cv2):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 50, "E": 1}])
cv3 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "varA": 2}, {"id": 1, "t": 10, "varA": 4}, {"id": 1, "t": 20, "varA": 6}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv3, "id", "t", "E", delay=2).fillna(0)
expected = pd.DataFrame.from_records(
[
{"start": 0, "stop": 2.0, "varA": 0.0, "id": 1, "E": False},
{"start": 2, "stop": 12.0, "varA": 2.0, "id": 1, "E": False},
{"start": 12, "stop": 22.0, "varA": 4.0, "id": 1, "E": False},
{"start": 22, "stop": 50.0, "varA": 6.0, "id": 1, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_covariates_from_event_matrix_with_simple_addition(self):
base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=["id", "start", "stop", "e"])
event_df = pd.DataFrame([[1, 1], [2, 2], [3, 3], [4, None]], columns=["id", "poison"])
cv = utils.covariates_from_event_matrix(event_df, "id")
ldf = utils.add_covariate_to_timeline(base_df, cv, "id", "duration", "e", cumulative_sum=True)
assert pd.notnull(ldf).all().all()
expected = pd.DataFrame(
[
(0.0, 0.0, 1.0, 1, False),
(1.0, 1.0, 5.0, 1, True),
(0.0, 0.0, 2.0, 2, False),
(2.0, 1.0, 4.0, 2, True),
(0.0, 0.0, 3.0, 3, False),
(3.0, 1.0, 8.0, 3, True),
(0.0, 0.0, 4.0, 4, True),
],
columns=["start", "cumsum_poison", "stop", "id", "e"],
)
assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)
def test_covariates_from_event_matrix(self):
base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=["id", "start", "stop", "e"])
event_df = pd.DataFrame(
[[1, 1, None, 2], [2, None, 5, None], [3, 3, 3, 7]], columns=["id", "promotion", "movement", "raise"]
)
cv = utils.covariates_from_event_matrix(event_df, "id")
ldf = utils.add_covariate_to_timeline(base_df, cv, "id", "duration", "e", cumulative_sum=True)
expected = pd.DataFrame.from_records(
[
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 1.0,
"start": 0.0,
"stop": 1.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 1.0,
"start": 1.0,
"stop": 2.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 1.0,
"e": 1.0,
"id": 1.0,
"start": 2.0,
"stop": 5.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 1.0,
"id": 2.0,
"start": 0.0,
"stop": 4.0,
},
{
"cumsum_movement": 0.0,
"cumsum_promotion": 0.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 3.0,
"start": 0.0,
"stop": 3.0,
},
{
"cumsum_movement": 1.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 0.0,
"e": 0.0,
"id": 3.0,
"start": 3.0,
"stop": 7.0,
},
{
"cumsum_movement": 1.0,
"cumsum_promotion": 1.0,
"cumsum_raise": 1.0,
"e": 1.0,
"id": 3.0,
"start": 7.0,
"stop": 8.0,
},
{
"cumsum_movement": None,
"cumsum_promotion": None,
"cumsum_raise": None,
"e": 1.0,
"id": 4.0,
"start": 0.0,
"stop": 4.0,
},
]
)
assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)
def test_to_episodic_format_with_long_time_gap_is_identical(self):
rossi = load_rossi()
rossi["id"] = np.arange(rossi.shape[0])
long_rossi = utils.to_episodic_format(rossi, duration_col="week", event_col="arrest", id_col="id", time_gaps=1000.0)
        # astype(int) would fail on Windows, where the default integer dtype is int32 rather than int64
long_rossi["week"] = long_rossi["stop"].astype(rossi["week"].dtype)
del long_rossi["start"]
del long_rossi["stop"]
assert_frame_equal(long_rossi, rossi, check_like=True)
def test_to_episodic_format_preserves_outcome(self):
E = [1, 1, 0, 0]
df = pd.DataFrame({"T": [1, 3, 1, 3], "E": E, "id": [1, 2, 3, 4]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id").sort_values(["id", "stop"])
assert long_df.shape[0] == 1 + 3 + 1 + 3
assert long_df.groupby("id").last()["E"].tolist() == E
def test_to_episodic_format_handles_floating_durations(self):
df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 1], "id": [1, 2]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id").sort_values(["id", "stop"])
assert long_df.shape[0] == 1 + 4
assert long_df["stop"].tolist() == [0.1, 1, 2, 3, 3.5]
def test_to_episodic_format_handles_floating_durations_with_time_gaps(self):
df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 1], "id": [1, 2]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id", time_gaps=2.0).sort_values(["id", "stop"])
assert long_df["stop"].tolist() == [0.1, 2, 3.5]
def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):
df = pd.DataFrame({"T": [0.1, 3.5], "E": [1, 0], "id": [1, 2]})
long_df = utils.to_episodic_format(df, "T", "E", id_col="id", time_gaps=2.0).sort_values(["id", "stop"])
assert long_df.groupby("id").last()["E"].tolist() == [1, 0]
def test_to_episodic_format_adds_id_col(self):
df = pd.DataFrame({"T": [1, 3], "E": [1, 0]})
long_df = utils.to_episodic_format(df, "T", "E")
assert "id" in long_df.columns
def test_to_episodic_format_uses_custom_index_as_id(self):
df = pd.DataFrame({"T": [1, 3], "E": [1, 0]}, index=["A", "B"])
long_df = utils.to_episodic_format(df, "T", "E")
assert long_df["id"].tolist() == ["A", "B", "B", "B"]
class TestStepSizer:
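    # StepSizer adapts an optimizer's step size: oscillating or exploding update norms shrink it, steadily decreasing norms let it grow back.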
def test_StepSizer_step_will_decrease_if_unstable(self):
start = 0.95
ss = utils.StepSizer(start)
assert ss.next() == start
ss.update(1.0)
ss.update(2.0)
ss.update(1.0)
ss.update(2.0)
assert ss.next() < start
def test_StepSizer_step_will_increase_if_stable(self):
start = 0.5
ss = utils.StepSizer(start)
assert ss.next() == start
ss.update(1.0)
ss.update(0.5)
ss.update(0.4)
ss.update(0.1)
assert ss.next() > start
def test_StepSizer_step_will_decrease_if_explodes(self):
start = 0.5
ss = utils.StepSizer(start)
assert ss.next() == start
ss.update(20.0)
assert ss.next() < start
class TestSklearnAdapter:
@pytest.fixture
def X(self):
return load_regression_dataset().drop("T", axis=1)
@pytest.fixture
def Y(self):
return load_regression_dataset().pop("T")
def test_model_has_correct_api(self, X, Y):
base_model = sklearn_adapter(CoxPHFitter, event_col="E")
cph = base_model()
assert hasattr(cph, "fit")
cph.fit(X, Y)
assert hasattr(cph, "predict")
cph.predict(X)
assert hasattr(cph, "score")
cph.score(X, Y)
def test_sklearn_cross_val_score_accept_model(self, X, Y):
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
wf = base_model(penalizer=1.0)
assert len(cross_val_score(wf, X, Y, cv=3)) == 3
def test_sklearn_GridSearchCV_accept_model(self, X, Y):
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
grid_params = {"penalizer": 10.0 ** np.arange(-2, 3), "model_ancillary": [True, False]}
clf = GridSearchCV(base_model(), grid_params, cv=4)
clf.fit(X, Y)
assert clf.best_params_ == {"model_ancillary": True, "penalizer": 100.0}
assert clf.predict(X).shape[0] == X.shape[0]
def test_model_can_accept_things_like_strata(self, X, Y):
X["strata"] = np.random.randint(0, 2, size=X.shape[0])
base_model = sklearn_adapter(CoxPHFitter, event_col="E")
cph = base_model(strata="strata")
cph.fit(X, Y)
def test_we_can_user_other_prediction_methods(self, X, Y):
base_model = sklearn_adapter(WeibullAFTFitter, event_col="E", predict_method="predict_median")
wf = base_model(strata="strata")
wf.fit(X, Y)
assert wf.predict(X).shape[0] == X.shape[0]
@pytest.mark.xfail
def test_dill(self, X, Y):
import dill
base_model = sklearn_adapter(CoxPHFitter, event_col="E")
cph = base_model()
cph.fit(X, Y)
s = dill.dumps(cph)
s = dill.loads(s)
assert cph.predict(X).shape[0] == X.shape[0]
@pytest.mark.xfail
def test_pickle(self, X, Y):
import pickle
base_model = sklearn_adapter(CoxPHFitter, event_col="E")
cph = base_model()
cph.fit(X, Y)
s = pickle.dumps(cph, protocol=-1)
s = pickle.loads(s)
assert cph.predict(X).shape[0] == X.shape[0]
def test_isinstance(self):
from sklearn.base import BaseEstimator, RegressorMixin, MetaEstimatorMixin, MultiOutputMixin
base_model = sklearn_adapter(CoxPHFitter, event_col="E")
assert isinstance(base_model(), BaseEstimator)
assert isinstance(base_model(), RegressorMixin)
assert isinstance(base_model(), MetaEstimatorMixin)
@pytest.mark.xfail
def test_sklearn_GridSearchCV_accept_model_with_parallelization(self, X, Y):
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
grid_params = {"penalizer": 10.0 ** np.arange(-2, 3), "l1_ratio": [0.05, 0.5, 0.95], "model_ancillary": [True, False]}
# note the n_jobs
clf = GridSearchCV(base_model(), grid_params, cv=4, n_jobs=-1)
clf.fit(X, Y)
assert clf.best_params_ == {"l1_ratio": 0.5, "model_ancillary": False, "penalizer": 0.01}
assert clf.predict(X).shape[0] == X.shape[0]
@pytest.mark.xfail
def test_joblib(self, X, Y):
from joblib import dump, load
base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
clf = base_model()
clf.fit(X, Y)
dump(clf, "filename.joblib")
clf = load("filename.joblib")
@pytest.mark.xfail
def test_sklearn_check():
from sklearn.utils.estimator_checks import check_estimator
base_model = sklearn_adapter(WeibullAFTFitter, event_col="E")
check_estimator(base_model())
def test_rmst_works_at_kaplan_meier_edge_case():
T = [1, 2, 3, 4, 10]
kmf = KaplanMeierFitter().fit(T)
    # once S(t) = 0, extending t past that point does not change the RMST
assert utils.restricted_mean_survival_time(kmf, t=10) == utils.restricted_mean_survival_time(kmf, t=10.001)
assert utils.restricted_mean_survival_time(kmf, t=9.9) <= utils.restricted_mean_survival_time(kmf, t=10.0)
assert abs((utils.restricted_mean_survival_time(kmf, t=4) - (1.0 + 0.8 + 0.6 + 0.4))) < 0.0001
assert abs((utils.restricted_mean_survival_time(kmf, t=4 + 0.1) - (1.0 + 0.8 + 0.6 + 0.4 + 0.2 * 0.1))) < 0.0001
def test_rmst_exactely_with_known_solution():
T = np.random.exponential(2, 100)
exp = ExponentialFitter().fit(T)
lambda_ = exp.lambda_
assert abs(utils.restricted_mean_survival_time(exp) - lambda_) < 0.001
assert abs(utils.restricted_mean_survival_time(exp, t=lambda_) - lambda_ * (np.e - 1) / np.e) < 0.001
@flaky
def test_rmst_approximate_solution():
T = np.random.exponential(2, 4000)
exp = ExponentialFitter().fit(T, timeline=np.linspace(0, T.max(), 10000))
lambda_ = exp.lambda_
with pytest.warns(exceptions.ApproximationWarning) as w:
assert (
abs(
utils.restricted_mean_survival_time(exp, t=lambda_)
- utils.restricted_mean_survival_time(exp.survival_function_, t=lambda_)
)
< 0.001
)
def test_rmst_variance():
T = np.random.exponential(2, 1000)
expf = ExponentialFitter().fit(T)
hazard = 1 / expf.lambda_
t = 1
sq = 2 / hazard ** 2 * (1 - np.exp(-hazard * t) * (1 + hazard * t))
actual_mean = 1 / hazard * (1 - np.exp(-hazard * t))
actual_var = sq - actual_mean ** 2
assert abs(utils.restricted_mean_survival_time(expf, t=t, return_variance=True)[0] - actual_mean) < 0.001
assert abs(utils.restricted_mean_survival_time(expf, t=t, return_variance=True)[1] - actual_var) < 0.001
def test_find_best_parametric_model():
T = np.random.exponential(2, 1000)
E = np.ones_like(T)
model, score = utils.find_best_parametric_model(T, E)
assert True
def test_find_best_parametric_model_can_accept_other_models():
T = np.random.exponential(2, 1000)
model, score = utils.find_best_parametric_model(T, additional_models=[ExponentialFitter(), ExponentialFitter()])
assert True
def test_find_best_parametric_model_with_BIC():
T = np.random.exponential(2, 1000)
model, score = utils.find_best_parametric_model(T, scoring_method="BIC")
assert True
def test_find_best_parametric_model_works_for_left_censoring():
T = np.random.exponential(2, 100)
model, score = utils.find_best_parametric_model(T, censoring_type="left", show_progress=True)
assert True
def test_find_best_parametric_model_works_for_interval_censoring():
T_1 = np.random.exponential(2, 100)
T_2 = T_1 + 1
model, score = utils.find_best_parametric_model((T_1, T_2), censoring_type="interval", show_progress=True)
assert True
def test_find_best_parametric_model_works_with_weights_and_entry():
T = np.random.exponential(5, 100)
W = np.random.randint(1, 5, size=100)
entry = np.random.exponential(0.01, 100)
model, score = utils.find_best_parametric_model(T, weights=W, entry=entry, show_progress=True)
assert True
def test_safe_exp():
from lifelines.utils.safe_exp import MAX
assert safe_exp(4.0) == np.exp(4.0)
assert safe_exp(MAX) == np.exp(MAX)
assert safe_exp(MAX + 1) == np.exp(MAX)
from autograd import grad
assert grad(safe_exp)(4.0) == np.exp(4.0)
assert grad(safe_exp)(MAX) == np.exp(MAX)
assert grad(safe_exp)(MAX + 1) == np.exp(MAX)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import ctypes
import platform
import cgi
import re
import hashlib
import stat
import codecs
import common.print_color
environment_check_shm = None
global_opts = None
global_cahce = dict(id_offset=0, services_type=[])
server_opts = None
server_name = ''
server_index = 1
server_alloc_listen_port = 0
server_proxy_addr = ''
server_cache_id = None
server_cache_full_name = None
server_cache_ip = dict()
project_templete_engine_lookup = None
def set_global_opts(opts, id_offset):
global global_opts
global global_cahce
global_opts = opts
global_cahce['id_offset'] = id_offset
for server_type in sorted(opts.items('atservice'), key=lambda x: int(x[1])):
if 'atgateway' == server_type[0]:
continue
svr_name = 'server.{0}'.format(server_type[0])
if global_opts.has_section(svr_name):
global_cahce['services_type'].append(server_type[0])
def set_templete_engine(engine):
global project_templete_engine_lookup
project_templete_engine_lookup = engine
def render_string(content, **render_options):
from mako.template import Template
tmpl = Template(content)
return tmpl.render(**render_options)
def render(template_name, **render_options):
if project_templete_engine_lookup is None:
common.print_color.cprintf_stderr([common.print_color.print_style.FC_RED, common.print_color.print_style.FW_BOLD],
'template not available now\r\n')
return ""
tmpl = project_templete_engine_lookup.get_template(template_name)
if tmpl is None:
common.print_color.cprintf_stderr([common.print_color.print_style.FC_RED, common.print_color.print_style.FW_BOLD],
'template {0} not found\r\n', template_name)
return ""
return tmpl.render(**render_options)
def render_to(template_name, output_path, **render_options):
dir_path = os.path.dirname(output_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
output_file = codecs.open(output_path, mode='w', encoding='utf-8')
if not output_file:
common.print_color.cprintf_stderr([common.print_color.print_style.FC_RED, common.print_color.print_style.FW_BOLD],
'try to render {0} but open {1} for writing failed\r\n', template_name, output_path)
return
output_file.write(render(template_name, **render_options))
os.chmod(output_path, stat.S_IRWXU + stat.S_IRWXG + stat.S_IROTH + stat.S_IXOTH)
def get_service_index_range(number=1):
return range(1 + global_cahce['id_offset'], 1 + global_cahce['id_offset'] + number)
def get_global_all_services():
return global_cahce['services_type']
def set_server_inst(opts, key, index):
global server_opts
global server_name
global server_index
global server_cache_id
global server_cache_full_name
server_opts = opts
server_name = key
server_index = index
server_cache_id = None
server_cache_full_name = None
def get_ipv4_level(ip_addr):
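    # Classify an IPv4 address into a preference level (0 = public, 1-3 = private, higher = less usable); detected addresses are sorted by this level.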
ip_addrs = [int(x) for x in ip_addr.split('.')]
# => invalid ipv4 level 99
if len(ip_addrs) != 4:
return 99
# 10.0.0.0/8 => private level 1
# 172.16.0.0/12 => private level 2
# 192.168.0.0/16 => private level 3
if ip_addrs[0] == 10:
return 1
    if ip_addrs[0] == 172 and (ip_addrs[1] & 0xF0) == 0x10:
return 2
if ip_addrs[0] == 192 and ip_addrs[1] == 168:
return 3
# 169.254.0.0/16 => link-local level 11
if ip_addrs[0] == 169 and ip_addrs[1] == 254:
return 11
# 127.0.0.0/8 => loopback level 21
if ip_addrs[0] == 127:
return 21
    # 224.0.0.0/4 => multicast level 31
    if (ip_addrs[0] & 0xF0) == 0xE0:
return 31
# 240.0.0.0/4 => for-test level 32
if (ip_addrs[0] & 0xF0) == 0xF0:
return 32
    # 255.255.255.255 => broadcast level 51
if ip_addrs[0] == 255 and ip_addrs[1] == 255 and ip_addrs[2] == 255 and ip_addrs[3] == 255:
return 51
# public address => level 0
return 0
def is_ipv4_link_local(ip_addr):
return get_ipv4_level(ip_addr) >= 11
def is_ipv6_link_local(ip_addr):
ip_addr = ip_addr.lower()
if ip_addr == "::1" or ip_addr == "0:0:0:0:0:0:0:1":
return True
# fe80:/10 => Link local address
# FEC0:/10 => Site local address
if len(ip_addr) > 4 and ip_addr[0:4] == "fe80":
return True
# IPv4-mapped IPv6 addresses
if ip_addr == '::127.0.0.1' or ip_addr == '::ffff:127.0.0.1':
return True
return False
def get_ip_list_v4():
global server_cache_ip
if 'ipv4' not in server_cache_ip:
import socket
server_cache_ip['ipv4'] = []
try:
for ip_pair in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET, socket.SOCK_STREAM):
ip_addr = ip_pair[4][0]
if not is_ipv4_link_local(ip_addr):
server_cache_ip['ipv4'].append(ip_addr)
            # fall back to a UDP socket to detect the IPv4 address if none was found
if 0 == len(server_cache_ip['ipv4']):
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 53)) # use google's DNS
res = csock.getsockname()
if res:
server_cache_ip['ipv4'].append(res[0])
csock.close()
server_cache_ip['ipv4'] = sorted(server_cache_ip['ipv4'], key=get_ipv4_level)
except:
pass
return server_cache_ip['ipv4']
def get_ip_list_v6():
global server_cache_ip
if 'ipv6' not in server_cache_ip:
import socket
server_cache_ip['ipv6'] = []
try:
for ip_pair in socket.getaddrinfo(socket.gethostname(), 0, socket.AF_INET6, socket.SOCK_STREAM):
ip_addr = ip_pair[4][0]
interface_index = ip_addr.find('%')
# remove interface name
if interface_index > 0:
ip_addr = ip_addr[0:interface_index]
if not is_ipv6_link_local(ip_addr):
server_cache_ip['ipv6'].append(ip_addr)
            # fall back to a UDP socket to detect the IPv6 address if none was found
if 0 == len(server_cache_ip['ipv6']):
csock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
csock.connect(('2001:4860:4860::8888', 53)) # use google's DNS
res = csock.getsockname()
if res:
ip_addr = res[0]
interface_index = ip_addr.find('%')
# remove interface name
if interface_index > 0:
ip_addr = ip_addr[0:interface_index]
if not is_ipv6_link_local(ip_addr):
server_cache_ip['ipv6'].append(ip_addr)
csock.close()
except:
pass
return server_cache_ip['ipv6']
def is_ip_v6_enabled():
ipv6s = get_ip_list_v6()
return len(ipv6s) > 0
def get_inner_ipv4():
if 'SYSTEM_MACRO_INNER_IPV4' in os.environ:
return os.environ['SYSTEM_MACRO_INNER_IPV4']
# detect inner ip address
res = get_ip_list_v4()
if 0 == len(res):
return '127.0.0.1'
return res[0]
def get_outer_ipv4():
if 'SYSTEM_MACRO_OUTER_IPV4' in os.environ:
return os.environ['SYSTEM_MACRO_OUTER_IPV4']
    # detect the outer ip address
res = get_ip_list_v4()
if 0 == len(res):
return '0.0.0.0'
ret = res[0]
if '127.0.0.1' == ret:
ret = '0.0.0.0'
return ret
def get_inner_ipv6():
if 'SYSTEM_MACRO_INNER_IPV6' in os.environ:
return os.environ['SYSTEM_MACRO_INNER_IPV6']
# detect inner ip address
res = get_ip_list_v6()
if 0 == len(res):
return '::1'
return res[0]
def get_outer_ipv6():
if 'SYSTEM_MACRO_OUTER_IPV6' in os.environ:
return os.environ['SYSTEM_MACRO_OUTER_IPV6']
    # detect the outer ip address
res = get_ip_list_v6()
if 0 == len(res):
return '::'
ret = res[0]
if '::1' == ret:
ret = '::'
return ret
def get_global_option(section, key, default_val, env_name=None):
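    # An environment variable named env_name, when set, takes precedence over the value in the config file.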
global global_opts
if not env_name is None and env_name in os.environ:
return os.environ[env_name]
if global_opts.has_option(section, key):
return global_opts.get(section, key)
return default_val
def get_hostname():
global server_cache_ip
if 'hostname' not in server_cache_ip:
server_cache_ip['hostname'] = get_global_option(
'atsystem', 'hostname', '', 'SYSTEM_MACRO_HOSTNAME')
server_cache_ip['hostname_is_uuid'] = False
if server_cache_ip['hostname'] is None or len(server_cache_ip['hostname']) == 0:
            # use the uuid module to derive an id from the MAC (physical) address
import uuid
server_cache_ip['hostname'] = uuid.UUID(
int=uuid.getnode()).hex[-12:]
server_cache_ip['hostname_is_uuid'] = True
return server_cache_ip['hostname'], server_cache_ip['hostname_is_uuid']
def str_to_list(val):
ret = []
if val is None:
return ret
for item in str(val).split(','):
item_strip = item.strip()
if len(item_strip) > 0:
ret.append(item_strip)
return ret
def list_to_hosts(val):
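    # Expand "host:start-end[suffix]" range entries into one "host:port[suffix]" entry per port; other entries pass through unchanged.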
ret = []
    mat = re.compile(r'(.*):(\d+)-(\d+)(.*)$')
for item in val:
mat_res = mat.match(item)
if not mat_res is None:
for i in range(int(mat_res.group(2)), int(mat_res.group(3)) + 1):
ret.append('{0}:{1}{2}'.format(
mat_res.group(1), i, mat_res.group(4)))
else:
ret.append(item)
return ret
def str_to_hosts(val):
return list_to_hosts(str_to_list(val))
def get_global_list(section, key, default_val, env_name=None):
res = get_global_option(section, key, default_val, env_name)
if res is None:
return []
return str_to_list(res)
def get_global_list_to_hosts(section, key, default_val, env_name=None):
res = get_global_list(section, key, default_val, env_name)
return list_to_hosts(res)
def get_global_option_bool(section, key, default_val, env_name=None):
val = get_global_option(section, key, default_val, env_name)
if not val:
return False
val = str(val).lower().strip()
return len(val) > 0 and '0' != val and 'false' != val and 'no' != val and 'disable' != val
def get_server_name():
global server_name
return server_name
def get_server_type_id(server_name=None):
if server_name is None:
server_name = get_server_name()
if not global_opts.has_option('atservice', server_name):
return 0
return int(get_global_option('atservice', server_name, 0))
def get_server_option(key, default_val, env_name=None):
return get_global_option('server.{0}'.format(get_server_name()), key, default_val, env_name)
def get_server_list(key, default_val, env_name=None):
return get_global_list('server.{0}'.format(get_server_name()), key, default_val, env_name)
def get_server_list_to_hosts(key, default_val, env_name=None):
return get_global_list_to_hosts('server.{0}'.format(get_server_name()), key, default_val, env_name)
def get_server_option_bool(key, default_val, env_name=None):
return get_global_option_bool('server.{0}'.format(get_server_name()), key, default_val, env_name)
def get_server_or_global_option(section, key, default_val, env_name=None):
ret = get_server_option('{0}.{1}'.format(section, key), None, None)
if ret is None:
return get_global_option(section, key, default_val, env_name)
return ret
def get_server_or_global_list(section, key, default_val, env_name=None):
ret = get_server_list('{0}.{1}'.format(section, key), None, None)
if ret is None or len(ret) == 0:
return get_global_list(section, key, default_val, env_name)
return ret
def get_server_or_global_list_to_hosts(section, key, default_val, env_name=None):
ret = get_server_list_to_hosts('{0}.{1}'.format(section, key), None, None)
if ret is None or len(ret) == 0:
return get_global_list_to_hosts(section, key, default_val, env_name)
return ret
def get_server_or_global_bool(section, key, default_val, env_name=None):
try_section_name = '{0}.{1}'.format(section, key)
if get_server_option(try_section_name, None) is None:
return get_global_option_bool(section, key, default_val, env_name)
else:
return get_server_option_bool(try_section_name, default_val, env_name)
def get_server_index():
global server_index
return server_index
def get_server_group_inner_id(server_name=None, server_index=None):
global global_opts
if server_name is None:
server_name = get_server_name()
if server_index is None:
server_index = get_server_index()
if not global_opts.has_option('atservice', server_name):
return 0
type_step = int(get_global_option('global', 'type_step', 0x100))
type_id = int(get_global_option('atservice', server_name, 0))
return type_step * type_id + server_index
def get_server_proc_id(server_name=None, server_index=None):
group_id = int(get_global_option(
'global', 'group_id', 1, 'SYSTEM_MACRO_GROUP_ID'))
group_step = int(get_global_option('global', 'group_step',
0x10000, 'SYSTEM_MACRO_GROUP_STEP'))
return group_id * group_step + get_server_group_inner_id(server_name, server_index)
def get_server_id():
global server_cache_id
global global_opts
if not server_cache_id is None:
return server_cache_id
if not global_opts.has_option('atservice', get_server_name()):
return 0
server_cache_id = get_server_proc_id()
return server_cache_id
def get_server_full_name():
global server_cache_full_name
if not server_cache_full_name is None:
return server_cache_full_name
server_cache_full_name = '{0}-{1}'.format(
get_server_name(), get_server_index())
return server_cache_full_name
def get_log_level():
return get_global_option('global', 'log_level', 'debug', 'SYSTEM_MACRO_CUSTOM_LOG_LEVEL')
def get_log_dir():
return get_global_option('global', 'log_dir', '../log', 'SYSTEM_MACRO_CUSTOM_LOG_DIR')
def get_server_atbus_shm():
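    # Return an "shm://<key>" listen address when System V shared memory is available, otherwise None.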
global environment_check_shm
if environment_check_shm is None:
        # check whether the system supports System V shared memory
if not os.path.exists('/proc/sys/kernel/shmmax'):
environment_check_shm = False
else:
shm_max_sz = int(open('/proc/sys/kernel/shmmax', 'r').read())
environment_check_shm = shm_max_sz > 0
if not environment_check_shm:
return None
port_offset = int(get_global_option('global', 'port_offset', 0, 'SYSTEM_MACRO_GLOBAL_PORT_OFFSET'))
base_key = int(get_global_option('atsystem', 'shm_key_pool', 0x16000000, 'SYSTEM_MACRO_CUSTOM_SHM_KEY'))
shm_key_offset = int(get_global_option('atsystem', 'shm_key_offset', 0, 'SYSTEM_MACRO_CUSTOM_SHM_KEY_OFFSET'))
shm_key = base_key + shm_key_offset + get_server_group_inner_id(get_server_name(), get_server_index()) + port_offset
return 'shm://{0}'.format(hex(shm_key))
def disable_server_atbus_shm():
global environment_check_shm
environment_check_shm = False
def get_calc_listen_port(server_name=None, server_index=None, base_port='port'):
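    # Compute the listen port: an explicit per-server port plus index and global offset when configured, otherwise base port + type_step * index + type id + offset.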
if server_name is None:
server_name = get_server_name()
if server_index is None:
server_index = get_server_index()
ret = int(get_global_option(
'server.{0}'.format(server_name), base_port, 0))
port_offset = int(get_global_option('global', 'port_offset', 0, 'SYSTEM_MACRO_GLOBAL_PORT_OFFSET'))
if ret == 0:
base_port = int(get_global_option(
'atsystem', 'listen_port', 12000, 'SYSTEM_MACRO_CUSTOM_BASE_PORT'))
type_step = int(get_global_option('global', 'type_step', 0x100))
type_id = int(get_global_option('atservice', server_name, 0))
return base_port + type_step * server_index + type_id + port_offset
else:
return ret + server_index + port_offset
def get_server_atbus_port():
return get_calc_listen_port()
def get_server_atbus_tcp():
if is_ip_v6_enabled():
if 'atproxy' == get_server_name():
return 'ipv6://{0}:{1}'.format(get_outer_ipv6(), get_server_atbus_port())
else:
return 'ipv6://{0}:{1}'.format(get_inner_ipv6(), get_server_atbus_port())
else:
if 'atproxy' == get_server_name():
return 'ipv4://{0}:{1}'.format(get_outer_ipv4(), get_server_atbus_port())
else:
return 'ipv4://{0}:{1}'.format(get_inner_ipv4(), get_server_atbus_port())
def get_server_atbus_unix():
h = hashlib.sha1(__file__.encode('utf-8')).hexdigest()
if os.path.exists('/tmp'):
default_base = '/tmp/atapp/{0}/'.format(h)
elif os.path.exists('/run/tmp'):
default_base = '/run/tmp/atapp/{0}/'.format(h)
elif os.path.exists('/'):
default_base = '/tmp/atapp/{0}/'.format(h)
else:
default_base = './'
dir_path = get_global_option(
'atsystem', 'unix_sock_dir', default_base, 'SYSTEM_MACRO_CUSTOM_UNIX_SOCK_DIR')
return 'unix://{0}{1}-{2:x}.sock'.format(dir_path, get_server_full_name(), get_server_id())
def get_server_atbus_listen():
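    # Build the atbus listen list: shared memory first when available, then a unix socket if supported, falling back to tcp (atproxy always listens on tcp).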
global server_cache_ip
ret = []
res = get_server_atbus_shm()
if not res is None:
ret.append(res)
if 'support_unix_sock' not in server_cache_ip:
import socket
if 'AF_UNIX' in socket.__dict__:
            # probe unix sockets: AF_UNIX may be defined but not actually usable
test_file_path = 'project-utils-test-unixx-sock.sock'
try:
test_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
test_sock.bind(test_file_path)
test_sock.close()
server_cache_ip['support_unix_sock'] = True
except:
server_cache_ip['support_unix_sock'] = False
if os.path.exists(test_file_path):
os.remove(test_file_path)
else:
server_cache_ip['support_unix_sock'] = False
if 0 == len(ret) or False == server_cache_ip['support_unix_sock'] or 'atproxy' == get_server_name():
ret.append(get_server_atbus_tcp())
else:
ret.append(get_server_atbus_unix())
return ret
def disable_server_atbus_unix_sock():
global server_cache_ip
server_cache_ip['support_unix_sock'] = False
def get_server_proxy():
global server_proxy_addr
if 'atproxy' == get_server_name():
server_proxy_addr = get_server_atbus_tcp()
return ''
return server_proxy_addr
def get_server_subnets():
ret = []
for subnet in get_server_list('subnets', ['0/0']):
if subnet.isdigit():
ret.append('0/{0}'.format(subnet))
else:
ret.append(subnet)
return ret
def get_server_recv_buffer_size():
return get_global_option('atsystem', 'shm_channel_size', 8 * 1024 * 1024)
def get_server_send_buffer_size():
return get_global_option('atsystem', 'iostream_channel_size', 2 * 1024 * 1024)
def get_server_gateway_index(server_name=None, server_index=None, gateway_name=None):
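    # Gateway index = index_type_number * server_index + the offset configured as index_map_<server_name> in the gateway section.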
if server_name is None:
server_name = get_server_name()
if server_index is None:
server_index = get_server_index()
if gateway_name is None:
gateway_name = 'atgateway'
gateway_section_name = 'server.{0}'.format(gateway_name)
step = int(get_global_option(gateway_section_name, 'index_type_number', 1))
offset = get_global_option(gateway_section_name, 'index_map_{0}'.format(server_name), None)
if offset is None:
raise Exception(
'index_map_{0} is not found in {1}'.format(server_name, gateway_section_name))
return step * server_index + int(offset)
def get_server_gateway_port(server_name, server_index, gateway_name=None, base_port='default_port'):
if gateway_name is None:
gateway_name = 'atgateway'
gateway_section_name = 'server.{0}'.format(gateway_name)
ret = int(get_global_option(gateway_section_name, base_port, 0))
port_offset = int(get_global_option('global', 'port_offset', 0, 'SYSTEM_MACRO_GLOBAL_PORT_OFFSET'))
if ret <= 0:
ret = int(get_global_option('server.{0}'.format(gateway_name), 'default_port', 8000))
return ret + get_server_gateway_index(server_name, server_index, gateway_name) + port_offset
def get_gateway_server_names(gateway_name=None):
global global_opts
ret = []
if gateway_name is None:
gateway_name = 'atgateway'
for maybe_svr_name in global_opts.options('server.{0}'.format(gateway_name)):
if maybe_svr_name[0:10] != "index_map_":
continue
ret.append(maybe_svr_name[10:])
return ret
def get_etcd_client_urls():
etcd_number = int(get_global_option('server.etcd', 'number', '0'))
if etcd_number <= 0:
return get_server_or_global_option('etcd', 'hosts', 'http://127.0.0.1:2379', 'SYSTEM_MACRO_CUSTOM_ETCD_HOST')
client_urls = []
for svr_index in get_service_index_range(etcd_number):
client_urls.append('http://{0}:{1}'.format(get_outer_ipv4(), get_calc_listen_port('etcd', svr_index, 'client_port')))
return ','.join(client_urls)
|
import keyboard
from utils.custom_mouse import mouse
from char import IChar
from pather import Pather
from logger import Logger
from screen import convert_abs_to_monitor, convert_screen_to_abs, grab
from config import Config
from utils.misc import wait, rotate_vec, unit_vector
import random
from pather import Location
import numpy as np
class Trapsin(IChar):
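    # Trap Assassin: buffs with Fade / Shadow Warrior / Burst of Speed and attacks by laying Lightning Sentry and Death Sentry traps.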
def __init__(self, skill_hotkeys: dict, pather: Pather):
Logger.info("Setting up Trapsin")
super().__init__(skill_hotkeys)
self._pather = pather
def pre_buff(self):
if Config().char["cta_available"]:
self._pre_buff_cta()
if self._skill_hotkeys["fade"]:
keyboard.send(self._skill_hotkeys["fade"])
wait(0.1, 0.13)
mouse.click(button="right")
wait(self._cast_duration)
if self._skill_hotkeys["shadow_warrior"]:
keyboard.send(self._skill_hotkeys["shadow_warrior"])
wait(0.1, 0.13)
mouse.click(button="right")
wait(self._cast_duration)
if self._skill_hotkeys["burst_of_speed"]:
keyboard.send(self._skill_hotkeys["burst_of_speed"])
wait(0.1, 0.13)
mouse.click(button="right")
wait(self._cast_duration)
def _left_attack(self, cast_pos_abs: tuple[float, float], spray: int = 10):
keyboard.send(Config().char["stand_still"], do_release=False)
if self._skill_hotkeys["skill_left"]:
keyboard.send(self._skill_hotkeys["skill_left"])
for _ in range(4):
x = cast_pos_abs[0] + (random.random() * 2*spray - spray)
y = cast_pos_abs[1] + (random.random() * 2*spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
mouse.move(*cast_pos_monitor)
mouse.press(button="left")
wait(0.2, 0.3)
mouse.release(button="left")
keyboard.send(Config().char["stand_still"], do_press=False)
def _right_attack(self, cast_pos_abs: tuple[float, float], spray: float = 10):
keyboard.send(self._skill_hotkeys["lightning_sentry"])
x = cast_pos_abs[0] + (random.random() * 2 * spray - spray)
y = cast_pos_abs[1] + (random.random() * 2 * spray - spray)
cast_pos_monitor = convert_abs_to_monitor((x, y))
mouse.move(*cast_pos_monitor)
def atk(num: int):
for _ in range(num):
mouse.press(button="right")
wait(0.20)
mouse.release(button="right")
wait(0.15)
atk(4)
keyboard.send(self._skill_hotkeys["death_sentry"])
atk(1)
def kill_pindle(self) -> bool:
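        # each loop below casts one right-hand and one left-hand attack sequence, so the configured attack length is halved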
atk_len = max(1, int(Config().char["atk_len_pindle"] / 2))
pindle_pos_abs = convert_screen_to_abs(Config().path["pindle_end"][0])
cast_pos_abs = [pindle_pos_abs[0] * 0.9, pindle_pos_abs[1] * 0.9]
for _ in range(atk_len):
self._right_attack(cast_pos_abs, 11)
self._left_attack(cast_pos_abs, 11)
# Move to items
wait(self._cast_duration, self._cast_duration + 0.2)
if self.capabilities.can_teleport_natively:
self._pather.traverse_nodes_fixed("pindle_end", self)
else:
self._pather.traverse_nodes((Location.A5_PINDLE_SAFE_DIST, Location.A5_PINDLE_END), self, force_tp=True)
return True
def kill_eldritch(self) -> bool:
atk_len = max(1, int(Config().char["atk_len_eldritch"] / 2))
eld_pos_abs = convert_screen_to_abs(Config().path["eldritch_end"][0])
cast_pos_abs = [eld_pos_abs[0] * 0.9, eld_pos_abs[1] * 0.9]
for _ in range(atk_len):
self._right_attack(cast_pos_abs, 90)
self._left_attack(cast_pos_abs, 90)
# Move to items
wait(self._cast_duration, self._cast_duration + 0.2)
if self.capabilities.can_teleport_natively:
self._pather.traverse_nodes_fixed("eldritch_end", self)
else:
self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, timeout=0.6, force_tp=True)
return True
def kill_shenk(self) -> bool:
atk_len = max(1, int(Config().char["atk_len_shenk"] / 2))
shenk_pos_abs = self._pather.find_abs_node_pos(149, grab())
if shenk_pos_abs is None:
shenk_pos_abs = convert_screen_to_abs(Config().path["shenk_end"][0])
cast_pos_abs = [shenk_pos_abs[0] * 0.9, shenk_pos_abs[1] * 0.9]
for _ in range(atk_len):
self._right_attack(cast_pos_abs, 90)
self._left_attack(cast_pos_abs, 90)
# Move to items
wait(self._cast_duration, self._cast_duration + 0.2)
self._pather.traverse_nodes((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), self, timeout=1.4, force_tp=True)
return True
def kill_nihlathak(self, end_nodes: list[int]) -> bool:
        # Find Nihlathak's position
atk_len = max(1, int(Config().char["atk_len_nihlathak"] / 2))
for i in range(atk_len):
nihlathak_pos_abs = self._pather.find_abs_node_pos(end_nodes[-1], grab())
if nihlathak_pos_abs is None:
return False
cast_pos_abs = np.array([nihlathak_pos_abs[0] * 0.9, nihlathak_pos_abs[1] * 0.9])
self._left_attack(cast_pos_abs, 90)
self._right_attack(cast_pos_abs, 90)
# Do some tele "dancing" after each sequence
if i < atk_len - 1:
rot_deg = random.randint(-10, 10) if i % 2 == 0 else random.randint(170, 190)
tele_pos_abs = unit_vector(rotate_vec(cast_pos_abs, rot_deg)) * 100
pos_m = convert_abs_to_monitor(tele_pos_abs)
self.pre_move()
self.move(pos_m)
# Move to items
wait(self._cast_duration, self._cast_duration + 0.2)
self._pather.traverse_nodes(end_nodes, self, timeout=0.8)
return True
if __name__ == "__main__":
import os
import keyboard
keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))
keyboard.wait("f11")
from config import Config
from char import Trapsin
pather = Pather()
    char = Trapsin(Config().trapsin, pather)  # the constructor above takes (skill_hotkeys, pather)
|
import copy
import math
import numpy as np
from tqdm import tqdm
from utils.model_util import triad_to_matrix, nonzero_user_mean, nonzero_item_mean
# similarity computation libraries
from scipy.stats import pearsonr
from sklearn.metrics.pairwise import cosine_similarity
class IPCCModel(object):
def __init__(self) -> None:
super().__init__()
        self.matrix = None  # QoS matrix
        self.u_mean = None  # per-user rating mean (used by the adjusted cosine similarity)
        self.i_mean = None  # per-item rating mean
        self.similarity_matrix = None  # item similarity matrix
        self._nan_symbol = -1  # missing-value marker (the dataset uses -1 for missing entries)
def _get_similarity_matrix(self, matrix, metric):
"""获取项目相似度矩阵
Args:
matrix (): QoS矩阵
metric (): 相似度计算方法, 可选参数: PCC(皮尔逊相关系数), COS(余弦相似度), ACOS(修正的余弦相似度)
"""
_m = copy.deepcopy(matrix)
_m[_m == self._nan_symbol] = 0 # 将缺失项用0代替,以便之后计算
n_items = matrix.shape[1]
similarity_matrix = np.zeros((n_items, n_items))
        # compute the similarity matrix
        for i in tqdm(range(n_items), desc="building similarity matrix"):
for j in range(i + 1, n_items):
col_i = _m[:, i]
col_j = _m[:, j]
                nonzero_i = np.nonzero(col_i)[0]  # indices of the non-zero entries
                nonzero_j = np.nonzero(col_j)[0]
                intersect = np.intersect1d(nonzero_i, nonzero_j)  # users who rated both item i and item j
if len(intersect) == 0:
sim = 0
else:
                    # compute the similarity of items i and j with the selected measure
try:
if metric == 'PCC':
                        # the Pearson correlation is undefined when all of an item's ratings are identical
if len(set(col_i[intersect])) == 1 or len(
set(col_j[intersect])) == 1:
sim = 0
else:
sim = pearsonr(col_i[intersect],
col_j[intersect])[0]
elif metric == 'COS':
                            # reshape to 2-D row vectors as required by sklearn's cosine_similarity
                            sim = cosine_similarity(col_i[intersect].reshape(1, -1),
                                                    col_j[intersect].reshape(1, -1))[0][0]
elif metric == 'ACOS':
sim = adjusted_cosine_similarity(
col_i, col_j, intersect, self.u_mean)
except Exception as e:
sim = 0
similarity_matrix[i][j] = similarity_matrix[j][i] = sim
return similarity_matrix
def _get_similarity_items(self, iid, topk=-1):
"""获取相似用户
Args:
iid (): 当前项目
topk (): 相似项目数量, -1表示不限制数量
Returns:
依照相似度从大到小排序, 与当前项目最为相似的前topk个相似项目
"""
assert isinstance(topk, int)
        ordered_sim_iid = (-self.similarity_matrix[iid]).argsort()  # item indices ordered by similarity, descending
if topk == -1:
return ordered_sim_iid
else:
assert topk > 0
return ordered_sim_iid[:topk]
def get_similarity(self, iid_a, iid_b):
"""传入两个uid,获取这两个用户的相似度
"""
if iid_a == iid_b:
return float(1)
if iid_a + 1 > self.matrix.shape[1] or iid_b + 1 > self.matrix.shape[1]:
return 0
        if self.similarity_matrix is None:
            assert self.matrix is not None, "Please fit first e.g. model.fit()"
            # fall back to PCC here, since the metric chosen in fit() is not stored
            self.similarity_matrix = self._get_similarity_matrix(self.matrix, 'PCC')
return self.similarity_matrix[iid_a][iid_b]
def fit(self, triad, metric='PCC'):
"""训练模型
Args:
triad (): 数据三元组: (uid, iid, rating)
metric (): 相似度计算方法, 可选参数: PCC(皮尔逊相关系数), COS(余弦相似度), ACOS(修正的余弦相似度)
"""
self.matrix = triad_to_matrix(triad, self._nan_symbol) # 数据三元组转QoS矩阵
self.u_mean = nonzero_user_mean(self.matrix,
self._nan_symbol) # 根据QoS矩阵计算每个用户的评分均值
# FIXME 考虑i_mean为0的情况
self.i_mean = nonzero_item_mean(self.matrix,
self._nan_symbol) # 根据QoS矩阵计算每个项目的评分均值
self.similarity_matrix = self._get_similarity_matrix(
self.matrix, metric) # 根据QoS矩阵获取项目相似矩阵
def predict(self, triad, topK=-1):
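        # prediction: item mean + sum(sim * (user's rating of similar item - that item's mean)) / sum(|sim|) over the topK similar items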
        y_list = []  # ground-truth ratings
        y_pred_list = []  # predicted ratings
        cold_boot_cnt = 0  # cold-start counter
for row in tqdm(triad, desc="Predict... "):
uid, iid, rate = int(row[0]), int(row[1]), float(row[2])
            # cold start: an item unseen during fit has no similarity information, so its rating cannot be predicted
if iid + 1 > self.matrix.shape[1]:
cold_boot_cnt += 1
continue
i_mean = self.i_mean[iid]
similarity_items = self._get_similarity_items(iid, topK)
            up = 0  # numerator
            down = 0  # denominator
            # for every item similar to the current item
            for sim_iid in similarity_items:
                sim_item_rate = self.matrix[uid][sim_iid]  # the current user's rating of the similar item
                similarity = self.get_similarity(iid, sim_iid)
                # skip similar items the current user has not rated
                if sim_item_rate == self._nan_symbol:
                    continue
                up += similarity * (sim_item_rate - self.i_mean[sim_iid])  # similarity * (rating - similar item's mean)
                down += abs(similarity)  # sum of absolute similarities
if down != 0:
y_pred = i_mean + up / down
else:
y_pred = 0
y_pred_list.append(y_pred)
y_list.append(rate)
print(f"cold boot :{cold_boot_cnt / len(triad) * 100:4f}%")
return y_list, y_pred_list
def adjusted_cosine_similarity(x, y, intersect, u_mean):
"""修正的余弦相似度
Returns:
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
if len(intersect) < 2:
raise ValueError('there must be at least two non-zero entries')
x = np.asarray(x)
y = np.asarray(y)
multiply_sum = sum(
(x[i] - u_mean[i]) * (y[i] - u_mean[i]) for i in intersect)
pow_sum_x = sum(math.pow(x[i] - u_mean[i], 2) for i in intersect)
pow_sum_y = sum(math.pow(y[i] - u_mean[i], 2) for i in intersect)
return multiply_sum / math.sqrt(pow_sum_x * pow_sum_y)
if __name__ == "__main__":
triad = np.array([
[0, 0, 0],
[0, 1, 0],
[1, 0, 1],
[1, 1, 3],
[1, 2, 4],
[2, 0, 2],
[2, 1, 3],
[2, 2, 5],
])
test = np.array([[0, 2, 3]])
ipcc = IPCCModel()
ipcc.fit(triad)
ipcc.predict(test, 20)
|
from date_class import Date
def test_init_repr(day, month, year):
obj = Date(day, month, year)
print(obj)
def test_get_day(d, m, y):
obj = Date(d, m, y)
print(obj.get_day())
def test_set_day(d, m, y, day):
obj = Date(d, m, y)
obj.set_day(day)
print(obj)
def test_get_month(d, m, y):
obj = Date(d, m, y)
print(obj.get_month())
def test_set_month(d, m, y, month):
obj = Date(d, m, y)
obj.set_month(month)
print(obj)
def test_get_year(d, m, y):
obj = Date(d, m, y)
print(obj.get_year())
def test_set_year(d, m, y, year):
obj = Date(d, m, y)
obj.set_year(year)
print(obj)
def test_add_day(d, m, y, day):
obj = Date(d, m, y)
obj.add_day(day)
print(obj)
def test_add_month(d, m, y, month):
obj = Date(d, m, y)
obj.add_month(month)
print(obj)
def test_add_year(d, m, y, year):
obj = Date(d, m, y)
obj.add_year(year)
print(obj)
def test_add_d_m_y(d, m, y, day, month, year):
obj = Date(d, m, y)
obj.add_day(day)
obj.add_month(month)
obj.add_year(year)
print(obj)
def test_leap_year(year):
obj = Date(29, 2, year)
print(obj)
|
# Copyright (c) 2015 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
class NetSharkException(Exception):
pass
|