"""
# Copyright 2021 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import os
from pathlib import Path
from imageops.utils import Utils
from imageops.logger import Logger
class Server(object):
"""
Backend server for imageops API
The request_id is the only input param, which is used to identify this request
"""
logger = Logger(__name__).get_logger()
def __init__(self, request_id=None):
"""
Init Server class
"""
if not request_id:
msg = 'Lacking request_id.'
self.logger.error(msg)
raise ValueError(msg)
self.request_id = str(request_id)
if not os.getenv('TMP_PATH'):
msg = 'No TMP_PATH found in env.'
self.logger.error(msg)
raise ValueError(msg)
self.tmp_path = os.getenv('TMP_PATH')
if not os.getenv('IMAGE_PATH'):
msg = 'No IMAGE_PATH found in env.'
self.logger.error(msg)
raise ValueError(msg)
self.image_path = os.getenv('IMAGE_PATH')
self.check_record_file = 'check_info.json'
self.compress_record_file = 'compress_status.log'
self.check_rc = {0: 'Check Completed, the image is (now) consistent',
1: 'Check completed, image is corrupted',
2: 'Check completed, image has leaked clusters, but is not corrupted',
3: 'Check failed',
4: 'Check in Progress',
5: 'Check Exiting because this type of image is not supported',
6: 'Check Time Out'}
self.compress_rc = {0: 'Compress Completed',
1: 'Compress In Progress',
2: 'Compress Failed',
3: 'Compress Exiting because of not enough space left',
4: 'Compress Time Out'}
def check_vm_image(self, input_image=None):
"""
Check the input VM image to get its checksum and basic info such as type and size
"""
self.logger.info('Start to check VM image %s ...', input_image)
if not input_image:
msg = 'No image is given to do the check.'
self.logger.error(msg)
raise ValueError(msg)
image = Path(input_image)
if not image.is_file():
msg = 'Given image {} does not exist.'.format(input_image)
self.logger.error(msg)
raise ValueError(msg)
# Build the record file path up front so the except branch below can always write to it.
check_record_path = os.path.join(self.tmp_path, self.request_id)
check_record_file = os.path.join(check_record_path, self.check_record_file)
try:
    os.makedirs(check_record_path)
check_info = {'checkResult': 4}
check_info['imageInfo'] = Utils.check_cmd_exec(input_image, check_record_file)
check_info['checksum'] = Utils.get_md5_checksum(input_image, check_record_file)
Utils.write_json_file(check_record_file, check_info)
status = 0
msg = 'Check In Progress'
except Exception as exception:
status = 1
msg = 'Check Failed'
check_info = {'checkResult': 99}
Utils.write_json_file(check_record_file, check_info)
self.logger.error(exception)
self.logger.info('Check image %s, status: %s, msg: %s', input_image, status, msg)
return status, msg
def get_check_status(self):
"""
Get the status of one check with the request ID
"""
self.logger.info('Start to get check status...')
check_info = {}
try:
check_record_file = os.path.join(self.tmp_path,
self.request_id,
self.check_record_file)
check_info = Utils.read_json_file(check_record_file)
self.logger.debug(check_info)
if not check_info.get('imageInfo'):
return 4, self.check_rc[4], check_info
image_info = check_info.get('imageInfo')
if image_info.get('filename'):
file_name = image_info.get('filename').split('/')[-1]
check_info['imageInfo']['filename'] = file_name
if check_info.get('checkResult') == 4 or not check_info.get('checksum'):
return 4, self.check_rc[4], check_info
if check_info.get('checkResult') == 99:
return 3, self.check_rc[3], check_info
if check_info.get('checkResult') == 100:
return 6, self.check_rc[6], check_info
if check_info.get('checkResult') == 63:
return 5, self.check_rc[5], check_info
if check_info.get('checkResult') == 0:
return 0, self.check_rc[0], check_info
if check_info.get('checkResult') == 2:
return 1, self.check_rc[1], check_info
if check_info.get('checkResult') == 3:
return 2, self.check_rc[2], check_info
return 3, self.check_rc[3], check_info
except IOError as io_exception:
self.logger.exception(io_exception)
return 3, '{}, {}'.format(self.check_rc[3], 'nonexistent request ID'), check_info
except Exception:
return 3, self.check_rc[3], check_info
def compress_vm_image(self, input_image=None, output_image=None):
"""
Compress the input VM image to produce a slim, sparsified one
Can also convert a raw image to a qcow2 one
"""
self.logger.info('Start to compress vm image %s ...', input_image)
if not input_image:
msg = 'No image is given.'
self.logger.error(msg)
raise ValueError(msg)
if not output_image:
msg = 'No output image path is given.'
self.logger.error(msg)
raise ValueError(msg)
image = Path(input_image)
if not image.is_file():
msg = 'Image {} does not exist.'.format(input_image)
self.logger.error(msg)
raise ValueError(msg)
# Build the record file path up front so the except branch below can always write to it.
compress_record_path = os.path.join(self.tmp_path, self.request_id)
compress_record_file = os.path.join(compress_record_path, self.compress_record_file)
try:
    os.makedirs(compress_record_path)
self.logger.info('Start to compress image %s ...', input_image)
if not Utils.check_compress_requires(input_image, self.tmp_path):
self.logger.error('Free disk space under %s is not enough to compress image %s',
self.tmp_path, input_image)
status = 1
msg = '{}'.format(self.compress_rc.get(3))
Utils.append_write_plain_file(compress_record_file, msg)
else:
self.logger.info('Free disk space under %s is enough to compress image %s',
self.tmp_path, input_image)
Utils.compress_cmd_exec(input_image, output_image, compress_record_file)
status = 0
msg = '{}'.format('Compress In Progress')
except Exception as exception:
self.logger.error(exception)
status = 1
msg = '{}'.format(self.compress_rc.get(2))
Utils.append_write_plain_file(compress_record_file, msg)
self.logger.info('Compress image %s with status: %s and msg: %s', input_image, status, msg)
return status, msg
def get_compress_status(self):
"""
Get the status of one compress with the request ID
"""
self.logger.info('Start to get status of compress image ...')
try:
compress_record_file = os.path.join(self.tmp_path,
self.request_id,
self.compress_record_file)
with open(compress_record_file, 'r') as compress_file:
for line in compress_file:
if self.compress_rc[0] in line:
self.logger.info(self.compress_rc[0])
return 0, self.compress_rc[0], 1
for item in [2, 3, 4]:
if self.compress_rc[item] in line:
self.logger.error(self.compress_rc[item])
return item, self.compress_rc[item], 0
except IOError as io_exception:
self.logger.exception(io_exception)
return 2, '{}, {}'.format(self.compress_rc[2], 'nonexistent request ID'), 0
except Exception as exception:
self.logger.exception(exception)
return 2, self.compress_rc[2], 0
try:
compress_rate = Utils.get_compress_rate(compress_record_file)
self.logger.info(self.compress_rc[1])
return 1, self.compress_rc[1], compress_rate
except Exception as exception:
self.logger.exception(exception)
return 2, self.compress_rc[2], 0
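# Illustrative usage sketch (not part of the original module). It assumes the
# TMP_PATH and IMAGE_PATH environment variables are set and that the image path
# below exists; the request ID, image path and polling loop are hypothetical.
if __name__ == '__main__':
    import time
    import uuid
    server = Server(request_id=str(uuid.uuid4()))
    server.check_vm_image('/tmp/images/test.qcow2')
    while True:
        code, msg, info = server.get_check_status()
        if code != 4:  # 4 means the check is still in progress
            break
        time.sleep(1)
    print(code, msg, info)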
|
"""
Compute the plane wave decomposition for an incident broadband plane wave
on an open circular array using a modal beamformer of finite order.
"""
import numpy as np
import matplotlib.pyplot as plt
import micarray
from micarray.util import db
Nsf = 50 # order of the incident sound field
N = 30 # order of modal beamformer/microphone array
pw_angle = 1.23 * np.pi # incidence angle of plane wave
pol_pwd = np.linspace(0, 2*np.pi, 180, endpoint=False) # angles for plane wave decomposition
k = np.linspace(0, 20, 100) # wavenumber vector
r = 1 # radius of array
# get uniform grid (microphone positions) of order N
pol, weights = micarray.modal.angular.grid_equal_polar_angle(N)
# pressure on the surface of an open cylinder for an incident plane wave
Bn = micarray.modal.radial.circular_pw(Nsf, k, r, setup='open')
D = micarray.modal.radial.circ_diagonal_mode_mat(Bn)
Psi_p = micarray.modal.angular.cht_matrix(Nsf, pol)
Psi_pw = micarray.modal.angular.cht_matrix(Nsf, pw_angle)
p = np.matmul(np.matmul(Psi_p, D), np.conj(Psi_pw.T))
p = np.squeeze(p)
# incident plane wave exhibiting infinite spatial bandwidth
# p = np.exp(1j * k[:, np.newaxis]*r * np.cos(pol - pw_angle))
# plane wave decomposition using modal beamforming
Bn = micarray.modal.radial.circular_pw(N, k, r, setup='open')
Dn, _ = micarray.modal.radial.regularize(1/Bn, 3000, 'softclip')
D = micarray.modal.radial.circ_diagonal_mode_mat(Dn)
Psi_p = micarray.modal.angular.cht_matrix(N, pol, weights)
Psi_q = micarray.modal.angular.cht_matrix(N, pol_pwd)
A_pwd = np.matmul(np.matmul(Psi_q, D), np.conj(Psi_p.T))
q_pwd = np.squeeze(np.matmul(A_pwd, np.expand_dims(p, 2)))
q_pwd_t = np.fft.fftshift(np.fft.irfft(q_pwd, axis=0), axes=0)
# visualize plane wave decomposition (aka beampattern)
plt.figure()
plt.pcolormesh(k, pol_pwd/np.pi, db(q_pwd.T), vmin=-40)
plt.colorbar()
plt.xlabel(r'$kr$')
plt.ylabel(r'$\phi / \pi$')
plt.title('Plane wave decomposition by modal beamformer (frequency domain)')
plt.savefig('modal_beamforming_open_circular_array_fd.png')
plt.figure()
plt.pcolormesh(range(2*len(k)-2), pol_pwd/np.pi, db(q_pwd_t.T), vmin=-40)
plt.colorbar()
plt.ylabel(r'$\phi / \pi$')
plt.title('Plane wave decomposition by modal beamformer (time domain)')
plt.savefig('modal_beamforming_open_circular_array_td.png')
|
# file eulexistdb/manager.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from eulexistdb.db import ExistDB
from eulexistdb.query import QuerySet
class Manager(object):
"""
Connect an :class:`~eulexistdb.models.XmlModel` to an
:class:`~eulexistdb.db.ExistDB` for easy querying.
Typically each :class:`~eulexistdb.models.XmlModel` will
have one or more ``Manager`` members. Like Django ``Manager`` objects
these offer a convenient way to access model-based queries. Like Django
``Manager`` objects, developers can `derive a child class`_ and override
:meth:`get_query_set` to modify the default ``QuerySet``. Unlike Django,
this implementation does not currently provide a default ``Manager`` for
every ``XmlModel``.
Developers should consult :class:`eulexistdb.query.QuerySet` for a
complete list of its methods. ``Manager`` directly exposes these
methods, forwarding them to the ``QuerySet`` returned by its own
:meth:`get_query_set`.
.. _derive a child class: http://docs.djangoproject.com/en/1.1/topics/db/managers/#modifying-initial-manager-querysets
"""
def __init__(self, xpath):
self.xpath = xpath
# NOTE: model needs to be patched in to a real XmlModel class after
# the fact. currently this is handled by XmlModelType metaclass
# logic.
self.model = None
def get_query_set(self):
"""
Get the default :class:`eulexistdb.query.QuerySet` returned
by this ``Manager``. Typically this returns a ``QuerySet`` based on
the ``Manager``'s `xpath`, evaluated in the
``settings.EXISTDB_ROOT_COLLECTION`` on a default
:class:`eulexistdb.db.ExistDB`.
This is a convenient point for developers to customize an object's
managers. Deriving a child class from Manager and overriding or
extending this method is a handy way to create custom queries
accessible from an :class:`~eulexistdb.models.XmlModel`.
"""
if hasattr(settings, 'EXISTDB_FULLTEXT_OPTIONS'):
fulltext_opts = settings.EXISTDB_FULLTEXT_OPTIONS
else:
fulltext_opts = {}
return QuerySet(model=self.model, xpath=self.xpath, using=ExistDB(),
collection=settings.EXISTDB_ROOT_COLLECTION,
fulltext_options=fulltext_opts)
#######################
# PROXIES TO QUERYSET #
#######################
def count(self):
return self.get_query_set().count()
def filter(self, *args, **kwargs):
return self.get_query_set().filter(*args, **kwargs)
def or_filter(self, *args, **kwargs):
return self.get_query_set().or_filter(*args, **kwargs)
def order_by(self, *args, **kwargs):
return self.get_query_set().order_by(*args, **kwargs)
def only(self, *args, **kwargs):
return self.get_query_set().only(*args, **kwargs)
def also(self, *args, **kwargs):
return self.get_query_set().also(*args, **kwargs)
def distinct(self):
return self.get_query_set().distinct()
def all(self):
return self.get_query_set().all()
def get(self, *args, **kwargs):
return self.get_query_set().get(*args, **kwargs)
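# Illustrative sketch (not part of the original module): deriving a child class
# of Manager to customize the default QuerySet, as described in the docstrings
# above. The ``document_type`` filter value is a hypothetical field lookup.
class ArticleManager(Manager):
    """Manager restricted to documents whose type is 'article'."""
    def get_query_set(self):
        return super(ArticleManager, self).get_query_set().filter(
            document_type='article')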
|
class Soma:
def __init__(self):
self.numeroDeCartas = list()
def set_numeroDeCartas(self, numero):
if numero == '':
numero = '1'
numero = numero[:]
self.numeroDeCartas.extend(numero)
def get_numeroDeCartas(self):
return self.numeroDeCartas
def ConverterPInt(self, converter):
convertidos = []
for c in converter:
convertidos.append(int(c))
return convertidos
def Somar(self):
return sum(self.ConverterPInt(self.get_numeroDeCartas()))
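# Illustrative usage sketch (not part of the original class): each call stores the
# individual digits of the given string, and Somar() adds them all up.
if __name__ == '__main__':
    soma = Soma()
    soma.set_numeroDeCartas('23')  # stores ['2', '3']
    soma.set_numeroDeCartas('')    # empty input counts as '1'
    print(soma.Somar())            # 2 + 3 + 1 = 6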
|
import torch, math
from torch.optim.optimizer import Optimizer
# RAdam + LARS
class Ralamb(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def __setstate__(self, state):
super(Ralamb, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ralamb does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, radam_step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
radam_step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
radam_step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = radam_step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
radam_step = p_data_fp32.clone()
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
radam_step.addcdiv_(-radam_step_size * group['lr'], exp_avg, denom)
else:
radam_step.add_(-radam_step_size * group['lr'], exp_avg)
radam_norm = radam_step.pow(2).sum().sqrt()
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
if weight_norm == 0 or radam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = radam_norm
state['trust_ratio'] = trust_ratio
if N_sma >= 5:
p_data_fp32.addcdiv_(-radam_step_size * group['lr'] * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step_size * group['lr'] * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
return loss
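# Illustrative usage sketch (not part of the original file). The optimizer above
# uses the pre-1.5 PyTorch `add_`/`addcmul_` calling convention, so this sketch
# assumes a matching (older) PyTorch version; the model and data are dummies.
if __name__ == '__main__':
    import torch.nn as nn
    model = nn.Linear(10, 1)
    optimizer = Ralamb(model.parameters(), lr=1e-3, weight_decay=1e-4)
    criterion = nn.MSELoss()
    x, y = torch.randn(32, 10), torch.randn(32, 1)
    for _ in range(100):
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()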
|
# RUN: %PYTHON %s | FileCheck %s
import gc
import io
import itertools
from mlir.ir import *
def run(f):
print("\nTEST:", f.__name__)
f()
gc.collect()
assert Context._get_live_count() == 0
# Verify iterator based traversal of the op/region/block hierarchy.
# CHECK-LABEL: TEST: testTraverseOpRegionBlockIterators
def testTraverseOpRegionBlockIterators():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""", ctx)
op = module.operation
assert op.context is ctx
# Get the block using iterators off of the named collections.
regions = list(op.regions)
blocks = list(regions[0].blocks)
# CHECK: MODULE REGIONS=1 BLOCKS=1
print(f"MODULE REGIONS={len(regions)} BLOCKS={len(blocks)}")
# Should verify.
# CHECK: .verify = True
print(f".verify = {module.operation.verify()}")
# Get the regions and blocks from the default collections.
default_regions = list(op)
default_blocks = list(default_regions[0])
# They should compare equal regardless of how obtained.
assert default_regions == regions
assert default_blocks == blocks
# Should be able to get the operations from either the named collection
# or the block.
operations = list(blocks[0].operations)
default_operations = list(blocks[0])
assert default_operations == operations
def walk_operations(indent, op):
for i, region in enumerate(op):
print(f"{indent}REGION {i}:")
for j, block in enumerate(region):
print(f"{indent} BLOCK {j}:")
for k, child_op in enumerate(block):
print(f"{indent} OP {k}: {child_op}")
walk_operations(indent + " ", child_op)
# CHECK: REGION 0:
# CHECK: BLOCK 0:
# CHECK: OP 0: func
# CHECK: REGION 0:
# CHECK: BLOCK 0:
# CHECK: OP 0: %0 = "custom.addi"
# CHECK: OP 1: return
walk_operations("", op)
run(testTraverseOpRegionBlockIterators)
# Verify index based traversal of the op/region/block hierarchy.
# CHECK-LABEL: TEST: testTraverseOpRegionBlockIndices
def testTraverseOpRegionBlockIndices():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""", ctx)
def walk_operations(indent, op):
for i in range(len(op.regions)):
region = op.regions[i]
print(f"{indent}REGION {i}:")
for j in range(len(region.blocks)):
block = region.blocks[j]
print(f"{indent} BLOCK {j}:")
for k in range(len(block.operations)):
child_op = block.operations[k]
print(f"{indent} OP {k}: {child_op}")
walk_operations(indent + " ", child_op)
# CHECK: REGION 0:
# CHECK: BLOCK 0:
# CHECK: OP 0: func
# CHECK: REGION 0:
# CHECK: BLOCK 0:
# CHECK: OP 0: %0 = "custom.addi"
# CHECK: OP 1: return
walk_operations("", module.operation)
run(testTraverseOpRegionBlockIndices)
# CHECK-LABEL: TEST: testBlockArgumentList
def testBlockArgumentList():
with Context() as ctx:
module = Module.parse(r"""
func @f1(%arg0: i32, %arg1: f64, %arg2: index) {
return
}
""", ctx)
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
assert len(entry_block.arguments) == 3
# CHECK: Argument 0, type i32
# CHECK: Argument 1, type f64
# CHECK: Argument 2, type index
for arg in entry_block.arguments:
print(f"Argument {arg.arg_number}, type {arg.type}")
new_type = IntegerType.get_signless(8 * (arg.arg_number + 1))
arg.set_type(new_type)
# CHECK: Argument 0, type i8
# CHECK: Argument 1, type i16
# CHECK: Argument 2, type i24
for arg in entry_block.arguments:
print(f"Argument {arg.arg_number}, type {arg.type}")
run(testBlockArgumentList)
# CHECK-LABEL: TEST: testOperationOperands
def testOperationOperands():
with Context() as ctx:
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) {
%0 = "test.producer"() : () -> i64
"test.consumer"(%arg0, %0) : (i32, i64) -> ()
return
}""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
consumer = entry_block.operations[1]
assert len(consumer.operands) == 2
# CHECK: Operand 0, type i32
# CHECK: Operand 1, type i64
for i, operand in enumerate(consumer.operands):
print(f"Operand {i}, type {operand.type}")
run(testOperationOperands)
# CHECK-LABEL: TEST: testOperationOperandsSlice
def testOperationOperandsSlice():
with Context() as ctx:
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1() {
%0 = "test.producer0"() : () -> i64
%1 = "test.producer1"() : () -> i64
%2 = "test.producer2"() : () -> i64
%3 = "test.producer3"() : () -> i64
%4 = "test.producer4"() : () -> i64
"test.consumer"(%0, %1, %2, %3, %4) : (i64, i64, i64, i64, i64) -> ()
return
}""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
consumer = entry_block.operations[5]
assert len(consumer.operands) == 5
for left, right in zip(consumer.operands, consumer.operands[::-1][::-1]):
assert left == right
# CHECK: test.producer0
# CHECK: test.producer1
# CHECK: test.producer2
# CHECK: test.producer3
# CHECK: test.producer4
full_slice = consumer.operands[:]
for operand in full_slice:
print(operand)
# CHECK: test.producer0
# CHECK: test.producer1
first_two = consumer.operands[0:2]
for operand in first_two:
print(operand)
# CHECK: test.producer3
# CHECK: test.producer4
last_two = consumer.operands[3:]
for operand in last_two:
print(operand)
# CHECK: test.producer0
# CHECK: test.producer2
# CHECK: test.producer4
even = consumer.operands[::2]
for operand in even:
print(operand)
# CHECK: test.producer2
fourth = consumer.operands[::2][1::2]
for operand in fourth:
print(operand)
run(testOperationOperandsSlice)
# CHECK-LABEL: TEST: testDetachedOperation
def testDetachedOperation():
ctx = Context()
ctx.allow_unregistered_dialects = True
with Location.unknown(ctx):
i32 = IntegerType.get_signed(32)
op1 = Operation.create(
"custom.op1", results=[i32, i32], regions=1, attributes={
"foo": StringAttr.get("foo_value"),
"bar": StringAttr.get("bar_value"),
})
# CHECK: %0:2 = "custom.op1"() ( {
# CHECK: }) {bar = "bar_value", foo = "foo_value"} : () -> (si32, si32)
print(op1)
# TODO: Check successors once enough infra exists to do it properly.
run(testDetachedOperation)
# CHECK-LABEL: TEST: testOperationInsertionPoint
def testOperationInsertionPoint():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""", ctx)
# Create test op.
with Location.unknown(ctx):
op1 = Operation.create("custom.op1")
op2 = Operation.create("custom.op2")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
ip = InsertionPoint.at_block_begin(entry_block)
ip.insert(op1)
ip.insert(op2)
# CHECK: func @f1
# CHECK: "custom.op1"()
# CHECK: "custom.op2"()
# CHECK: %0 = "custom.addi"
print(module)
# Trying to add a previously added op should raise.
try:
ip.insert(op1)
except ValueError:
pass
else:
assert False, "expected insert of attached op to raise"
run(testOperationInsertionPoint)
# CHECK-LABEL: TEST: testOperationWithRegion
def testOperationWithRegion():
ctx = Context()
ctx.allow_unregistered_dialects = True
with Location.unknown(ctx):
i32 = IntegerType.get_signed(32)
op1 = Operation.create("custom.op1", regions=1)
block = op1.regions[0].blocks.append(i32, i32)
# CHECK: "custom.op1"() ( {
# CHECK: ^bb0(%arg0: si32, %arg1: si32): // no predecessors
# CHECK: "custom.terminator"() : () -> ()
# CHECK: }) : () -> ()
terminator = Operation.create("custom.terminator")
ip = InsertionPoint(block)
ip.insert(terminator)
print(op1)
# Now add the whole operation to another op.
# TODO: Verify lifetime hazard by nulling out the new owning module and
# accessing op1.
# TODO: Also verify accessing the terminator once both parents are nulled
# out.
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32
return %1 : i32
}
""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
ip = InsertionPoint.at_block_begin(entry_block)
ip.insert(op1)
# CHECK: func @f1
# CHECK: "custom.op1"()
# CHECK: "custom.terminator"
# CHECK: %0 = "custom.addi"
print(module)
run(testOperationWithRegion)
# CHECK-LABEL: TEST: testOperationResultList
def testOperationResultList():
ctx = Context()
module = Module.parse(r"""
func @f1() {
%0:3 = call @f2() : () -> (i32, f64, index)
return
}
func private @f2() -> (i32, f64, index)
""", ctx)
caller = module.body.operations[0]
call = caller.regions[0].blocks[0].operations[0]
assert len(call.results) == 3
# CHECK: Result 0, type i32
# CHECK: Result 1, type f64
# CHECK: Result 2, type index
for res in call.results:
print(f"Result {res.result_number}, type {res.type}")
run(testOperationResultList)
# CHECK-LABEL: TEST: testOperationResultListSlice
def testOperationResultListSlice():
with Context() as ctx:
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
func @f1() {
"some.op"() : () -> (i1, i2, i3, i4, i5)
return
}
""")
func = module.body.operations[0]
entry_block = func.regions[0].blocks[0]
producer = entry_block.operations[0]
assert len(producer.results) == 5
for left, right in zip(producer.results, producer.results[::-1][::-1]):
assert left == right
assert left.result_number == right.result_number
# CHECK: Result 0, type i1
# CHECK: Result 1, type i2
# CHECK: Result 2, type i3
# CHECK: Result 3, type i4
# CHECK: Result 4, type i5
full_slice = producer.results[:]
for res in full_slice:
print(f"Result {res.result_number}, type {res.type}")
# CHECK: Result 1, type i2
# CHECK: Result 2, type i3
# CHECK: Result 3, type i4
middle = producer.results[1:4]
for res in middle:
print(f"Result {res.result_number}, type {res.type}")
# CHECK: Result 1, type i2
# CHECK: Result 3, type i4
odd = producer.results[1::2]
for res in odd:
print(f"Result {res.result_number}, type {res.type}")
# CHECK: Result 3, type i4
# CHECK: Result 1, type i2
inverted_middle = producer.results[-2:0:-2]
for res in inverted_middle:
print(f"Result {res.result_number}, type {res.type}")
run(testOperationResultListSlice)
# CHECK-LABEL: TEST: testOperationAttributes
def testOperationAttributes():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
"some.op"() { some.attribute = 1 : i8,
other.attribute = 3.0,
dependent = "text" } : () -> ()
""", ctx)
op = module.body.operations[0]
assert len(op.attributes) == 3
iattr = IntegerAttr(op.attributes["some.attribute"])
fattr = FloatAttr(op.attributes["other.attribute"])
sattr = StringAttr(op.attributes["dependent"])
# CHECK: Attribute type i8, value 1
print(f"Attribute type {iattr.type}, value {iattr.value}")
# CHECK: Attribute type f64, value 3.0
print(f"Attribute type {fattr.type}, value {fattr.value}")
# CHECK: Attribute value text
print(f"Attribute value {sattr.value}")
# We don't know in which order the attributes are stored.
# CHECK-DAG: NamedAttribute(dependent="text")
# CHECK-DAG: NamedAttribute(other.attribute=3.000000e+00 : f64)
# CHECK-DAG: NamedAttribute(some.attribute=1 : i8)
for attr in op.attributes:
print(str(attr))
# Check that exceptions are raised as expected.
try:
op.attributes["does_not_exist"]
except KeyError:
pass
else:
assert False, "expected KeyError on accessing a non-existent attribute"
try:
op.attributes[42]
except IndexError:
pass
else:
assert False, "expected IndexError on accessing an out-of-bounds attribute"
run(testOperationAttributes)
# CHECK-LABEL: TEST: testOperationPrint
def testOperationPrint():
ctx = Context()
module = Module.parse(r"""
func @f1(%arg0: i32) -> i32 {
%0 = constant dense<[1, 2, 3, 4]> : tensor<4xi32>
return %arg0 : i32
}
""", ctx)
# Test print to stdout.
# CHECK: return %arg0 : i32
module.operation.print()
# Test print to text file.
f = io.StringIO()
# CHECK: <class 'str'>
# CHECK: return %arg0 : i32
module.operation.print(file=f)
str_value = f.getvalue()
print(str_value.__class__)
print(f.getvalue())
# Test print to binary file.
f = io.BytesIO()
# CHECK: <class 'bytes'>
# CHECK: return %arg0 : i32
module.operation.print(file=f, binary=True)
bytes_value = f.getvalue()
print(bytes_value.__class__)
print(bytes_value)
# Test get_asm with options.
# CHECK: value = opaque<"_", "0xDEADBEEF"> : tensor<4xi32>
# CHECK: "std.return"(%arg0) : (i32) -> () -:4:7
module.operation.print(large_elements_limit=2, enable_debug_info=True,
pretty_debug_info=True, print_generic_op_form=True, use_local_scope=True)
run(testOperationPrint)
# CHECK-LABEL: TEST: testKnownOpView
def testKnownOpView():
with Context(), Location.unknown():
Context.current.allow_unregistered_dialects = True
module = Module.parse(r"""
%1 = "custom.f32"() : () -> f32
%2 = "custom.f32"() : () -> f32
%3 = addf %1, %2 : f32
""")
print(module)
# addf should map to a known OpView class in the std dialect.
# We know the OpView for it defines an 'lhs' attribute.
addf = module.body.operations[2]
# CHECK: <mlir.dialects._std_ops_gen._AddFOp object
print(repr(addf))
# CHECK: "custom.f32"()
print(addf.lhs)
# One of the custom ops should resolve to the default OpView.
custom = module.body.operations[0]
# CHECK: <_mlir.ir.OpView object
print(repr(custom))
# Check again to make sure negative caching works.
custom = module.body.operations[0]
# CHECK: <_mlir.ir.OpView object
print(repr(custom))
run(testKnownOpView)
# CHECK-LABEL: TEST: testSingleResultProperty
def testSingleResultProperty():
with Context(), Location.unknown():
Context.current.allow_unregistered_dialects = True
module = Module.parse(r"""
"custom.no_result"() : () -> ()
%0:2 = "custom.two_result"() : () -> (f32, f32)
%1 = "custom.one_result"() : () -> f32
""")
print(module)
try:
module.body.operations[0].result
except ValueError as e:
# CHECK: Cannot call .result on operation custom.no_result which has 0 results
print(e)
else:
assert False, "Expected exception"
try:
module.body.operations[1].result
except ValueError as e:
# CHECK: Cannot call .result on operation custom.two_result which has 2 results
print(e)
else:
assert False, "Expected exception"
# CHECK: %1 = "custom.one_result"() : () -> f32
print(module.body.operations[2])
run(testSingleResultProperty)
# CHECK-LABEL: TEST: testPrintInvalidOperation
def testPrintInvalidOperation():
ctx = Context()
with Location.unknown(ctx):
module = Operation.create("module", regions=2)
# This module has two regions and is invalid; verify that we fall back
# to the generic printer for safety.
block = module.regions[0].blocks.append()
# CHECK: // Verification failed, printing generic form
# CHECK: "module"() ( {
# CHECK: }) : () -> ()
print(module)
# CHECK: .verify = False
print(f".verify = {module.operation.verify()}")
run(testPrintInvalidOperation)
# CHECK-LABEL: TEST: testCreateWithInvalidAttributes
def testCreateWithInvalidAttributes():
ctx = Context()
with Location.unknown(ctx):
try:
Operation.create("module", attributes={None:StringAttr.get("name")})
except Exception as e:
# CHECK: Invalid attribute key (not a string) when attempting to create the operation "module"
print(e)
try:
Operation.create("module", attributes={42:StringAttr.get("name")})
except Exception as e:
# CHECK: Invalid attribute key (not a string) when attempting to create the operation "module"
print(e)
try:
Operation.create("module", attributes={"some_key":ctx})
except Exception as e:
# CHECK: Invalid attribute value for the key "some_key" when attempting to create the operation "module"
print(e)
try:
Operation.create("module", attributes={"some_key":None})
except Exception as e:
# CHECK: Found an invalid (`None`?) attribute value for the key "some_key" when attempting to create the operation "module"
print(e)
run(testCreateWithInvalidAttributes)
# CHECK-LABEL: TEST: testOperationName
def testOperationName():
ctx = Context()
ctx.allow_unregistered_dialects = True
module = Module.parse(r"""
%0 = "custom.op1"() : () -> f32
%1 = "custom.op2"() : () -> i32
%2 = "custom.op1"() : () -> f32
""", ctx)
# CHECK: custom.op1
# CHECK: custom.op2
# CHECK: custom.op1
for op in module.body.operations:
print(op.operation.name)
run(testOperationName)
# CHECK-LABEL: TEST: testCapsuleConversions
def testCapsuleConversions():
ctx = Context()
ctx.allow_unregistered_dialects = True
with Location.unknown(ctx):
m = Operation.create("custom.op1").operation
m_capsule = m._CAPIPtr
assert '"mlir.ir.Operation._CAPIPtr"' in repr(m_capsule)
m2 = Operation._CAPICreate(m_capsule)
assert m2 is m
run(testCapsuleConversions)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pypepa documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 18 15:33:13 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('../pypepa'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.mathjax', 'sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pypepa'
copyright = '2013, Dariusz Dwornikowski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = 'latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pypepadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pypepa.tex', 'pypepa Documentation',
'Dariusz Dwornikowski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pypepa', 'pypepa Documentation',
['Dariusz Dwornikowski'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pypepa', 'pypepa Documentation',
'Dariusz Dwornikowski', 'pypepa', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
from django.urls import path
from . import views
urlpatterns = [
path('StatsClass', views.index),
path('BasicProbability', views.basic_prob),
]
|
class DigitalSignatureScheme(object):
def get_public_key(self):
return self.public_key
def sign(self, message):
raise NotImplementedError
def verify(self, message, signature):
raise NotImplementedError
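# Illustrative sketch (not part of the original interface): a toy subclass based on
# textbook RSA with a tiny hard-coded key pair. For demonstration only -- the key is
# insecure, there is no hashing or padding, and messages are small integers.
class ToyRSASignatureScheme(DigitalSignatureScheme):
    def __init__(self):
        # n = 61 * 53, e = 17, d = 17^-1 mod lcm(60, 52) = 413
        self.public_key = (3233, 17)
        self._private_key = (3233, 413)
    def sign(self, message):
        n, d = self._private_key
        return pow(message % n, d, n)
    def verify(self, message, signature):
        n, e = self.public_key
        return pow(signature, e, n) == message % n
# e.g. ToyRSASignatureScheme().verify(65, ToyRSASignatureScheme().sign(65)) -> True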
|
"""
Created by Michele Bianco, 9 July 2021
"""
import os
import numpy as np, pkg_resources
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from tensorflow.python.ops import nn_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def sigmoid_balanced_cross_entropy_with_logits(_sentinel=None, labels=None, logits=None, beta=None, name=None):
nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel,labels, logits)
with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)" %(logits.get_shape(), labels.get_shape()))
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
cond = (logits >= zeros)
relu_logits = array_ops.where(cond, logits, zeros)
neg_abs_logits = array_ops.where(cond, -logits, logits)
balanced_cross_entropy = relu_logits*(1.-beta)-logits*labels*(1.-beta)+math_ops.log1p(math_ops.exp(neg_abs_logits))*((1.-beta)*(1.-labels)+beta*labels)
return tf.reduce_mean(balanced_cross_entropy)
def balanced_cross_entropy(y_true, y_pred):
"""
To decrease the number of false negatives, set beta>1. To decrease the number of false positives, set beta<1.
"""
beta = tf.maximum(tf.reduce_mean(1 - y_true), tf.keras.backend.epsilon())
y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon())
y_pred = K.log(y_pred / (1 - y_pred))
return sigmoid_balanced_cross_entropy_with_logits(logits=y_pred, labels=y_true, beta=beta)
def iou(y_true, y_pred):
"""
Return the Intersection over Union (IoU) for a given label.
Args:
y_true: the expected y values as a one-hot
y_pred: the predicted y values as a one-hot or softmax output
Returns:
the IoU between y_true and y_pred
"""
intersection = K.sum(K.abs(y_true * y_pred))
#intersection = K.sum(y_true * y_pred)
union = K.sum(y_true) + K.sum(y_pred) - intersection
# avoid divide by zero - if the union is zero, return 1, otherwise, return the intersection over union
return K.switch(K.equal(union, 0), 1.0, intersection / union)
def dice_coef(y_true, y_pred, smooth=1):
"""
Dice = (2*|X & Y|)/ (|X|+ |Y|)
= 2*sum(|A*B|)/(sum(A^2)+sum(B^2))
ref: https://arxiv.org/pdf/1606.04797v1.pdf
"""
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
################################################################
class segunet21cm:
def __init__(self, tta=1, verbose=False):
""" SegU-Net: segmentation of 21cm images with U-shape network (Bianco et al. 2021, https://arxiv.org/abs/2102.06713)
- tta (int): default 1; build the pixel-error map
with the test-time augmentation technique in the prediction process
- verbose (bool): default False, activate verbosity
Description:
tta = 0 : fast (~7 sec), it tends to be a few percent less accurate (<2%) than the other two cases, no pixel-error map (no TTA manipulation)
tta = 1 : medium (~17 sec), accurate and preferable to tta=0, with pixel-error map (3 samples)
tta = 2 : slow (~10 min), accurate, with pixel-error map (~100 samples)
Returns:
- X_seg (ndarray) : recovered binary field (1 = neutral and 0 = ionized regions)
- X_err (ndarray) : pixel-error map of the recovered binary field
Example:
$ from tools21cm import segmentation
$ seg = segmentation.segunet21cm(tta=1, verbose=True) # load model (need to be done once)
$ Xseg, Xseg_err = seg.prediction(x=dT3)
Print of the Network's Configuration file:
[TRAINING]
BATCH_SIZE = 64
AUGMENT = NOISESMT
IMG_SHAPE = 128, 128
CHAN_SIZE = 256
DROPOUT = 0.05
KERNEL_SIZE = 3
EPOCHS = 100
LOSS = balanced_cross_entropy
METRICS = iou, dice_coef, binary_accuracy, binary_crossentropy
LR = 1e-3
RECOMP = False
GPUS = 2
PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/inputs/data2D_128_030920/
[RESUME]
RESUME_PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/outputs/new/02-10T23-52-36_128slice/
BEST_EPOCH = 56
RESUME_EPOCH = 66
"""
self.TTA = tta
self.VERBOSE = verbose
if(self.TTA == 2):
# slow
self.MANIP = self.IndependentOperations(verbose=self.VERBOSE)
elif(self.TTA == 1):
# fast
self.MANIP = {'opt0': [lambda a: a, 0, 0]}
elif(self.TTA == 0):
# super-fast
self.MANIP = {'opt0': [lambda a: a, 0, 0]}
self.NR_MANIP = len(self.MANIP)
# load model
MODEL_NAME = pkg_resources.resource_filename('t2c', 'input_data/segunet_02-10T23-52-36_128slice_ep56.h5')
if (os.path.exists(MODEL_NAME)):
pass
else:
if(self.VERBOSE): print(' Download network weights: %s' %MODEL_NAME)
MODEL_EPOCH = 56
METRICS = {'balanced_cross_entropy':balanced_cross_entropy, 'iou':iou, 'dice_coef':dice_coef}
self.MODEL_LOADED = load_model(MODEL_NAME, custom_objects=METRICS)
if(self.VERBOSE): print(' Loaded model: %s' %MODEL_NAME)
def UniqueRows(self, arr):
""" Remove duplicate row array in 2D data
- arr (narray): array with duplicate row
Example:
>> d = np.array([[0,1,2],[0,1,2],[0,0,0],[0,0,2],[0,1,2]])
>> UniqueRows(d)
array([[0, 0, 0],
[0, 0, 2],
[0, 1, 2]])
"""
arr = np.array(arr)
if(arr.ndim == 2):
arr = np.ascontiguousarray(arr)
unique_arr = np.unique(arr.view([('', arr.dtype)]*arr.shape[1]))
new_arr = unique_arr.view(arr.dtype).reshape((unique_arr.shape[0], arr.shape[1]))
elif(arr.ndim == 1):
new_arr = np.array(list(dict.fromkeys(arr)))
return new_arr
def IndependentOperations(self, verbose=False):
''' How many unique manipulations (horizontal and vertical flip, rotation, etc...)
can we operate on a cube?
Each independent operation is considered as an additional representation
of the same coeval data, so that it can be considered for error bars with SegU-Net '''
data = np.array(range(3**3)).reshape((3,3,3))
func = [lambda a: a,
np.fliplr,
np.flipud,
lambda a: np.flipud(np.fliplr(a)),
lambda a: np.fliplr(np.flipud(a))]
axis = [0,1,2]
angl_rot = [0,1,2,3]
tot_manipl_data_flat = np.zeros((len(func)*len(axis)*len(angl_rot), data.size))
tot_operations = {'opt%d' %k:[] for k in range(0,len(func)*len(axis)*len(angl_rot))}
i = 0
for f in func:
cube = f(data)
for rotax in axis:
ax_tup = [0,1,2]
ax_tup.remove(rotax)
for rot in angl_rot:
tot_manipl_data_flat[i] = np.rot90(cube, k=rot, axes=ax_tup).flatten()
# function, axis of rotation, angle of rotation, slice index
tot_operations['opt%d' %i] = [f, rotax, rot]
i += 1
uniq_manipl_data_flat = self.UniqueRows(tot_manipl_data_flat).astype(int)
uniq_operations = {}
for iumdf, uniq_mdf in enumerate(uniq_manipl_data_flat):
for itmdf, tot_mdf in enumerate(tot_manipl_data_flat):
if(all(uniq_mdf == tot_mdf)):
uniq_operations['opt%d' %iumdf] = tot_operations['opt%d' %itmdf]
break
assert uniq_manipl_data_flat.shape[0] == len(uniq_operations)
if(verbose): print('tot number of (unique) manipulations we can do on a cube: %d' %(len(uniq_operations)))
return uniq_operations
def prediction(self, x):
img_shape = x.shape
if(self.TTA == 2):
X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))
elif(self.TTA == 1):
X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))
elif(self.TTA == 0):
X_tta = np.zeros((np.append(len(self.MANIP), img_shape)))
if(self.VERBOSE):
loop = tqdm(range(len(self.MANIP)))
else:
loop = range(len(self.MANIP))
for iopt in loop:
opt, rotax, rot = self.MANIP['opt%d' %iopt]
ax_tup = [0,1,2]
ax_tup.remove(rotax)
cube = np.rot90(opt(x), k=rot, axes=ax_tup)
X = cube[np.newaxis, ..., np.newaxis]
for j in range(img_shape[0]):
if(self.TTA == 0):
X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()
else:
X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()
X_tta[iopt+len(self.MANIP),:,j,:] = self.MODEL_LOADED.predict(X[:,:,j,:,:], verbose=0).squeeze()
X_tta[iopt+len(self.MANIP)*2,:,:,j] = self.MODEL_LOADED.predict(X[:,:,:,j,:], verbose=0).squeeze()
for itta in range(X_tta.shape[0]):
opt, rotax, rot = self.MANIP['opt%d' %(itta%len(self.MANIP))]
ax_tup = [0,1,2]
ax_tup.remove(rotax)
X_tta[itta] = opt(np.rot90(X_tta[itta], k=-rot, axes=ax_tup))
X_seg = np.round(np.mean(X_tta, axis=0))
X_err = np.std(X_tta, axis=0)
return X_seg, X_err
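# Illustrative sketch (not part of the original module): run the segmentation on a
# random cube. The 128^3 shape matches the 128-slice model in the class docstring;
# the input here is just noise, so the output is meaningless and purely a smoke test.
if __name__ == '__main__':
    dT = np.random.normal(size=(128, 128, 128)).astype(np.float32)
    seg = segunet21cm(tta=0, verbose=True)
    X_seg, X_err = seg.prediction(x=dT)
    print(X_seg.shape, X_err.shape)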
|
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class UserTruncated(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'external_id': 'str'
}
attribute_map = {
'id': 'id',
'external_id': 'externalId'
}
nulls = set()
def __init__(self, id=None, external_id=Undefined(), local_vars_configuration=None): # noqa: E501
"""UserTruncated - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._external_id = None
self.discriminator = None
if id is not None:
self.id = id
self.external_id = external_id
@property
def id(self):
"""Gets the id of this UserTruncated. # noqa: E501
The unique ID of the user. # noqa: E501
:return: The id of this UserTruncated. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this UserTruncated.
The unique ID of the user. # noqa: E501
:param id: The id of this UserTruncated. # noqa: E501
:type: str
"""
self._id = id
@property
def external_id(self):
"""Gets the external_id of this UserTruncated. # noqa: E501
An optional ID that can also be used to retrieve the user. # noqa: E501
:return: The external_id of this UserTruncated. # noqa: E501
:rtype: str
"""
return self._external_id
@external_id.setter
def external_id(self, external_id):
"""Sets the external_id of this UserTruncated.
An optional ID that can also be used to retrieve the user. # noqa: E501
:param external_id: The external_id of this UserTruncated. # noqa: E501
:type: str
"""
if type(external_id) is Undefined:
external_id = None
self.nulls.discard("external_id")
elif external_id is None:
self.nulls.add("external_id")
else:
self.nulls.discard("external_id")
self._external_id = external_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserTruncated):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UserTruncated):
return True
return self.to_dict() != other.to_dict()
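# Illustrative sketch (not part of the generated module): build an instance and
# inspect its serialized form; the IDs below are made-up values.
if __name__ == '__main__':
    user = UserTruncated(id='5e8f3d1c2a', external_id='crm-42')
    print(user.to_dict())  # {'id': '5e8f3d1c2a', 'external_id': 'crm-42'}
    print(user == UserTruncated(id='5e8f3d1c2a', external_id='crm-42'))  # True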
|
import cv2 as cv
import numpy as np
import os
def preprocess(labels_path, sep_labels_path):
# list all files on labels_path
labels_filenames = os.listdir(labels_path)
count = 0
for label_filename in labels_filenames:
label_path = os.path.join(labels_path, label_filename)
print(f'segmenting {label_filename}')
masks = segment_labels(label_path)
for att in masks:
mask = masks[att]
path = f"{sep_labels_path}/{label_filename[:-4]}_{att}.png"
print(f'{count} - writing {path}')
cv.imwrite(path, mask)
count += 1
# cv.imwrite(f'{label_filename[:-4]}_{mask}', mask)
def segment_labels(label_path):
atts = {
"background": (0, 0, 0),
"mouth": (255, 0, 0),
"eyes": (0, 255, 0),
"nose": (0, 0, 255),
"face": (128, 128, 128),
"hair": (255, 255, 0),
"eyebrows": (255, 0, 255),
"ears": (0, 255, 255),
"teeth": (255, 255, 255),
"beard": (255, 192, 192),
"sunglasses": (0, 128, 128),
}
label = cv.imread(label_path)
mask = np.zeros(label.shape, dtype=np.uint8)
masks = {}
for att in atts:
color = atts[att]
mask = cv.inRange(label, color, color)
masks[att] = mask
# cv.imshow(att, mask)
# cv.waitKey(0)
# cv.imwrite(f"{sep_labels_path}/{label_path[:-4]}_{att}.png", mask)
return masks
# separate_masks("./labels.png")
preprocess("./organized_dataset/labels", "./organized_dataset/segmented_labels")
|
"""EPR Socket interface."""
from __future__ import annotations
import abc
import logging
from contextlib import contextmanager
from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, Union
from netqasm.logging.glob import get_netqasm_logger
from netqasm.qlink_compat import (
EPRRole,
EPRType,
LinkLayerOKTypeK,
LinkLayerOKTypeM,
LinkLayerOKTypeR,
RandomBasis,
TimeUnit,
)
from netqasm.sdk.build_epr import EprMeasBasis, basis_to_rotation
from netqasm.sdk.builder import EntRequestParams, EprKeepResult, EprMeasureResult
from netqasm.sdk.futures import RegFuture
from .qubit import FutureQubit, Qubit
if TYPE_CHECKING:
from netqasm.sdk import connection
T_LinkLayerOkList = Union[
List[LinkLayerOKTypeK], List[LinkLayerOKTypeM], List[LinkLayerOKTypeR]
]
class EPRSocket(abc.ABC):
"""EPR socket class. Used to generate entanglement with a remote node.
An EPR socket represents a connection with a single remote node through which
EPR pairs can be generated. Its main interfaces are the `create` and `recv`
methods. A typical use case for two nodes is that they both create an EPR socket
to the other node, and during the protocol, one of the nodes does `create`
operations on its socket while the other node does `recv` operations.
A `create` operation asks the network stack to initiate generation of EPR pairs
with the remote node. Depending on the type of generation, the result of this
operation can be qubit objects or measurement outcomes.
A `recv` operation asks the network stack to wait for the remote node to initiate
generation of EPR pairs. Again, the result can be qubit objects or measurement
outcomes.
Each `create` operation on one node must be matched by a `recv` operation on the
other node. Since "creating" and "receiving" must happen at the same time, a node
that is doing a `create` operation on its socket cannot advance until the other
node does the corresponding `recv`. This is different from classical network
sockets where a "send" operation (roughly anologous to `create` in an EPR socket)
does not block on the remote node receiving it.
An EPR socket is identified by a triple consisting of (1) the remote node ID,
(2) the local socket ID and (3) the remote socket ID.
Two nodes that want to generate EPR pairs with each other should make sure that the
IDs in their local sockets match.
"""
def __init__(
self,
remote_app_name: str,
epr_socket_id: int = 0,
remote_epr_socket_id: int = 0,
min_fidelity: int = 100,
):
"""Create an EPR socket. It still needs to be registered with the network
stack separately.
Registering and opening the EPR socket is currently done automatically by the
connection that uses this EPR socket, specifically when a context is opened
with that connection.
:param remote_app_name: name of the remote party (i.e. the role, like "client",
not necessarily the node name like "delft")
:param epr_socket_id: local socket ID, defaults to 0
:param remote_epr_socket_id: remote socket ID, defaults to 0. Note that this
must match with the local socket ID of the remote node's EPR socket.
:param min_fidelity: minimum desired fidelity for EPR pairs generated over this
socket, in percentages (i.e. range 0-100). Defaults to 100.
"""
self._conn: Optional[connection.BaseNetQASMConnection] = None
self._remote_app_name: str = remote_app_name
self._remote_node_id: Optional[
int
] = None # Gets set when the connection is set
self._epr_socket_id: int = epr_socket_id
self._remote_epr_socket_id: int = remote_epr_socket_id
if (
not isinstance(min_fidelity, int)
or (min_fidelity < 0)
or min_fidelity > 100
):
raise ValueError(
f"min_fidelity must be an integer in the range [0, 100], not {min_fidelity}"
)
self._min_fidelity: int = min_fidelity
self._logger: logging.Logger = get_netqasm_logger(
f"{self.__class__.__name__}({self._remote_app_name}, {self._epr_socket_id})"
)
@property
def conn(self) -> connection.BaseNetQASMConnection:
"""Get the underlying :class:`NetQASMConnection`"""
if self._conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
return self._conn
@conn.setter
def conn(self, conn: connection.BaseNetQASMConnection):
self._conn = conn
self._remote_node_id = self._get_node_id(app_name=self._remote_app_name)
@property
def remote_app_name(self) -> str:
"""Get the remote application name"""
return self._remote_app_name
@property
def remote_node_id(self) -> int:
"""Get the remote node ID"""
if self._remote_node_id is None:
raise RuntimeError("Remote Node ID has not been initialized")
return self._remote_node_id
@property
def epr_socket_id(self) -> int:
"""Get the EPR socket ID"""
return self._epr_socket_id
@property
def remote_epr_socket_id(self) -> int:
"""Get the remote EPR socket ID"""
return self._remote_epr_socket_id
@property
def min_fidelity(self) -> int:
"""Get the desired minimum fidelity"""
return self._min_fidelity
def create_keep(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> List[Qubit]:
"""Ask the network stack to generate EPR pairs with the remote node and keep
them in memory.
A `create_keep` operation must always be matched by a `recv_keep` operation on
the remote node.
If `sequential` is False (default), this operation returns a list of Qubit
objects representing the local qubits that are each one half of the generated
pairs. These qubits can then be manipulated locally just like locally
initialized qubits, by e.g. applying gates or measuring them.
Each qubit also contains information about the entanglement generation that
        led to its creation, which can be accessed via its `entanglement_info` property.
A typical example for just generating one pair with another node would be:
.. code-block::
q = epr_socket.create_keep()[0]
# `q` can now be used as a normal qubit
        If `sequential` is False (default), all requested EPR pairs are generated
at once, before returning the results (qubits or entanglement info objects).
If `sequential` is True, a callback function (`post_routine`) should be
specified. After generating one EPR pair, this callback will be called, before
generating the next pair. This method can e.g. be used to generate many EPR
pairs (more than the number of physical qubits available), by measuring (and
freeing up) each qubit before the next pair is generated.
For example:
.. code-block::
outcomes = alice.new_array(num)
def post_create(conn, q, pair):
q.H()
outcome = outcomes.get_future_index(pair)
q.measure(outcome)
epr_socket.create_keep(number=num, post_routine=post_create, sequential=True)
:param number: number of EPR pairs to generate, defaults to 1
        :param post_routine: callback function for each generated pair. Only used if
`sequential` is True.
The callback should take three arguments `(conn, q, pair)` where
* `conn` is the connection (e.g. `self`)
* `q` is the entangled qubit (of type `FutureQubit`)
* `pair` is a register holding which pair is handled (0, 1, ...)
:param sequential: whether to use callbacks after each pair, defaults to False
:param time_unit: which time unit to use for the `max_time` parameter
:param max_time: maximum number of time units (see `time_unit`) the Host is
willing to wait for entanglement generation of a single pair. If generation
does not succeed within this time, the whole subroutine that this request
is part of is reset and run again by the quantum node controller.
:param expect_phi_plus: whether to assume that the EPR pairs that are created
are in the Phi+ (or Phi_00) state. Defaults to True. If True, the compiler
will make sure that if the physical link actually produced another Bell
state, the behavior seen by the application is still as if a Phi+ state
was actually produced.
:param min_fidelity_all_at_end: the minimum fidelity that *all* entangled
qubits should ideally still have at the moment the last qubit has been
generated. For example, when specifying `number=2` and
            `min_fidelity_all_at_end=80`, the program will automatically try to
make sure that both qubits have a fidelity of at least 80% when the
second qubit has been generated. It will attempt to do this by
automatically re-trying the entanglement generation if the fidelity
            constraint is not satisfied. This is, however, an *attempt*, not
            a guarantee.
        :param max_tries: maximum number of re-tries that should be made to achieve
            the `min_fidelity_all_at_end` constraint.
:return: list of qubits created
"""
qubits, _ = self.conn.builder.sdk_create_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits
def create_keep_with_info(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
min_fidelity_all_at_end: Optional[int] = None,
) -> Tuple[List[Qubit], List[EprKeepResult]]:
"""Same as create_keep but also return the EPR generation information coming
from the network stack.
For more information see the documentation of `create_keep`.
:param number: number of pairs to generate, defaults to 1
:return: tuple with (1) list of qubits created, (2) list of EprKeepResult objects
"""
qubits, info = self.conn._builder.sdk_create_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
min_fidelity_all_at_end=min_fidelity_all_at_end,
),
)
return qubits, info
def create_measure(
self,
number: int = 1,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
basis_local: EprMeasBasis = None,
basis_remote: EprMeasBasis = None,
rotations_local: Tuple[int, int, int] = (0, 0, 0),
rotations_remote: Tuple[int, int, int] = (0, 0, 0),
random_basis_local: Optional[RandomBasis] = None,
random_basis_remote: Optional[RandomBasis] = None,
) -> List[EprMeasureResult]:
"""Ask the network stack to generate EPR pairs with the remote node and
measure them immediately (on both nodes).
A `create_measure` operation must always be matched by a `recv_measure`
operation on the remote node.
This operation returns a list of Linklayer response objects. These objects
contain information about the entanglement generation and includes the
measurement outcome and basis used. Note that all values are `Future` objects.
This means that the current subroutine must be flushed before the values
become defined.
An example for generating 10 pairs with another node that are immediately
measured:
.. code-block::
# list of Futures that become defined when subroutine is flushed
outcomes = []
with NetQASMConnection("alice", epr_sockets=[epr_socket]):
                ent_infos = epr_socket.create_measure(number=10)
for ent_info in ent_infos:
outcomes.append(ent_info.measurement_outcome)
The basis to measure in can also be specified. There are 3 ways to specify a
basis:
* using one of the `EprMeasBasis` variants
* by specifying 3 rotation angles, interpreted as an X-rotation, a Y-rotation
and another X-rotation. For example, setting `rotations_local` to (8, 0, 0)
means that before measuring, an X-rotation of 8*pi/16 = pi/2 radians is
applied to the qubit.
* using one of the `RandomBasis` variants, in which case one of the bases of
that variant is chosen at random just before measuring
NOTE: the node that initiates the entanglement generation, i.e. the one that
calls `create` on its EPR socket, also controls the measurement bases of the
receiving node (by setting e.g. `rotations_remote`). The receiving node cannot
change this.
:param number: number of EPR pairs to generate, defaults to 1
:param time_unit: which time unit to use for the `max_time` parameter
:param max_time: maximum number of time units (see `time_unit`) the Host is
willing to wait for entanglement generation of a single pair. If generation
does not succeed within this time, the whole subroutine that this request
is part of is reset and run again by the quantum node controller.
:param expect_phi_plus: whether to assume that the EPR pairs that are created
are in the Phi+ (or Phi_00) state. Defaults to True. If True, the compiler
will make sure that if the physical link actually produced another Bell
state, the behavior seen by the application is still as if a Phi+ state
was actually produced.
:param basis_local: basis to measure in on this node for M-type requests
:param basis_remote: basis to measure in on the remote node for M-type requests
:param rotations_local: rotations to apply before measuring on this node
:param rotations_remote: rotations to apply before measuring on remote node
:param random_basis_local: random bases to choose from when measuring on this
node
:param random_basis_remote: random bases to choose from when measuring on
the remote node
:return: list of entanglement info objects per created pair.
"""
if basis_local is not None:
rotations_local = basis_to_rotation(basis_local)
if basis_remote is not None:
rotations_remote = basis_to_rotation(basis_remote)
return self.conn.builder.sdk_create_epr_measure(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
random_basis_local=random_basis_local,
random_basis_remote=random_basis_remote,
rotations_local=rotations_local,
rotations_remote=rotations_remote,
),
)
def create_rsp(
self,
number: int = 1,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
expect_phi_plus: bool = True,
basis_local: EprMeasBasis = None,
rotations_local: Tuple[int, int, int] = (0, 0, 0),
random_basis_local: Optional[RandomBasis] = None,
min_fidelity_all_at_end: Optional[int] = None,
) -> List[EprMeasureResult]:
"""Ask the network stack to do remote preparation with the remote node.
        A `create_rsp` operation must always be matched by a `recv_rsp` operation
on the remote node.
This operation returns a list of Linklayer response objects. These objects
contain information about the entanglement generation and includes the
measurement outcome and basis used. Note that all values are `Future` objects.
This means that the current subroutine must be flushed before the values
become defined.
        An example of preparing a single qubit on the remote node:
        .. code-block::
            m = epr_socket.create_rsp()[0]
print(m.measurement_outcome)
# remote node now has a prepared qubit
The basis to measure in can also be specified.
There are 3 ways to specify a basis:
* using one of the `EprMeasBasis` variants
* by specifying 3 rotation angles, interpreted as an X-rotation, a Y-rotation
and another X-rotation. For example, setting `rotations_local` to (8, 0, 0)
means that before measuring, an X-rotation of 8*pi/16 = pi/2 radians is
applied to the qubit.
* using one of the `RandomBasis` variants, in which case one of the bases of
that variant is chosen at random just before measuring
:param number: number of EPR pairs to generate, defaults to 1
:param time_unit: which time unit to use for the `max_time` parameter
:param max_time: maximum number of time units (see `time_unit`) the Host is
willing to wait for entanglement generation of a single pair. If generation
does not succeed within this time, the whole subroutine that this request
is part of is reset and run again by the quantum node controller.
:param expect_phi_plus: whether to assume that the EPR pairs that are created
are in the Phi+ (or Phi_00) state. Defaults to True. If True, the compiler
will make sure that if the physical link actually produced another Bell
state, the behavior seen by the application is still as if a Phi+ state
was actually produced.
        :param basis_local: basis to measure in on this node
        :param rotations_local: rotations to apply before measuring on this node
        :param random_basis_local: random bases to choose from when measuring on this
            node
        :param min_fidelity_all_at_end: attempt to keep all entangled qubits at or
            above this fidelity until the last qubit has been generated (see
            `create_keep` for details)
:return: list of entanglement info objects per created pair.
"""
if basis_local is not None:
rotations_local = basis_to_rotation(basis_local)
return self.conn.builder.sdk_create_epr_rsp(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
time_unit=time_unit,
max_time=max_time,
expect_phi_plus=expect_phi_plus,
random_basis_local=random_basis_local,
rotations_local=rotations_local,
min_fidelity_all_at_end=min_fidelity_all_at_end,
)
)
def create(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
tp: EPRType = EPRType.K,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
basis_local: EprMeasBasis = None,
basis_remote: EprMeasBasis = None,
rotations_local: Tuple[int, int, int] = (0, 0, 0),
rotations_remote: Tuple[int, int, int] = (0, 0, 0),
random_basis_local: Optional[RandomBasis] = None,
random_basis_remote: Optional[RandomBasis] = None,
) -> Union[List[Qubit], List[EprMeasureResult], List[LinkLayerOKTypeM]]:
"""Ask the network stack to generate EPR pairs with the remote node.
A `create` operation must always be matched by a `recv` operation on the remote
node.
If the type of request is Create and Keep (CK, or just K) and if `sequential`
is False (default), this operation returns a list of Qubit objects representing
the local qubits that are each one half of the generated pairs. These qubits
can then be manipulated locally just like locally initialized qubits, by e.g.
applying gates or measuring them.
Each qubit also contains information about the entanglement generation that
        led to its creation, which can be accessed via its `entanglement_info` property.
A typical example for just generating one pair with another node would be:
.. code-block::
q = epr_socket.create()[0]
# `q` can now be used as a normal qubit
If the type of request is Measure Directly (MD, or just M), this operation
returns a list of Linklayer response objects. These objects contain information
about the entanglement generation and includes the measurement outcome and
basis used. Note that all values are `Future` objects. This means that the
current subroutine must be flushed before the values become defined.
An example for generating 10 pairs with another node that are immediately
measured:
.. code-block::
# list of Futures that become defined when subroutine is flushed
outcomes = []
with NetQASMConnection("alice", epr_sockets=[epr_socket]):
ent_infos = epr_socket.create(number=10, tp=EPRType.M)
for ent_info in ent_infos:
outcomes.append(ent_info.measurement_outcome)
For "Measure Directly"-type requests, the basis to measure in can also be
specified. There are 3 ways to specify a basis:
* using one of the `EprMeasBasis` variants
* by specifying 3 rotation angles, interpreted as an X-rotation, a Y-rotation
and another X-rotation. For example, setting `rotations_local` to (8, 0, 0)
means that before measuring, an X-rotation of 8*pi/16 = pi/2 radians is
applied to the qubit.
* using one of the `RandomBasis` variants, in which case one of the bases of
that variant is chosen at random just before measuring
NOTE: the node that initiates the entanglement generation, i.e. the one that
calls `create` on its EPR socket, also controls the measurement bases of the
receiving node (by setting e.g. `rotations_remote`). The receiving node cannot
change this.
        If `sequential` is False (default), all requested EPR pairs are generated
at once, before returning the results (qubits or entanglement info objects).
If `sequential` is True, a callback function (`post_routine`) should be
specified. After generating one EPR pair, this callback will be called, before
generating the next pair. This method can e.g. be used to generate many EPR
pairs (more than the number of physical qubits available), by measuring (and
freeing up) each qubit before the next pair is generated.
For example:
.. code-block::
outcomes = alice.new_array(num)
def post_create(conn, q, pair):
q.H()
outcome = outcomes.get_future_index(pair)
q.measure(outcome)
epr_socket.create(number=num, post_routine=post_create, sequential=True)
:param number: number of EPR pairs to generate, defaults to 1
        :param post_routine: callback function for each generated pair. Only used if
`sequential` is True.
The callback should take three arguments `(conn, q, pair)` where
* `conn` is the connection (e.g. `self`)
* `q` is the entangled qubit (of type `FutureQubit`)
* `pair` is a register holding which pair is handled (0, 1, ...)
:param sequential: whether to use callbacks after each pair, defaults to False
:param tp: type of entanglement generation, defaults to EPRType.K. Note that
            the corresponding `recv` of the remote node's EPR socket must specify the
same type.
:param time_unit: which time unit to use for the `max_time` parameter
:param max_time: maximum number of time units (see `time_unit`) the Host is
willing to wait for entanglement generation of a single pair. If generation
does not succeed within this time, the whole subroutine that this request
is part of is reset and run again by the quantum node controller.
:param basis_local: basis to measure in on this node for M-type requests
:param basis_remote: basis to measure in on the remote node for M-type requests
:param rotations_local: rotations to apply before measuring on this node
(for M-type requests)
:param rotations_remote: rotations to apply before measuring on remote node
(for M-type requests)
:param random_basis_local: random bases to choose from when measuring on this
node (for M-type requests)
:param random_basis_remote: random bases to choose from when measuring on
the remote node (for M-type requests)
:return: For K-type requests: list of qubits created. For M-type requests:
list of entanglement info objects per created pair.
"""
self._logger.warning(
"EPRSocket.create() is deprecated. Use one of "
"create_keep, create_measure, or create_rsp instead."
)
if tp == EPRType.K:
return self.create_keep(
number=number,
post_routine=post_routine,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
)
elif tp == EPRType.M:
return self.create_measure(
number=number,
time_unit=time_unit,
max_time=max_time,
basis_local=basis_local,
basis_remote=basis_remote,
rotations_local=rotations_local,
rotations_remote=rotations_remote,
random_basis_local=random_basis_local,
random_basis_remote=random_basis_remote,
)
elif tp == EPRType.R:
return self.create_rsp(
number=number,
time_unit=time_unit,
max_time=max_time,
basis_local=basis_local,
random_basis_local=random_basis_local,
)
assert False
def create_context(
self,
number: int = 1,
sequential: bool = False,
time_unit: TimeUnit = TimeUnit.MICRO_SECONDS,
max_time: int = 0,
) -> ContextManager[Tuple[FutureQubit, RegFuture]]:
"""Create a context that is executed for each generated EPR pair consecutively.
Creates EPR pairs with a remote node and handles each pair by
the operations defined in a subsequent context. See the example below.
.. code-block::
with epr_socket.create_context(number=10) as (q, pair):
q.H()
m = q.measure()
NOTE: even though all pairs are handled consecutively, they are still
generated concurrently by the network stack. By setting `sequential` to True,
the network stack only generates the next pair after the context for the
previous pair has been executed, similar to using a callback (`post_routine`)
in the `create` method.
:param number: number of EPR pairs to generate, defaults to 1
:param sequential: whether to generate pairs sequentially, defaults to False
"""
return self.conn.builder.sdk_create_epr_context(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=sequential,
time_unit=time_unit,
max_time=max_time,
)
)
def recv_keep(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> List[Qubit]:
"""Ask the network stack to wait for the remote node to generate EPR pairs,
which are kept in memory.
A `recv_keep` operation must always be matched by a `create_keep` operation on
the remote node. The number of generated pairs must also match.
For more information see the documentation of `create_keep`.
:param number: number of pairs to generate, defaults to 1
:param post_routine: callback function used when `sequential` is True
:param sequential: whether to call the callback after each pair generation,
defaults to False
:param min_fidelity_all_at_end: the minimum fidelity that *all* entangled
qubits should ideally still have at the moment the last qubit has been
generated. For example, when specifying `number=2` and
            `min_fidelity_all_at_end=80`, the program will automatically try to
make sure that both qubits have a fidelity of at least 80% when the
second qubit has been generated. It will attempt to do this by
automatically re-trying the entanglement generation if the fidelity
            constraint is not satisfied. This is, however, an *attempt*, not
            a guarantee.
        :param max_tries: maximum number of re-tries that should be made to achieve
            the `min_fidelity_all_at_end` constraint.
:return: list of qubits created
"""
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
qubits, _ = self.conn._builder.sdk_recv_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits
def recv_keep_with_info(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> Tuple[List[Qubit], List[EprKeepResult]]:
"""Same as recv_keep but also return the EPR generation information coming
from the network stack.
For more information see the documentation of `recv_keep`.
:param number: number of pairs to generate, defaults to 1
:return: tuple with (1) list of qubits created, (2) list of EprKeepResult objects
"""
qubits, info = self.conn._builder.sdk_recv_epr_keep(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=post_routine,
sequential=sequential,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits, info
def recv_measure(
self,
number: int = 1,
) -> List[EprMeasureResult]:
"""Ask the network stack to wait for the remote node to generate EPR pairs,
which are immediately measured (on both nodes).
A `recv_measure` operation must always be matched by a `create_measure`
operation on the remote node. The number and type of generation must also match.
For more information see the documentation of `create_measure`.
:param number: number of pairs to generate, defaults to 1
:return: list of entanglement info objects per created pair.
"""
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
return self.conn.builder.sdk_recv_epr_measure(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
),
)
def recv_rsp(
self,
number: int = 1,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> List[Qubit]:
"""Ask the network stack to wait for remote state preparation from another node.
A `recv_rsp` operation must always be matched by a `create_rsp` operation on
the remote node. The number and type of generation must also match.
For more information see the documentation of `create_rsp`.
:param number: number of pairs to generate, defaults to 1
:return: list of qubits created
"""
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
qubits, _ = self.conn.builder.sdk_recv_epr_rsp(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits
def recv_rsp_with_info(
self,
number: int = 1,
min_fidelity_all_at_end: Optional[int] = None,
max_tries: Optional[int] = None,
) -> Tuple[List[Qubit], List[EprKeepResult]]:
"""Same as recv_rsp but also return the EPR generation information coming
from the network stack.
For more information see the documentation of `recv_rsp`.
:param number: number of pairs to generate, defaults to 1
:return: tuple with (1) list of qubits created, (2) list of EprKeepResult objects
"""
if self.conn is None:
raise RuntimeError("EPRSocket does not have an open connection")
qubits, infos = self.conn.builder.sdk_recv_epr_rsp(
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=False,
min_fidelity_all_at_end=min_fidelity_all_at_end,
max_tries=max_tries,
),
)
return qubits, infos
def recv(
self,
number: int = 1,
post_routine: Optional[Callable] = None,
sequential: bool = False,
tp: EPRType = EPRType.K,
) -> Union[List[Qubit], List[EprMeasureResult], List[LinkLayerOKTypeR]]:
"""Ask the network stack to wait for the remote node to generate EPR pairs.
A `recv` operation must always be matched by a `create` operation on the remote
node. See also the documentation of `create`.
The number and type of generation must also match.
In case of Measure Directly requests, it is the initiating node (that calls
`create`) which specifies the measurement bases. This should not and cannot be
done in `recv`.
For more information see the documentation of `create`.
:param number: number of pairs to generate, defaults to 1
:param post_routine: callback function used when `sequential` is True
:param sequential: whether to call the callback after each pair generation,
defaults to False
:param tp: type of entanglement generation, defaults to EPRType.K
:return: For K-type requests: list of qubits created. For M-type requests:
list of entanglement info objects per created pair.
"""
self._logger.warning(
"EPRSocket.recv() is deprecated. Use one of "
"recv_keep, recv_measure, or recv_rsp instead."
)
if tp == EPRType.K:
return self.recv_keep(
number=number,
post_routine=post_routine,
sequential=sequential,
)
elif tp == EPRType.M:
return self.recv_measure(number=number)
elif tp == EPRType.R:
return self.recv_rsp(number=number)
assert False
@contextmanager
def recv_context(
self,
number: int = 1,
sequential: bool = False,
):
"""Receives EPR pair with a remote node (see doc of :meth:`~.create_context`)"""
try:
# NOTE loop_register is the register used for looping over the generated pairs
(
pre_commands,
loop_register,
ent_results_array,
output,
pair,
) = self.conn.builder._pre_epr_context(
role=EPRRole.RECV,
params=EntRequestParams(
remote_node_id=self.remote_node_id,
epr_socket_id=self._epr_socket_id,
number=number,
post_routine=None,
sequential=sequential,
),
)
yield output, pair
finally:
self.conn.builder._post_epr_context(
pre_commands=pre_commands,
number=number,
loop_register=loop_register,
ent_results_array=ent_results_array,
pair=pair,
)
def _get_node_id(self, app_name: str) -> int:
return self.conn.network_info.get_node_id_for_app(app_name=app_name)
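# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal two-node example of the create/recv pattern described in the EPRSocket
# docstring. The `netqasm.sdk.external` import path and the app/node names are
# assumptions based on typical NetQASM application code; adapt them to your setup.
def _example_alice_app():
    from netqasm.sdk.external import NetQASMConnection  # assumed import path

    epr_socket = EPRSocket("bob")
    with NetQASMConnection("alice", epr_sockets=[epr_socket]) as conn:
        # Initiate generation of one EPR pair and measure the local half.
        q = epr_socket.create_keep(number=1)[0]
        m = q.measure()
        conn.flush()  # the Future `m` becomes defined once the subroutine is flushed
    return m


def _example_bob_app():
    from netqasm.sdk.external import NetQASMConnection  # assumed import path

    epr_socket = EPRSocket("alice")
    with NetQASMConnection("bob", epr_sockets=[epr_socket]) as conn:
        # Wait for alice's create_keep and measure the local half of the pair.
        q = epr_socket.recv_keep(number=1)[0]
        m = q.measure()
        conn.flush()
    return m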
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The view layer of logic for the BM gCal Assistant.
The logic here defines the behavior of the webhook when messages are received
from users messaging through Business Messages.
"""
import base64
import datetime
import hashlib
import json
import os
import uuid
from businessmessages import businessmessages_v1_client as bm_client
from businessmessages.businessmessages_v1_messages import (
BusinessmessagesConversationsMessagesCreateRequest,
BusinessMessagesMessage, BusinessMessagesRepresentative,
BusinessMessagesSuggestion, BusinessMessagesSuggestedReply,
BusinessmessagesConversationsEventsCreateRequest, BusinessMessagesEvent,
BusinessMessagesAuthenticationRequest, BusinessMessagesAuthenticationRequestOauth
)
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from google_cal_app.models import Conversation
from googleapiclient.discovery import build
from oauth2client import client
from oauth2client.service_account import ServiceAccountCredentials
import requests
# The location of the service account credentials
SERVICE_ACCOUNT_LOCATION = 'resources/bm-agent-service-account-credentials.json'
# Set of commands the bot understands
CMD_LOGIN = 'login'
CMD_MY_DAY_SUMMARY = 'day-summary'
CMD_FOCUS_SPRINT_SLOTS = 'focus-sprints'
CMD_CANCEL_ALL_MEETINGS = 'cancel-all-meetings'
CMD_YES_CANCEL = 'yes-cancel'
CMD_NO_CANCEL = 'no-do-not-cancel'
# The representative type that all messages are sent as
BOT_REPRESENTATIVE = BusinessMessagesRepresentative(
representativeType=BusinessMessagesRepresentative
.RepresentativeTypeValueValuesEnum.BOT,
displayName='BM gCal Assistant',
avatarImage='https://lh3.googleusercontent.com/9PMLInqtfgnRnV-9QUgYj8W-ZAutv-49KsYmHthZayM9YnCsd01P0eNhbqtu9QoIF31tKzgwo-x1oCkVIQas5Q'
)
LARGE_DATE = datetime.datetime(9999, 12, 30, 12, 59, 59, 59)
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
@csrf_exempt
def callback(request):
"""Callback URL.
Processes messages sent from the user.
Args:
request (HttpRequest): The request object that django passes to the
function
Returns:
An :HttpResponse: containing browser renderable HTML.
"""
if request.method == 'POST':
request_data = request.body.decode('utf8').replace("'", '"')
request_body = json.loads(request_data)
    print(f'request_body: {request_body}')
# Extract the conversation id and message text
conversation_id = request_body.get('conversationId')
conv = get_conversation(conversation_id)
    print(f'conversation_id: {conversation_id}')
try:
display_name = request_body.get('context').get('userInfo').get(
'displayName')
except Exception as e:
print(e)
display_name = None
# Check that the message and text body exist
if 'message' in request_body and 'text' in request_body['message']:
message = request_body['message']['text']
      print(f'message: {message}')
route_message(message, conv)
elif 'suggestionResponse' in request_body:
message = request_body['suggestionResponse']['postbackData']
      print(f'message: {message}')
route_message(message, conv)
elif 'authenticationResponse' in request_body:
try:
auth_code = request_body.get('authenticationResponse').get('code')
redirect_uri = request_body.get('authenticationResponse').get('redirectUri')
print(f'redirect_uri extracted from authenticationResponse {redirect_uri}')
# Exchange auth_code with OAuth provider and get access_token
code_verifier = conv.code_verifier
if code_verifier is None or auth_code is None:
print('There was an error.')
else:
access_token = request_access_token(auth_code, code_verifier, redirect_uri)
# Save the access token in an encrypted format using save_token
send_day_summary_message(conv, access_token)
except Exception as e:
print(f'Login error: {e}')
elif 'userStatus' in request_body:
if 'isTyping' in request_body['userStatus']:
print('User is typing')
elif 'requestedLiveAgent' in request_body['userStatus']:
print('User requested transfer to live agent')
return HttpResponse('Response.')
return HttpResponse('This webhook expects a POST request.')
def request_access_token(auth_code, code_verifier, redirect_uri):
"""Requests access_token from identity provider.
Args:
auth_code (str): Authorization code to request access_token
    code_verifier (str): The PKCE code_verifier matching the code_challenge
      sent when the login chip was invoked.
    redirect_uri (str): The redirect URI used when requesting the auth_code.
  Returns:
    The access_token string, or None if it could not be retrieved.
  """
obj = {
'client_secret': os.environ['OAUTH_CLIENT_SECRET'],
'client_id': os.environ['OAUTH_CLIENT_ID'],
'grant_type': 'authorization_code',
'code': auth_code,
'code_verifier': code_verifier,
'redirect_uri': redirect_uri
}
res = requests.post('https://oauth2.googleapis.com/token', data=obj)
res_dict = json.loads(res.text)
access_token = res_dict.get('access_token')
if access_token is None:
print(f'Could not find the access token: {res.content}')
return None
print(f'We found the access_token.')
return access_token
def get_conversation(conversation_id):
"""Returns a google_cal_app.Conversation object.
Args:
conversation_id (str): The unique id for this user and agent.
"""
  conv = Conversation.objects.filter(id=conversation_id)
  if not conv:
    # Model.save() returns None, so save first and then return the new object.
    new_conv = Conversation(id=conversation_id)
    new_conv.save()
    return new_conv
  else:
    return conv[0]
def route_message(message, conv):
"""Routes the message received from the user to create a response.
Args:
message (str): The message text received from the user.
conv (Conversation): The unique id for this user and agent.
"""
normalized_message = message.lower()
print(f'Routing message: {normalized_message}')
if normalized_message == CMD_LOGIN:
invoke_login_chip(conv)
else:
echo_message(message, conv)
def fetch_events(access_token, today):
"""Fetches events from Calendar API.
Args:
access_token (str): The user's access_token to query data with.
    today (str): ISO-formatted date string (YYYY-MM-DD) for today.
Returns:
event_items (list): A list of sorted event items.
"""
credentials = client.AccessTokenCredentials(
access_token, 'USER_AGENT')
service = build('calendar', 'v3', credentials=credentials)
events = service.events().list(
calendarId='primary',
timeMax=f'{today}T23:59:59-07:00',
timeMin=f'{today}T06:00:00-07:00').execute()
event_items = events.get('items')
event_items.sort(
key=lambda x: LARGE_DATE
if (x.get('start') is None or x.get('start').get('dateTime') is None)
else datetime.datetime.strptime(
x.get('start').get('dateTime')[:19], DATE_FORMAT))
print("Returning")
return event_items
def send_day_summary_message(conv, access_token):
"""Fetches calendar data with access_token and sends it to the conversation.
Args:
    conv (Conversation): The unique id for this user and agent.
    access_token (str): The user's access_token to query calendar data with.
  """
try:
print("Send summary of my day")
today = str(datetime.datetime.now().date())
event_items = fetch_events(access_token, today)
print(f"Events: {event_items}")
event_set = set()
event_list_message = ''
for event in event_items:
try:
if event.get('status') == 'confirmed' and today in event.get(
'start').get('dateTime') and event.get(
'summary') not in event_set:
event_list_message = event_list_message + '- ' + event.get(
'summary') + '\n'
event_set.add(event.get('summary'))
except Exception as e:
print(f'Exception A: {e}')
if len(event_set) > 4:
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
text='Looks like you have a lot of meetings today!')
send_message(message_obj, conv.id)
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
        text='Here\'s the list of items on your calendar...')
send_message(message_obj, conv.id)
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
suggestions=get_suggestions(),
text=event_list_message)
send_message(message_obj, conv.id)
except Exception as e:
print(f'Exception B: {e}')
def invoke_login_chip(conv, message=None):
"""Invokes the login chip within the conversation.
Args:
conv (Conversation): The unique id for this user and agent.
message (str): The message text received from the user.
"""
message = message or 'To see your calendar summary, please sign in!'
message_id = str(uuid.uuid4())
# Generate a code_verifier and code_challenge used in the OAuth 2.0 PKCE flow.
# code_challenge is shared with Google to send to kick start the auth flow
# with the identity provider. Then exchange the auth_code along with the
# code_verifier to the identity provider to get an access_token to make
# requests on behalf of the user.
random_val = str(uuid.uuid1()).encode()
base64_random = base64.urlsafe_b64encode(random_val)
code_verifier = base64_random.decode('utf-8')
hashed_code_verifier = hashlib.sha256(code_verifier.encode('utf-8')).digest()
utf8_decoded_verifier = base64.urlsafe_b64encode(hashed_code_verifier).decode(
'utf-8')
code_challenge = utf8_decoded_verifier.replace('=', '')
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
suggestions=get_auth_chip_suggestion(
os.environ['OAUTH_CLIENT_ID'],
code_challenge,
['profile','https://www.googleapis.com/auth/calendar.readonly']),
text=message,
fallback='Your device does not support suggestions')
send_message(message_obj, conv.id)
print(f'The code verifier is: {code_verifier}')
conv.code_verifier = code_verifier
conv.save()
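# Hedged illustration (not part of the original view code): a standalone restatement
# of the PKCE S256 derivation performed inline in invoke_login_chip above, i.e.
# code_challenge = base64url(sha256(code_verifier)) with the '=' padding stripped.
def derive_pkce_code_challenge(code_verifier):
  """Illustrative helper: returns the S256 code_challenge for a code_verifier."""
  digest = hashlib.sha256(code_verifier.encode('utf-8')).digest()
  return base64.urlsafe_b64encode(digest).decode('utf-8').replace('=', '')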
def echo_message(message, conv):
"""Sends the message received from the user back to the user.
Args:
message (str): The message text received from the user.
conv (Conversation): The unique id for this user and agent.
"""
message_obj = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BOT_REPRESENTATIVE,
text=f"Hey! Here's the message you sent:\n\n{message}"
)
send_message(message_obj, conv.id)
def send_message(message, conversation_id):
"""Posts a message to the Business Messages API.
Args:
message (obj): The message object payload to send to the user.
conversation_id (str): The unique id for this user and agent.
"""
credentials = ServiceAccountCredentials.from_json_keyfile_name(
SERVICE_ACCOUNT_LOCATION,
scopes=['https://www.googleapis.com/auth/businessmessages'])
bm_credentials = bm_client.BusinessmessagesV1(credentials=credentials)
# Send the typing started event
create_request = BusinessmessagesConversationsEventsCreateRequest(
eventId=str(uuid.uuid4().int),
businessMessagesEvent=BusinessMessagesEvent(
representative=BOT_REPRESENTATIVE,
eventType=BusinessMessagesEvent.EventTypeValueValuesEnum.TYPING_STARTED
),
parent='conversations/' + conversation_id)
bm_client.BusinessmessagesV1.ConversationsEventsService(
client=bm_credentials).Create(request=create_request)
# Create the message request
create_request = BusinessmessagesConversationsMessagesCreateRequest(
businessMessagesMessage=message,
parent='conversations/' + conversation_id)
bm_client.BusinessmessagesV1.ConversationsMessagesService(
client=bm_credentials).Create(request=create_request)
# Send the typing stopped event
create_request = BusinessmessagesConversationsEventsCreateRequest(
eventId=str(uuid.uuid4().int),
businessMessagesEvent=BusinessMessagesEvent(
representative=BOT_REPRESENTATIVE,
eventType=BusinessMessagesEvent.EventTypeValueValuesEnum.TYPING_STOPPED
),
parent='conversations/' + conversation_id)
bm_client.BusinessmessagesV1.ConversationsEventsService(
client=bm_credentials).Create(request=create_request)
def get_auth_chip_suggestion(client_id, code_challenge, scopes):
"""Returns an authorization chip
Arguments:
client_id (str): client_id from your client configuration with the
identity provider
code_challenge (str): code_challenge generated from the code_verifier for
use with PKCE in OAuth 2.0 access_token exchange
scopes (List): A list of scopes you want the access token to grant API
access to
Returns:
A :list: BusinessMessagesSuggestions invoking the auth chip
"""
return [
BusinessMessagesSuggestion(
authenticationRequest=BusinessMessagesAuthenticationRequest(
oauth=BusinessMessagesAuthenticationRequestOauth(
clientId=client_id, codeChallenge=code_challenge, scopes=scopes))),
]
def get_suggestions():
"""Creates a list of suggestions.
Returns:
A :list: A list of sample BusinessMessagesSuggestions.
"""
return [
BusinessMessagesSuggestion(
reply=BusinessMessagesSuggestedReply(
text='Let\'s do it again!', postbackData=CMD_LOGIN)),
]
def landing_placeholder(request):
"""Creates an HttpResponse for a web request at the root of the project.
Args:
request (HttpRequest): The django web request object
Returns:
An :HttpResponse: containing browser renderable HTML.
"""
return HttpResponse("""
<h1>Welcome to gCal BM Assistant</h1>
<br/><br/>
Check out the <a href="https://business-communications.sandbox.google.com/console/">
Business Communications Developer Console</a> to access this agent's
test URLs.
""")
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper to adapt a Distrax bijector for use in TFP."""
from typing import Any, Optional, Tuple
import chex
from distrax._src.bijectors import bijector
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
Array = chex.Array
Bijector = bijector.Bijector
def tfp_compatible_bijector(
base_bijector: Bijector,
name: Optional[str] = None):
"""Create a TFP-compatible bijector from a Distrax bijector.
Given a Distrax bijector, return a wrapped bijector that behaves as a TFP
bijector, to be used in TFP meta-bijectors and the TransformedDistribution.
In particular, the wrapped bijector implements the methods
`[forward|inverse]_event_ndims`, `[forward|inverse]_event_shape`,
`[forward|inverse]_event_shape_tensor`, `[forward|inverse]_log_det_jacobian`,
and the properties `[forward|inverse]_min_event_ndims`. Other attributes are
delegated to the `base_bijector`.
The methods of the resulting object do not take a `name` argument,
unlike their TFP equivalents.
The `shape` methods are implemented by tracing the `forward` and `inverse`
methods of the bijector, applied to a zero tensor of the requested dtype. If
the `forward` or `inverse` methods are not traceable or cannot be applied to a
zero tensor, then we cannot guarantee the correctness of the result.
Args:
base_bijector: A Distrax bijector.
name: The bijector name.
Returns:
An object that behaves like a TFP bijector.
"""
name_ = name
class TFPCompatibleBijector(base_bijector.__class__):
"""Class to wrap a Distrax bijector."""
def __init__(self):
self._is_injective = True
self._is_permutation = False
self._parts_interact = False
self.dtype = None
self.has_static_min_event_ndims = True
self.forward_min_event_ndims = base_bijector.event_ndims_in
self.inverse_min_event_ndims = base_bijector.event_ndims_out
def __getattr__(self, name: str):
return getattr(base_bijector, name)
    def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""See `Bijector.forward_and_log_det`."""
return base_bijector.forward_and_log_det(x)
@property
def name(self) -> str:
"""The name of the wrapped bijector."""
return name_ or f"TFPCompatible{base_bijector.name}"
def experimental_batch_shape(self, x_event_ndims=None, y_event_ndims=None):
raise NotImplementedError()
def experimental_batch_shape_tensor(
self, x_event_ndims=None, y_event_ndims=None):
raise NotImplementedError()
def forward_dtype(self, _: jnp.dtype) -> None:
"""Returns None, making no promise regarding dtypes."""
return None
def inverse_dtype(self, _: jnp.dtype) -> None:
"""Returns None, making no promise regarding dtypes."""
return None
def forward_event_ndims(self, event_ndims: int) -> int:
"""Returns the number of event dimensions of the output of `forward`."""
extra_event_ndims = self._check_ndims(
"Forward", event_ndims, base_bijector.event_ndims_in)
return base_bijector.event_ndims_out + extra_event_ndims
def inverse_event_ndims(self, event_ndims: int) -> int:
"""Returns the number of event dimensions of the output of `inverse`."""
extra_event_ndims = self._check_ndims(
"Inverse", event_ndims, base_bijector.event_ndims_out)
return base_bijector.event_ndims_in + extra_event_ndims
def forward_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape:
"""Returns the shape of the output of `forward` as a `TensorShape`."""
self._check_shape("Forward", event_shape, base_bijector.event_ndims_in)
forward_event_shape = jax.eval_shape(
base_bijector.forward, jnp.zeros(event_shape)).shape
return tfp.tf2jax.TensorShape(forward_event_shape)
def inverse_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape:
"""Returns the shape of the output of `inverse` as a `TensorShape`."""
self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out)
inverse_event_shape = jax.eval_shape(
base_bijector.inverse, jnp.zeros(event_shape)).shape
return tfp.tf2jax.TensorShape(inverse_event_shape)
def forward_event_shape_tensor(self, event_shape) -> Array:
"""Returns the shape of the output of `forward` as a `jnp.array`."""
self._check_shape("Forward", event_shape, base_bijector.event_ndims_in)
forward_event_shape = jax.eval_shape(
base_bijector.forward, jnp.zeros(event_shape)).shape
return jnp.array(forward_event_shape, dtype=jnp.int32)
def inverse_event_shape_tensor(self, event_shape) -> Array:
"""Returns the shape of the output of `inverse` as a `jnp.array`."""
self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out)
inverse_event_shape = jax.eval_shape(
base_bijector.inverse, jnp.zeros(event_shape)).shape
return jnp.array(inverse_event_shape, dtype=jnp.int32)
def forward_log_det_jacobian(
self, x: Array, event_ndims: Optional[int] = None) -> Array:
"""See `Bijector.forward_log_det_jacobian`."""
extra_event_ndims = self._check_ndims(
"Forward", event_ndims, base_bijector.event_ndims_in)
fldj = base_bijector.forward_log_det_jacobian(x)
return math.sum_last(fldj, extra_event_ndims)
def inverse_log_det_jacobian(
self, y: Array, event_ndims: Optional[int] = None) -> Array:
"""See `Bijector.inverse_log_det_jacobian`."""
extra_event_ndims = self._check_ndims(
"Inverse", event_ndims, base_bijector.event_ndims_out)
ildj = base_bijector.inverse_log_det_jacobian(y)
return math.sum_last(ildj, extra_event_ndims)
def _check_ndims(
self, direction: str, event_ndims: int, expected_ndims: int) -> int:
"""Checks that `event_ndims` are correct and returns any extra ndims."""
if event_ndims is not None and event_ndims < expected_ndims:
raise ValueError(f"{direction} `event_ndims` of {self.name} must be at "
f"least {expected_ndims} but was passed {event_ndims} "
f"instead.")
return 0 if event_ndims is None else event_ndims - expected_ndims
def _check_shape(
self, direction: str, event_shape: Any, expected_ndims: int):
"""Checks that `event_shape` is correct, raising ValueError otherwise."""
if len(event_shape) < expected_ndims:
raise ValueError(f"{direction} `event_shape` of {self.name} must have "
f"at least {expected_ndims} dimensions, but was "
f"{event_shape} which has only {len(event_shape)} "
f"dimensions instead.")
return TFPCompatibleBijector()
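# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Shows the wrapper being combined with a TFP-on-JAX TransformedDistribution.
# `distrax.Tanh` is assumed to be available from the public distrax package; any
# other Distrax bijector could be substituted.
def _example_tfp_compatible_bijector():
  import distrax  # assumed public package import
  wrapped = tfp_compatible_bijector(distrax.Tanh(), name="TanhForTFP")
  base = tfd.Normal(loc=0.0, scale=1.0)
  dist = tfd.TransformedDistribution(distribution=base, bijector=wrapped)
  sample = dist.sample(seed=jax.random.PRNGKey(0))
  return dist.log_prob(sample)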
|
import pandas
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
from pylab import rcParams
df = pandas.read_csv('rewards_loc3.csv')
ucb,ts,ovr,egr,egr2,agr,agr2,efr,ac,aac,sft = df['ucb'],df['ts'],df['ovr'],\
df['egr'],df['egr2'],df['agr'],df['agr2'],df['efr'],df['ac'],df['aac'],df['sft']
#y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = np.mean(ucb), np.mean(ts) \
#,np.mean(ovr), np.mean(egr), np.mean(egr2) \
#,np.mean(agr), np.mean(agr2), np.mean(efr) \
#,np.mean(ac), np.mean(aac), np.mean(sft)
def get_mean_reward(reward_lst):
mean_rew=list()
for r in range(len(reward_lst)):
mean_rew.append(sum(reward_lst[:r+1]) / ((r+1)))
return mean_rew
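# Illustrative alternative (not used below): the same running mean can be computed
# in vectorized form with numpy's cumulative sum, avoiding the repeated prefix sums
# performed by get_mean_reward above.
def get_mean_reward_vectorized(reward_lst):
    rewards = np.asarray(reward_lst, dtype=float)
    return (np.cumsum(rewards) / np.arange(1, len(rewards) + 1)).tolist()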
y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = get_mean_reward(ucb), get_mean_reward(ts) \
,get_mean_reward(ovr), get_mean_reward(egr), get_mean_reward(egr2) \
,get_mean_reward(agr), get_mean_reward(agr2), get_mean_reward(efr) \
,get_mean_reward(ac), get_mean_reward(aac), get_mean_reward(sft)
x1, x2 = [index for index in range(len(ucb))], [index for index in range(len(ts))]
x3, x4 = [index for index in range(len(df['ovr']))], [index for index in range(len(df['egr']))]
x5, x6 = [index for index in range(len(df['egr2']))], [index for index in range(len(df['agr']))]
x7, x8 = [index for index in range(len(df['agr2']))], [index for index in range(len(df['efr']))]
x9, x10 = [index for index in range(len(df['ac']))], [index for index in range(len(df['aac']))]
x11 = [index for index in range(len(df['sft']))]
def CI_model(y, confidence = 0.95):
std_err_y = st.sem(y)
n_y = len(y)
h_y = std_err_y * st.t.ppf((1 + confidence) / 2, n_y - 1)
return h_y
h_y1, h_y2, h_y3, h_y4, h_y5, h_y6, h_y7, h_y8, h_y9, h_y10, h_y11 = CI_model(ucb), CI_model(ts), CI_model(ovr),\
CI_model(egr), CI_model(egr2), CI_model(agr), CI_model(agr2), CI_model(efr), CI_model(ac), CI_model(aac), CI_model(sft)
plt.errorbar(x1, y1, yerr= h_y1, label='Bootstrapped Upper-Confidence Bound (C.I.=80%)')
plt.errorbar(x2, y2, yerr= h_y2, label='Bootstrapped Thompson Sampling')
plt.errorbar(x3, y3, yerr= h_y3, label='Separate Classifiers + Beta Prior')
plt.errorbar(x4, y4, yerr= h_y4, label='Epsilon-Greedy (p0=20%, decay=0.9999)')
plt.errorbar(x5, y5, yerr= h_y5, label='Epsilon-Greedy (p0=20%, no decay)')
plt.errorbar(x6, y6, yerr= h_y6, label='Adaptive Greedy (decaying threshold)')
plt.errorbar(x7, y7, yerr= h_y7, label='Adaptive Greedy (p0=30%, decaying percentile)')
plt.errorbar(x8, y8, yerr= h_y8, label='Explore First (n=1,500)')
plt.errorbar(x9, y9, yerr= h_y9, label='Active Explorer')
plt.errorbar(x10, y10, yerr= h_y10, label='Adaptive Active Greedy')
plt.errorbar(x11, y11, yerr= h_y11, label='Softmax Explorer')
#plt.plot(np.repeat(y.mean(axis=0).max(),len(rewards_sft)),linewidth=4,ls='dashed', label='Overall Best Arm (no context)')
ax = plt.subplot(111)
plt.xlabel('Rounds (models were updated every 50 rounds)', size=10)
plt.ylabel('Cumulative Mean Reward', size=10)
plt.title('Comparison of Online Contextual Bandit Policies in location 3')
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig("location_3.png", bbox_inches='tight', dpi = 600)
|
from datetime import datetime
from dino.config import UserKeys, RedisKeys, SessionKeys
from dino.db.rdbms.models import Channels
from dino.db.rdbms.models import Rooms
from test.base import BaseTest
from test.db import BaseDatabaseTest
class DatabaseSqliteTest(BaseDatabaseTest):
def setUp(self):
self.set_up_env('sqlite')
def tearDown(self):
from dino.db.rdbms.dbman import Database
from dino.db.rdbms.dbman import DeclarativeBase
db = Database(self.env)
con = db.engine.connect()
trans = con.begin()
for table in reversed(DeclarativeBase.metadata.sorted_tables):
con.execute(table.delete())
trans.commit()
con.close()
self.env.cache._flushall()
def test_get_user_infos(self):
self.db.set_user_info(BaseTest.USER_ID, {SessionKeys.gender.value: 'm', 'last_login': datetime.utcnow()})
self.db.set_user_info(BaseTest.OTHER_USER_ID, {SessionKeys.gender.value: 'w', 'last_login': datetime.utcnow()})
self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.USER_ID))
self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.OTHER_USER_ID))
infos = self.db.get_user_infos({BaseTest.USER_ID, BaseTest.OTHER_USER_ID})
self.assertEqual('m', infos[BaseTest.USER_ID][SessionKeys.gender.value])
self.assertEqual('w', infos[BaseTest.OTHER_USER_ID][SessionKeys.gender.value])
def test_set_two_owners_on_room(self):
self._test_set_two_owners_on_room()
def test_is_admin_before_create(self):
self._test_is_admin_before_create()
def test_is_admin_after_create(self):
self._test_is_admin_after_create()
def test_is_admin_after_create_set_admin(self):
self._test_is_admin_after_create_set_admin()
def test_channel_for_room_no_channel(self):
self._test_channel_for_room_no_channel()
def test_channel_for_room_with_channel_without_room(self):
self._test_channel_for_room_with_channel_without_room()
def test_channel_for_room_with_channel_with_room(self):
self._test_channel_for_room_with_channel_with_room()
def test_leave_room_not_joined(self):
self._test_leave_room_not_joined()
def test_leave_room_joined(self):
self._test_leave_room_joined()
def test_set_moderator_no_room(self):
self._test_set_moderator_no_room()
def test_set_moderator_with_room(self):
self._test_set_moderator_with_room()
def test_set_room_owner_no_room(self):
self._test_set_room_owner_no_room()
def test_set_room_owner_with_room(self):
self._test_set_room_owner_with_room()
def test_set_channel_owner_no_channel(self):
self._test_set_channel_owner_no_channel()
def test_set_channel_owner_with_channel(self):
self._test_set_channel_owner_with_channel()
def test_get_user_status_before_set(self):
self._test_get_user_status_before_set(UserKeys.STATUS_UNAVAILABLE)
def test_set_user_offline(self):
self._test_set_user_offline(UserKeys.STATUS_UNAVAILABLE)
def test_set_user_online(self):
self._test_set_user_online(UserKeys.STATUS_AVAILABLE)
def test_set_user_invisible(self):
self._test_set_user_invisible(UserKeys.STATUS_INVISIBLE)
def test_remove_current_rooms_for_user_before_joining(self):
self._test_remove_current_rooms_for_user_before_joining()
def test_remove_current_rooms_for_user_after_joining(self):
self._test_remove_current_rooms_for_user_after_joining()
def test_rooms_for_user_before_joining(self):
self._test_rooms_for_user_before_joining()
def test_create_existing_room_name(self):
self._test_create_existing_room_name()
def test_rooms_for_user_after_joining(self):
self._test_rooms_for_user_after_joining()
def test_rooms_for_channel_before_create_channel(self):
self._test_rooms_for_channel_before_create_channel()
def test_rooms_for_channel_after_create_channel_before_create_room(self):
self._test_rooms_for_channel_after_create_channel_before_create_room()
def test_rooms_for_channel_after_create_channel_after_create_room(self):
self._test_rooms_for_channel_after_create_channel_after_create_room()
def test_get_channels_before_create(self):
self._test_get_channels_before_create()
def test_get_channels_after_create(self):
self._test_get_channels_after_create()
def test_room_exists(self):
self._test_room_exists()
def test_create_room_no_channel(self):
self._test_create_room_no_channel()
def test_create_existing_channel(self):
self._test_create_existing_channel()
def test_create_channel(self):
self._test_create_channel()
def test_create_channel_again_to_make_sure_tables_cleared_after_each_test(self):
self._test_create_channel()
channels = self.db._session().query(Channels).filter(Channels.uuid == BaseDatabaseTest.CHANNEL_ID).all()
self.assertEqual(1, len(channels))
def test_create_channel_blank_name(self):
self._test_create_channel_blank_name()
def test_create_channel_exists(self):
self._test_create_channel_exists()
def test_create_room(self):
self._test_create_room()
rooms = self.db._session().query(Rooms).filter(Rooms.uuid == BaseDatabaseTest.ROOM_ID).all()
self.assertEqual(1, len(rooms))
def test_create_room_blank_name(self):
self._test_create_room_blank_name()
def test_create_existing_room(self):
self._test_create_existing_room()
def test_channel_exists_after_create(self):
self._test_channel_exists_after_create()
def test_channel_exists_before_create(self):
self._test_channel_exists_before_create()
def test_room_name_exists_before_create(self):
self._test_room_name_exists_before_create()
def test_room_name_exists_after_create(self):
self._test_room_name_exists_after_create()
def test_delete_one_non_existing_acl(self):
self._test_delete_one_non_existing_acl()
def test_add_one_extra_acl(self):
self._test_add_one_extra_acl()
def test_get_acl(self):
self._test_get_acl()
def test_set_acl(self):
self._test_set_acl()
def test_delete_one_acl(self):
self._test_delete_one_acl()
def test_set_room_allows_cross_group_messaging(self):
self._test_set_room_allows_cross_group_messaging()
def test_get_room_allows_cross_group_messaging_no_room(self):
self._test_get_room_allows_cross_group_messaging_no_room()
def test_get_room_allows_cross_group_messaging(self):
self._test_get_room_allows_cross_group_messaging()
def test_get_room_does_not_allow_cross_group_messaging(self):
self._test_get_room_does_not_allow_cross_group_messaging()
def test_room_allows_cross_group_messaging_no_room(self):
self._test_room_allows_cross_group_messaging_no_room()
def test_room_allows_cross_group_messaging(self):
self._test_room_allows_cross_group_messaging()
def test_room_does_not_allow_cross_group_messaging_no_room(self):
self._test_room_does_not_allow_cross_group_messaging_no_room()
def test_create_admin_room(self):
self._test_create_admin_room()
def test_is_super_user(self):
self._test_is_super_user()
def test_get_admin_room(self):
self._test_get_admin_room()
def test_set_owner_and_moderator(self):
self._test_set_owner_and_moderator()
def test_remove_channel_role(self):
self._test_remove_channel_role()
def test_remove_room_role(self):
self._test_remove_room_role()
def test_remove_super_user(self):
self._test_remove_super_user()
def test_get_super_users(self):
self._test_get_super_users()
def test_remove_owner(self):
self._test_remove_owner()
def test_remove_channel_owner(self):
self._test_remove_channel_owner()
def test_remove_admin(self):
self._test_remove_admin()
def test_remove_moderator(self):
self._test_remove_moderator()
def test_set_owner_is_unique(self):
self._test_set_owner_is_unique()
def test_set_owner_channel_is_unique(self):
self._test_set_owner_channel_is_unique()
def test_set_moderator_is_unique(self):
self._test_set_moderator_is_unique()
def test_set_admin_is_unique(self):
self._test_set_admin_is_unique()
def test_set_super_user_is_unique(self):
self._test_set_super_user_is_unique()
def test_remove_super_user_without_setting(self):
self._test_remove_super_user_without_setting()
def test_remove_owner_without_setting(self):
self._test_remove_owner_without_setting()
def test_remove_channel_owner_without_setting(self):
self._test_remove_channel_owner_without_setting()
def test_remove_admin_without_setting(self):
self._test_remove_admin_without_setting()
def test_remove_moderator_without_setting(self):
self._test_remove_moderator_without_setting()
def test_remove_other_role_channel(self):
self._test_remove_other_role_channel()
def test_remove_other_role_room(self):
self._test_remove_other_role_room()
def test_set_admin_no_such_channel(self):
self._test_set_admin_no_such_channel()
def test_remove_admin_no_such_channel(self):
self._test_remove_admin_no_such_room()
def test_remove_moderator_no_such_room(self):
self._test_remove_moderator_no_such_room()
def test_channel_name_exists(self):
self._test_channel_name_exists()
def test_channel_exists(self):
self._test_channel_exists()
def test_create_user(self):
self._test_create_user()
def test_users_in_room(self):
self._test_users_in_room()
def test_delete_acl_in_channel_for_action(self):
self._test_delete_acl_in_channel_for_action()
def test_delete_acl_in_room_for_action(self):
self._test_delete_acl_in_room_for_action()
def test_remove_owner_channel_no_channel(self):
self._test_remove_owner_channel_no_channel()
def test_remove_owner_channel_not_owner(self):
self._test_remove_owner_channel_not_owner()
def test_remove_owner_channel_is_owner(self):
self._test_remove_owner_channel_is_owner()
def test_create_user_exists(self):
self._test_create_user_exists()
def test_update_acl_in_room_for_action(self):
self._test_update_acl_in_room_for_action()
def test_update_acl_in_room_for_action_no_channel(self):
self._test_update_acl_in_room_for_action_no_channel()
def test_update_acl_in_room_for_action_no_room(self):
self._test_update_acl_in_room_for_action_no_room()
def test_update_acl_in_room_for_action_invalid_action(self):
self._test_update_acl_in_room_for_action_invalid_action()
def test_update_acl_in_room_for_action_invalid_type(self):
self._test_update_acl_in_room_for_action_invalid_type()
def test_update_acl_in_room_for_action_invalid_value(self):
self._test_update_acl_in_room_for_action_invalid_value()
def test_update_acl_in_channel_for_action(self):
self._test_update_acl_in_channel_for_action()
def test_update_acl_in_channel_for_action_no_channel(self):
self._test_update_acl_in_channel_for_action_no_channel()
def test_update_acl_in_channel_for_action_invalid_action(self):
self._test_update_acl_in_channel_for_action_invalid_action()
def test_update_acl_in_channel_for_action_invalid_type(self):
self._test_update_acl_in_channel_for_action_invalid_type()
def test_update_acl_in_channel_for_action_invalid_value(self):
self._test_update_acl_in_channel_for_action_invalid_value()
def test_is_banned_from_channel(self):
self._test_is_banned_from_channel()
def test_is_banned_from_room(self):
self._test_is_banned_from_room()
def test_is_banned_globally(self):
self._test_is_banned_globally()
def test_remove_global_ban(self):
self._test_remove_global_ban()
def test_remove_channel_ban(self):
self._test_remove_channel_ban()
def test_remove_room_ban(self):
self._test_remove_room_ban()
def test_was_banned_globally(self):
self._test_was_banned_globally()
def test_was_banned_from_room(self):
self._test_was_banned_from_room()
def test_was_banned_from_channel(self):
self._test_was_banned_from_channel()
def test_get_user_ban_status_channel(self):
self._test_get_user_ban_status_channel()
def test_get_user_ban_status_room(self):
self._test_get_user_ban_status_room()
def test_get_user_ban_status_global(self):
self._test_get_user_ban_status_global()
def test_get_banned_users_global_not_empty_after_ban(self):
self._test_get_banned_users_global_not_empty_after_ban()
def test_get_banned_users_global_is_empty(self):
self._test_get_banned_users_global_is_empty()
def test_get_banned_users_global_is_empty_if_expired(self):
self._test_get_banned_users_global_is_empty_if_expired()
def test_get_banned_users_channel_not_empty_after_ban(self):
self._test_get_banned_users_channel_not_empty_after_ban()
def test_get_banned_users_channel_is_empty(self):
self._test_get_banned_users_channel_is_empty()
def test_get_banned_users_channel_is_empty_if_expired(self):
self._test_get_banned_users_channel_is_empty_if_expired()
def test_get_banned_users_room_not_empty_after_ban(self):
self._test_get_banned_users_room_not_empty_after_ban()
def test_get_banned_users_room_is_empty(self):
self._test_get_banned_users_room_is_empty()
def test_get_banned_users_room_is_empty_if_expired(self):
self._test_get_banned_users_room_is_empty_if_expired()
def test_get_banned_users_is_empty(self):
self._test_get_banned_users_is_empty()
def test_get_banned_users_for_room(self):
self._test_get_banned_users_for_room()
def test_get_banned_users_for_channel(self):
self._test_get_banned_users_for_channel()
def test_get_banned_users_globally(self):
self._test_get_banned_users_globally()
def test_get_global_ban_timestamp_is_none(self):
self._test_get_global_ban_timestamp_is_none()
def test_get_global_ban_timestamp_not_none(self):
self._test_get_global_ban_timestamp_not_none()
def test_get_global_ban_timestamp_empty_if_expired(self):
self._test_get_global_ban_timestamp_not_empty_if_expired()
def test_get_channel_ban_timestamp_is_none(self):
self._test_get_channel_ban_timestamp_is_none()
def test_get_channel_ban_timestamp_not_none(self):
self._test_get_channel_ban_timestamp_not_none()
def test_get_channel_ban_timestamp_empty_if_expired(self):
self._test_get_channel_ban_timestamp_not_empty_if_expired()
def test_get_room_ban_timestamp_is_none(self):
self._test_get_room_ban_timestamp_is_none()
def test_get_room_ban_timestamp_not_none(self):
self._test_get_room_ban_timestamp_not_none()
def test_get_room_ban_timestamp_empty_if_expired(self):
self._test_get_room_ban_timestamp_not_empty_if_expired()
def test_get_acls_in_channel_for_action_no_channel(self):
self._test_get_acls_in_channel_for_action_no_channel()
def test_get_acls_in_channel_for_action_no_room(self):
self._test_get_acls_in_channel_for_action_no_room()
def test_get_all_acls_channel_is_empty(self):
self._test_get_all_acls_channel_is_empty()
def test_get_all_acls_channel_not_empty(self):
self._test_get_all_acls_channel_not_empty()
def test_get_all_acls_room_is_empty(self):
self._test_get_all_acls_room_is_empty()
def test_get_all_acls_room_not_empty(self):
self._test_get_all_acls_room_not_empty()
def test_channel_for_room_blank_room_id(self):
self._test_channel_for_room_blank_room_id()
def test_channel_for_room_before_create(self):
self._test_channel_for_room_before_create()
def test_channel_for_room_after_create(self):
self._test_channel_for_room_after_create()
def test_channel_for_room_cache(self):
self._test_channel_for_room_cache()
def test_get_username_before_set(self):
self._test_get_username_before_set()
def test_get_username_after_set(self):
self._test_get_username_after_set()
def test_rename_channel(self):
self._test_rename_channel()
def test_rename_channel_before_create(self):
self._test_rename_channel_before_create()
def test_rename_channel_empty_name(self):
self._test_rename_channel_empty_name()
def test_rename_room(self):
self._test_rename_room()
def test_rename_room_before_create_channel(self):
self._test_rename_room_before_create_channel()
def test_rename_room_before_create_room(self):
self._test_rename_room_before_create_room()
def test_rename_room_empty_name(self):
self._test_rename_room_empty_name()
def test_rename_room_already_exists(self):
self._test_rename_room_already_exists()
def test_remove_room(self):
self._test_remove_room()
def test_remove_room_before_create_channel(self):
self._test_remove_room_before_create_channel()
def test_remove_room_before_create_room(self):
self._test_remove_room_before_create_room()
def test_admin_room_for_channel_before_exists(self):
self._test_admin_room_before_exists()
def test_admin_room_for_channel_get_from_cache(self):
self._test_admin_room_get_from_cache()
def test_room_exists_from_cache(self):
self._test_room_exists_from_cache()
def test_get_user_status_from_cache(self):
self._test_get_user_status_from_cache()
def test_get_user_status_after_set(self):
self._test_get_user_status_after_set()
def test_set_user_invisible_twice_ignores_second(self):
self._test_set_user_invisible_twice_ignores_second()
def test_set_user_offline_twice_ignores_second(self):
self._test_set_user_offline_twice_ignores_second()
def test_set_user_online_twice_ignores_second(self):
self._test_set_user_online_twice_ignores_second()
def test_users_in_room_after_join(self):
self._test_users_in_room_after_join()
def test_set_user_offline_after_online(self):
self._test_set_user_offline_after_online()
def test_room_contains_before_create_channel(self):
self._test_room_contains_before_create_channel()
def test_room_contains_before_create_room(self):
self._test_room_contains_before_create_room()
def test_room_contains_after_create(self):
self._test_room_contains_after_create()
def test_room_contains_after_join(self):
self._test_room_contains_after_join()
def test_room_name_exists_from_cache_after_create(self):
self._test_room_name_exists_from_cache_after_create()
def test_rename_channel_exists(self):
self._test_rename_channel_exists()
def test_channel_for_room_from_cache(self):
self._test_channel_for_room_from_cache()
def test_leave_room_before_create(self):
self._test_leave_room_before_create()
def test_remove_moderator_twice(self):
self._test_remove_moderator_twice()
def test_set_owner_channel_after_removing_owner(self):
self._test_set_owner_channel_after_removing_owner()
def test_delete_acl_in_channel_for_action_invalid_action(self):
self._test_delete_acl_in_channel_for_action_invalid_action()
def test_delete_acl_in_room_for_action_invalid_action(self):
self._test_delete_acl_in_room_for_action_invalid_action()
def test_delete_acl_in_channel_for_action_after_create(self):
self._test_delete_acl_in_channel_for_action_after_create()
def test_delete_acl_in_room_for_action_after_create(self):
self._test_delete_acl_in_room_for_action_after_create()
def test_update_acl(self):
self._test_update_acl()
def test_get_all_acls_channel(self):
self._test_get_all_acls_channel()
def test_get_all_acls_channel_before_create(self):
self._test_get_all_acls_channel_before_create()
def test_get_all_acls_room(self):
self._test_get_all_acls_room()
def test_get_all_acls_room_before_create(self):
self._test_get_all_acls_room_before_create()
def test_update_last_read_for(self):
self._test_update_last_read_for()
def test_update_username(self):
self._test_update_username()
def test_get_room_name_from_cache(self):
self._test_get_room_name_from_cache()
def test_get_channel_name_from_cache(self):
self._test_get_channel_name_from_cache()
def test_is_banned_globally_after_clearing_cache(self):
self._test_is_banned_globally_after_clearing_cache()
def test_is_banned_globally_after_clearing_cache_if_expired(self):
self._test_is_banned_globally_after_clearing_cache_if_expired()
def test_is_banned_from_room_after_clearing_cache(self):
self._test_is_banned_from_room_after_clearing_cache()
def test_is_banned_from_room_after_clearing_cache_if_expired(self):
self._test_is_banned_from_room_after_clearing_cache_if_expired()
def test_is_banned_from_channel_after_clearing_cache(self):
self._test_is_banned_from_channel_after_clearing_cache()
def test_is_banned_from_channel_after_clearing_cache_if_expired(self):
self._test_is_banned_from_channel_after_clearing_cache_if_expired()
|
import pandas as pd
__author__ = 'slei'
class AddHeuristicTSP:
""" Finds the shortest path using a heuristic method """
def __init__(self, cities_df):
self.df = cities_df
        self.edges = list((t.origin, t.destination) for t in self.df.itertuples())
        self.distance = dict([((t.origin, t.destination), t.distance) for t in self.df.itertuples()])
        self.cities = list(set(self.df['destination']))
self.cities_lst = []
self.tour_lst = []
self.distance_lst = []
self.tour_leg_distances_lst = []
self._final_df = None
self._shortest_distance = None
self._shortest_tour = None
def find_subtour(self, starting_city):
""" Given a starting city, finds a tour by selecting next shortest distance from list of unvisited cities """
tour = []
tour_distance_lst = [0]
cities_unvisited = list(set(self.df['destination']))
initial_city = starting_city
current_city = initial_city
tour.append(current_city)
        cities_unvisited.remove(current_city)  # mark the starting city as visited
total_distance = 0
count = 0
while len(cities_unvisited) > 0:
# remove any city that has already been visited from consideration
df_unvisited = self.df[self.df['destination'].isin(cities_unvisited)]
# filter for rows based on first criterion
is_current = df_unvisited['origin'] == current_city
df2 = df_unvisited[is_current]
# find the nearest city
index_min = df2['distance'].idxmin()
min_row = df2.loc[index_min]
d = min_row.distance
destination = min_row.destination
# update next city and tour and total distance
current_city = destination
total_distance = total_distance + d
tour_distance_lst.append(d)
# update city tracker lists
tour.append(current_city)
index_i = cities_unvisited.index(current_city)
cities_unvisited.pop(index_i)
count = count + 1
# check
print("next destination: ", destination)
print("distance: ", d)
print("total_distance: ", total_distance)
print("tour: ", tour)
print("tour_distance_lst: ", tour_distance_lst)
print("cities_unvisited: ", cities_unvisited)
print()
# adding the distance from last city back to initial city
last_city = tour[-1]
last_mile = (initial_city, last_city)
last_mile_distance = self.distance[last_mile]
tour.append(initial_city)
total_distance = total_distance + last_mile_distance
tour_distance_lst.append(last_mile_distance)
# check
print("last_mile: ", last_mile)
print("last_mile_distance: ", last_mile_distance)
print("tour: ", tour)
print("total_distance: ", total_distance)
print("tour_leg_distances_lst: ", tour_distance_lst)
# update lists
self.tour_lst.append(tour)
self.distance_lst.append(total_distance)
self.tour_leg_distances_lst.append(tour_distance_lst)
@property
def final_df(self):
""" Add description here"""
if self._final_df is None:
self._final_df = self._generate_final_df()
return self._final_df
def _generate_final_df(self):
for c in self.cities: # for every city in the dataset
print("city: ", c) # generate a tour for each
print("--------------------------------------------------------------------------------")
self.find_subtour(c)
print('********************************************************************************')
print()
soln_dict = {'city': self.cities, 'tour': self.tour_lst, 'tour_leg_distances': self.tour_leg_distances_lst,
'distance': self.distance_lst}
return pd.DataFrame(soln_dict)
@property
def shortest_distance(self):
""" Add description here"""
if self._shortest_distance is None:
return self._calculate_shortest_distance()
def _calculate_shortest_distance(self): # find the tour with the lowest distance
index_min_final = self.final_df['distance'].idxmin() # returns the index location of min value
min_row_final = self.final_df.loc[index_min_final]
return min_row_final.distance
@property
def shortest_tour(self):
""" Add description here"""
if self._shortest_tour is None:
return self._generate_shortest_tour()
def _generate_shortest_tour(self):
index_min_final = self.final_df['distance'].idxmin() # returns the index location of min value
min_row_final = self.final_df.loc[index_min_final]
return min_row_final.tour
# ********************************************************************************
# ********************************************************************************
if __name__ == '__main__':
df = pd.read_csv('city_data_add.csv')
tsp = AddHeuristicTSP(df)
tsp.final_df
print("final_df")
print(tsp.final_df)
print()
print("shortest_distance_final", tsp.shortest_distance)
print("shortest_tour_final", tsp.shortest_tour)
|
import numpy as np
from scipy.spatial.distance import euclidean
from typing import Union
import pandas
class CLOSE(object):
def __init__(self, data: pandas.DataFrame, measure: Union[str, callable] = 'mse', minPts: int = None, output: bool = False,
jaccard: bool = False, weighting: bool = False, exploitation_term: bool = False):
"""
Params:
            data (pandas.DataFrame) - dataframe whose first three columns are 'object_id', 'time' and 'cluster_id'
                                      (the cluster membership), followed by the feature columns
                Note: outliers should have negative labels/cluster_ids, and these should differ between timestamps
Optional:
measure (str or callable) - for used quality measure, possible measures:
'sse', 'mse', 'mae', 'max', 'dbi', 'exploit'
minPts (int) - used minPts for density-based quality measure
output (boolean) - whether intermediate results should be printed
jaccard (boolean) - whether the jaccard index should be used for proportion
weighting (boolean) - whether the weighting function should be used for subsequence_score
exploitation_term (boolean) - whether the exploitation term should be included in CLOSE calculation
"""
self._data = data
self._column_names = data.columns.values
self._object_column_name = self._column_names[0]
self._time_column_name = self._column_names[1]
self._cluster_column_name = self._column_names[2]
self._jaccard = jaccard
self._weighting = weighting
self._exp_term = exploitation_term
self._minPts = minPts
self._output = output
self.pos_measures = {### Measures for Clusters
'sse': self.calc_sse, # NOTE: sse is not between 0 and 1
'mse': self.calc_mse, # NOTE: mse is only between 0 and 1, if data is normalized
'mae': self.calc_mae, # NOTE: mae is only between 0 and 1, if data is normalized
'max': self.calc_max_dist,
'dbi': self.calc_min_pts,
'None': self.return_zero,
### Measures for Time Clusterings
'exploit': self.calc_exploit_at_t}
if measure in self.pos_measures:
self.measure = self.pos_measures[measure]
elif callable(measure):
self.measure = measure
else:
self.measure = self.pos_measures['mse']
def rate_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]:
"""
Optional:
start_time (int) - time that should be considered as beginning
end_time (int) - time which should be rated up to
return_measures (boolean) - whether additional information such as average stability
and quality should be returned
Returns:
CLOSE score (float): rating of clustering regarding all clusters
(dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information
if 'return_measures' is True
"""
cluster_ratings = self.rate_clusters(start_time, end_time)
gr_clusters = self._data.groupby(self._cluster_column_name)
score = 0
avg_quality = 0
avg_stab = 0
for cluster in cluster_ratings:
cluster_objects = gr_clusters.get_group(cluster)[self._object_column_name].unique()
cluster_time = gr_clusters.get_group(cluster)[self._time_column_name].iloc[0]
feature_list = self.get_feature_list(cluster_objects, cluster_time)
measure = self.measure(feature_list)
avg_quality += measure
avg_stab += cluster_ratings[cluster]
score += (cluster_ratings[cluster] * (1 - measure))
num_clusters = len(cluster_ratings)
num_timestamps = self.get_num_timestamps(start_time, end_time)
if num_clusters <= 0:
if self._output:
print('Clustering has no Clusters!!')
return 0
avg_quality /= num_clusters
if self._output:
print('Average Quality: ', str(avg_quality))
avg_stab /= num_clusters
if self._output:
print('Average Stability: ', str(avg_stab))
if self._exp_term:
exp_term = self.calc_exploit()
factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term
else:
factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters)**2)
if not return_measures:
return score * factor
else:
return {'stability_evaluation': score * factor,
'stability': avg_stab,
'quality': avg_quality,
'pre-factor': (1 - (num_timestamps / num_clusters) ** 2)}
def rate_time_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]:
"""
Optional:
start_time (optional) - int: time that should be considered as beginning
end_time (optional) - int: time which should be rated up to
return_measures (boolean) - whether additional information such as average stability and quality should be returned
Returns:
CLOSE score (float) - rating of clustering regarding all time clusterings
(dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information
if 'return_measures' is True
"""
cluster_ratings = self.rate_clusters(start_time, end_time)
num_timestamps, timestamps = self.get_num_timestamps(start_time, end_time, return_timestamps=True)
score = 0
if return_measures:
quality = 0
stability = 0
for time in timestamps:
if not return_measures:
score += self.calc_t_clustering_rating(cluster_ratings, time)
else:
cur_scores = self.calc_t_clustering_rating(cluster_ratings, time, return_measures=True)
score += cur_scores['score']
quality += cur_scores['quality']
stability += cur_scores['stability']
if return_measures:
quality /= num_timestamps
stability /= num_timestamps
num_clusters = len(cluster_ratings)
if num_clusters <= 0:
if self._output:
print('Over-Time Clustering has no Clusters!!')
return 0
if self._exp_term:
exp_term = self.calc_exploit()
factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term
else:
factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2)
if not return_measures:
return score * factor
else:
return {'stability_evaluation': score * factor,
'stability': stability,
'quality': quality,
'pre-factor': factor}
def calc_t_clustering_rating(self, cluster_ratings: dict, time: int, return_measures: bool = False) -> Union[float, dict]:
"""
Params:
cluster_ratings (dict) - {<object_id>: <rating>} with ratings of objects
time (int) - time that should be considered
Optional:
return_measures (boolean) - whether additional information such as average stability and quality should be returned
Output:
CLOSE score (float) - rating of clustering at considered time
(dict): with key 'score', 'stability', 'quality' with additional information if 'return_measures' is True
"""
avg_stab = 0
clusters_at_time = self._data[self._data[self._time_column_name] == time][self._cluster_column_name].unique()
clusters_at_time = np.delete(clusters_at_time, np.where(clusters_at_time < 0))
for cluster in clusters_at_time:
            try:
                avg_stab += cluster_ratings[cluster]
            except KeyError:
                # the cluster has no stability rating (e.g. it was filtered out), skip it
                continue
num_clusters = len(clusters_at_time)
if num_clusters <= 0:
if self._output:
print('Time Clustering at Time ', str(time), ' has no Clusters!!')
return 0
avg_stab /= num_clusters
if self._output:
print('Average Stability at Time ', str(time), ' : ', str(avg_stab))
quality = self.measure(time)
if self._output:
print('Quality of Clustering at Time ' , str(time), ' : ', str(quality))
t_clustering_score = avg_stab * quality
if not return_measures:
return t_clustering_score
else:
return {
'score': t_clustering_score,
'stability': avg_stab,
'quality': quality
}
def rate_clusters(self, start_time: int = None, end_time: int = None, id: Union[int, str, list] = None) -> dict:
"""
Optional:
start_time (int) - time that should be considered as beginning
end_time (int) - time which should be rated up to
id (int, str, list or None) - representing the cluster_ids that should be rated. If id is None,
all objects are rated
Returns:
ratings (dict) - {<cluster_id>: <rating>} with ratings of clusters
"""
ids_to_rate = self.get_ids_to_rate(id, self._cluster_column_name, start_time, end_time)
ids = ids_to_rate[:]
# don't rate outliers
for i in ids_to_rate:
if int(i) < 0:
ids.remove(i)
ratings = self.calc_cluster_rating(ids, start_time)
return ratings
def calc_cluster_rating(self, ids_to_rate: Union[list, np.ndarray], start_time: int = None) -> dict:
"""
Params:
ids_to_rate (array-like) - list of clusters that should be rated
Optional:
start_time (int) - time that should be considered as beginning
Returns:
ratings - dict {<cluster_id>: <rating>} with ratings of clusters
"""
if start_time is None:
start_time = np.min(self._data[self._time_column_name].unique())
ratings = {}
cluster_compositions = self.obtain_cluster_compositions()
gr_clusters = self._data.groupby(self._cluster_column_name)
# iterate over all cluster ids
for id in ids_to_rate:
time = gr_clusters.get_group(id)[self._time_column_name].iloc[0]
            # rate the clusters of all timestamps except the first one
if time != start_time:
num_merged_clusters = len(cluster_compositions[id])
obj_list = gr_clusters.get_group(id)[self._object_column_name].unique().tolist()
obj_ratings = self.calc_object_rating(cluster_compositions, obj_list, time)
score = 0
for obj in obj_ratings:
score += obj_ratings[obj]
try:
score /= len(obj_ratings)
except ZeroDivisionError:
if self._output:
print('Cluster ', str(id), ' has no non-outlier members.')
else:
continue
clusters = list(cluster_compositions[id].keys())
num_timestamps = len(self._data.loc[self._data[self._cluster_column_name].isin(clusters)]
[self._time_column_name].unique())
try:
div = num_merged_clusters / num_timestamps
score /= div
except ZeroDivisionError:
if self._output:
print("<<ZeroDivisionError - Cluster Score>> Cluster ID: ", str(id), " Merged Clusters: ", str(num_merged_clusters),
" Num Timestamps: ", str(num_timestamps))
else:
continue
ratings[id] = score
# clusters of the first timestamp have a stability of 1.0
else:
ratings[id] = 1.0
return ratings
def rate_object(self, id: Union[int, str, list] = None, start_time: int = None, end_time: int = None) -> dict:
"""
Optional:
id (int, str, list or None) - representing the data points that should be rated. If id is None,
all objects are rated
start_time (int) - time that should be considered as beginning
end_time (int) - representing the timestamp which should be rated up to
Returns:
ratings (dict) - {<object_id>: <rating>} with ratings of objects
"""
ids_to_rate = self.get_ids_to_rate(id, self._object_column_name)
if end_time is None:
end_time = np.max(self._data[self._time_column_name].unique())
cluster_compositions = self.obtain_cluster_compositions()
ratings = self.calc_object_rating(cluster_compositions, ids_to_rate, end_time, start_time)
return ratings
def calc_object_rating(self, cluster_composition: dict, ids_to_rate: Union[list, np.ndarray], end_time: int, start_time: int = None) -> dict:
"""
Params:
cluster_composition (dict) - {<cluster_id>: {<contained_cluster_id>: <proportion>}} containing the proportions of
clusters (contained_cluster_id) that belong to cluster (cluster_id)
ids_to_rate (array-like) - list of data points that should be rated
end_time (int) - representing the timestamp which should be rated up to
Optional:
start_time (int) - time that should be considered as beginning
Returns:
ratings - dict {<object_id>: <rating>} with ratings of objects
"""
ratings = {}
gr_clusters = self._data.groupby(self._object_column_name)
# iterate over object ids
for id in ids_to_rate:
cur_group = gr_clusters.get_group(id)
cur_group = cur_group[cur_group[self._time_column_name] <= end_time]
if start_time is not None:
cur_group = cur_group[cur_group[self._time_column_name] >= start_time]
try:
# id of the cluster of the last considered timestamp
last_cluster = cur_group[cur_group[self._time_column_name] == end_time][self._cluster_column_name].iloc[
0]
except IndexError:
print(">>INDEXERROR - LAST CLUSTER<< ID: ", str(id), ", Start Time: ", str(start_time), ", End Time: ",
str(end_time))
continue
# if object is an outlier for the considered timestamp, it is skipped
if int(last_cluster) < 0:
continue
cluster_ids = cur_group[self._cluster_column_name].unique()
object_ratings = []
num_clusters = 0
has_outlier = False
for cluster in cluster_ids:
if cluster == last_cluster:
continue
                # add the proportion of each earlier cluster that merged into the last cluster
else:
# outliers get worst rating of 0.0
if int(cluster) < 0:
object_ratings.append(0.0)
has_outlier = True
else:
object_ratings.append(cluster_composition[last_cluster][cluster])
num_clusters += 1
if not has_outlier and len(object_ratings) == 0:
# print(str(id) + " has no data before t=" + str(end_time))
continue
if self._weighting:
try:
weighting_denominator = 0
for i in range(1, num_clusters + 1):
weighting_denominator += i
if num_clusters > 0:
object_rating = 0
for i in range(num_clusters):
object_rating += object_ratings[i] * ((i + 1) / weighting_denominator)
else:
continue
except (TypeError, ZeroDivisionError):
# print(str(id) + " is not assigned to any cluster before t=" + str(end_time))
continue
else:
try:
object_rating = np.sum(object_ratings)
object_rating /= num_clusters
except (TypeError, ZeroDivisionError):
# print(str(id) + " is not assigned to any cluster before t=" + str(end_time))
continue
ratings[id] = round(object_rating, 3)
return ratings
def calc_exploit(self) -> float:
"""
Returns:
exploitation_term (float) - exploitation term for whole clustering
"""
num_objects = len(self._data[self._object_column_name].unique())
num_no_outliers = len(self._data[self._data[self._cluster_column_name] >= 0][self._object_column_name].unique())
return num_no_outliers / num_objects
######## HELPER FUNCTIONS ########
def get_feature_list(self, objects: Union[list, np.ndarray], time: int) -> np.ndarray:
"""
Params:
objects (array-like) - list of objects_ids that belong to considered cluster
time (int) - time of cluster that is considered
Output:
feature_list (list) - list of lists containing the features of objects in the considered cluster
"""
feature_list = []
for obj in objects:
features = self._data[
(self._data[self._object_column_name] == obj) & (self._data[self._time_column_name] == time)]
try:
features = \
features.drop([self._object_column_name, self._cluster_column_name, self._time_column_name],
axis=1).iloc[0].tolist()
except IndexError:
print(">>INDEXERROR - FEATURE LIST<< ID: ", str(obj), ", Time: ", str(time))
continue
if len(features) <= 0:
print("No features found for object ", str(obj))
continue
feature_list.append(features)
return np.array(feature_list)
def get_num_timestamps(self, start_time: int, end_time: int, return_timestamps: bool = False) -> int:
"""
Params:
start_time (int) - first timestamp to be considered
end_time (int) - last timestamp to be considered
Optional:
            return_timestamps (boolean) - whether the list of considered timestamps should also be returned
        Returns:
            num_timestamps (int) - number of timestamps between start_time and end_time
                                   (and the list of those timestamps if 'return_timestamps' is True)
"""
timestamp_list = self._data[self._time_column_name].unique()
if start_time is not None:
timestamp_list = [i for i in timestamp_list if i >= start_time]
if end_time is not None:
timestamp_list = [i for i in timestamp_list if i <= end_time]
num_timestamps = len(timestamp_list)
if not return_timestamps:
return num_timestamps
else:
return num_timestamps, timestamp_list
def get_ids_to_rate(self, id: Union[int, str, list], id_name: str, start_time: int = None, end_time: int = None) -> list:
"""
Params:
id (int, str, list or None) - representing the data points that should be rated. If id is None, all objects are rated
id_name (str) - either self._cluster_column_name or self._object_column_name, which ids to extract
Optional:
start_time (int) - first timestamp to be considered
end_time (int) - last timestamp to be considered
Returns:
ids_to_rate (list) - list of ids that should be rated
"""
if id is None:
data = self._data.copy()
if start_time is not None:
data = data[data[self._time_column_name] >= start_time]
if end_time is not None:
data = data[data[self._time_column_name] <= end_time]
ids_to_rate = data[id_name].unique().tolist()
elif isinstance(id, int) or isinstance(id, str):
ids_to_rate = [id]
elif isinstance(id, list):
ids_to_rate = id[:]
else:
raise Exception('id has to be int, str, list or None')
return ids_to_rate
def obtain_cluster_compositions(self) -> dict:
"""
Returns:
cluster_compositions (dict) - dict of dicts {<cluster_id>: {<cluster_id>: <proportion>}} with cluster compositions
Example:
{5: {1: 1.0, 2: 0.1, 4: 0.5}} describes that
100% of cluster 1, 10% of cluster 2 and 50% of cluster 4 belong to cluster 5
"""
cluster_compositions = {}
g_clusters = self._data.groupby([self._time_column_name, self._cluster_column_name])
if not self._jaccard:
cluster_members = self._data.groupby(self._cluster_column_name).count()
# iterate over all clusters - 'group' contains the time and cluster_id
# and 'objects' is the corresponding dataframe
for group, objects in g_clusters:
# Ignore outliers
if int(group[1]) < 0:
continue
objects = objects[self._object_column_name].values.tolist()
# temporal intersection
# select considered clusters with later timestamps than the current one to check which clusters the
# current one merged into and count, how many objects of the current cluster are in the considered clusters
# example of a series from the dataframe: [cluster_id, count] with [2, 10]
# meaning: 10 objects of the current cluster merged into the cluster with the id 2
temp_intersection = (self._data.loc[(self._data[self._object_column_name].isin(objects)) &
(self._data[self._time_column_name] > group[0])]).groupby(self._cluster_column_name).count()
# iterate over all clusters which the current cluster has merged into
# 'cluster' contains the cluster_id
# and 'con_objects' is the corresponding number of objects of the temporal intersection
for cluster, num_objects in temp_intersection.iterrows():
# Ignore outliers
if int(cluster) < 0:
continue
# for all considered clusters save the proportion of the current cluster that merged into the considered
# one
# example: {3: {2: 0.3}, 4: {2: 0.1}}
# meaning: 30% of (current) cluster 2 merged into (considered) cluster 3 and 10% into (considered) cluster 4
if cluster not in cluster_compositions:
cluster_compositions[cluster] = {}
if self._jaccard:
# cardinality of the union of both considered clusters
card_union = len(self._data.loc[(self._data[self._cluster_column_name] == cluster) |
(self._data[self._cluster_column_name] == group[1])]
[self._object_column_name].unique())
# jaccard distance
cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /
float(card_union), 3)
else:
cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) /
float(cluster_members.loc[group[1]].values[1]), 3)
if group[1] not in cluster_compositions:
cluster_compositions[group[1]] = {}
return cluster_compositions
######## QUALITY MEASURES ########
@staticmethod
def calc_sse(feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
sse (float) - sum of squared errors to centroid of cluster
"""
centroid = np.average(feature_list, axis=0)
sse = np.sum(np.power(feature_list - centroid[None, :], 2))
return sse
def calc_mse(self, feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
mse (float) - mean squared error of cluster
"""
sse = self.calc_sse(feature_list)
return sse / len(feature_list)
@staticmethod
def calc_mae(feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
mae (float) - mean average errors to centroid of cluster
"""
centroid = np.average(feature_list, axis=0)
mae = np.average(np.abs(feature_list - centroid[None, :]))
return mae
@staticmethod
def calc_max_dist(feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
max_dist (float) - maximal distance of cluster member to centroid of cluster
"""
max_dist = 0
for i in range(len(feature_list) - 1):
for j in range(i + 1, len(feature_list)):
cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j]))
if cur_dist > max_dist:
max_dist = cur_dist
max_dist /= 2 ** (1 / 2)
return max_dist
def calc_min_pts(self, feature_list: list) -> float:
"""
Params:
feature_list (list) - list of lists containing the features of objects in the considered cluster
Returns:
avg_dist (float) - average distance of cluster members to their minPts neighbor
"""
avg_dist = 0
for i in range(len(feature_list)):
dist_list = [10] * self._minPts
for j in range(len(feature_list)):
if i == j:
continue
cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j]))
for k in range(len(dist_list)):
if cur_dist < dist_list[k]:
dist_list.insert(k, cur_dist)
dist_list.pop(self._minPts)
avg_dist += dist_list[self._minPts - 1]
avg_dist /= len(feature_list)
return avg_dist
    @staticmethod
    def return_zero(*_args):
"""
Function is used if no quality measure should be used in CLOSE
This is the case when only the exploitation term is considered
Returns:
0
"""
return 0
def calc_exploit_at_t(self, time: int) -> float:
"""
Params:
time (int) - time to be considered
Returns:
rating (float) - exploitation rating of time clustering
"""
num_objects_at_t = len(self._data[self._data[self._time_column_name] == time][self._object_column_name].unique())
num_no_outliers = len(self._data[(self._data[self._time_column_name] == time) &
(self._data[self._cluster_column_name] >= 0)][self._object_column_name].unique())
return num_no_outliers / num_objects_at_t
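# --- Added usage sketch (not part of the original module) ---
# Illustrative only: the toy frame below merely follows the documented column
# layout 'object_id', 'time', 'cluster_id', <features...> with negative
# cluster_ids marking outliers; the values are made up to show how
# rate_clustering() is called.
if __name__ == '__main__':
    toy = pandas.DataFrame({
        'object_id':  [1, 2, 3, 4, 1, 2, 3, 4],
        'time':       [0, 0, 0, 0, 1, 1, 1, 1],
        'cluster_id': [0, 0, 0, 0, 1, 1, 2, 2],
        'feature':    [0.10, 0.20, 0.80, 0.90, 0.12, 0.22, 0.82, 0.92],
    })
    close = CLOSE(toy, measure='mse', output=True)
    print('CLOSE score:', close.rate_clustering())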
|
import discord
from discord.ext import commands
from Modules import CONSTANT
from Modules.Checks import check_if_role_or_bot_spam
class Roles(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command()
@check_if_role_or_bot_spam()
async def role(self, ctx: commands.Context, role_type: str, *role_names):
"""
Add a role.
<role_type>: Use 'main', 'sub', or 'unit' to indicate which type of role you want.
Your main role will control your nametag colour.
[role_names...]: The name of the roles you want to add, names are not case-sensitive.
You can enter as many names as you want to.
Examples:
">role main Sally"
--- will add Sally as your main role and make your nametag yellow.
">role sub Mizzy"
--- will add Mizzy as a sub role without affecting your nametag colour.
Unit examples:
>role unit Cider
>role unit Bench
>role unit Keikoto
Unit roles work the same as sub roles, you can have many of them.
You can enter multiple role names for this command.
If you enter ">role main" with more than one role name, you will get the first valid role you entered.
Examples:
">role sub Sally Sakura Ruri Jun"
--- will add all these four sub roles to you.
">role main Sally Sakura Ruri Jun"
        --- will only add Sally as your main role; if Sally is already your main role, the operation does nothing.
Only the following roles may be added for 'main' and 'sub' roles:
Sally, Sakura, Ruri, Jun, Mizzy, Miyako, Kanaeru, Akane,
Nagomin, Miu, Meimei, Uta, Nicole, Chiharun, Reika,
Reinyan, Ayaka, Moe, Mikami, Rettan, Yuki, Ainacchi, Tsubomi,
Tamago, Gouda, Kaoruko, Nana, Miko, Komiya, Aida, Mukai
Only the following roles may be added for 'unit' roles:
>> Hareta Hi no Bench (use the word "Bench" to add),
>> Keikoto saisei keikaku (use the word "Keikoto"),
>> Ki no Nuketa Cider (use the word "Cider")
"""
role_names: list[str] = [x.capitalize() for x in role_names]
        if not role_names:
            await ctx.reply("Missing required arguments. ")
            return
result_msgs: list[str] = []
if role_type in CONSTANT.ROLES_ID.keys():
for role_name in role_names:
if role_name in CONSTANT.GENERAL_ROLEABLES:
if role_type == "main":
role_ids: list[int] = [role.id for role in ctx.author.roles]
main_roles = list(set(role_ids) & set(CONSTANT.ROLES_ID["main"].values()))
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["main"][role_name])
if role in ctx.author.roles:
result_msgs.append("You already have that role!")
elif main_roles:
result_msgs.append("You can't have more than one main role!")
else:
await ctx.author.add_roles(role)
result_msgs.append("Role added.")
break
elif role_type == "sub":
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["sub"][role_name])
if role in ctx.author.roles:
result_msgs.append("You already have that role!")
else:
await ctx.author.add_roles(role)
result_msgs.append("Role added.")
else:
await ctx.reply("Illegal operation. Check your <role_type> input. ")
return
elif role_name in CONSTANT.UNIT_ROLABLES:
if role_type == "unit": # verify that the type is actually unit
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["unit"][role_name])
if role in ctx.author.roles:
result_msgs.append("You already have that role!")
else:
await ctx.author.add_roles(role)
result_msgs.append("Role added.")
else:
await ctx.reply("Illegal operation. Check your <role_type> input. ")
return
else:
result_msgs.append("Illegal role name. Type `>help role` for a list of acceptable role names. ")
else:
await ctx.reply("Illegal operation. Check your <role_type> input. ")
return
final_msg: str = ""
for name, result in zip(role_names, result_msgs):
final_msg += "**{}**: {} \n".format(name, result)
await ctx.reply(final_msg)
@commands.command()
@check_if_role_or_bot_spam()
async def unrole(self, ctx: commands.Context, role_type: str, *role_names):
"""
Delete a role.
<role_type>: Use 'main' or 'sub' to indicate which type of role you wish to delete.
If you delete your main role, your nametag colour will change to that of your highest sub role
until you add a new main role.
[role_names...]: The name of the role you want to delete, names are not case-sensitive.
You can enter as many names as you want to.
Example:
">unrole main Sally"
--- will remove Sally as your main role.
--- If you have Meimei as a sub role, your nametag colour will then be light blue
until you add a new main role.
        Multiple role deletion works the same way as >role does; for more help, send ">help role".
"""
role_names: list[str] = [x.capitalize() for x in role_names]
        if not role_names:
            await ctx.reply("Missing required argument. ")
            return
result_msgs: list[str] = []
for role_name in role_names:
if role_name in CONSTANT.GENERAL_ROLEABLES:
if role_type == 'main':
if role_name in CONSTANT.ROLES_ID["main"].keys():
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["main"][role_name])
else:
result_msgs.append("Illegal role name for main roles. ")
continue
elif role_type == 'sub':
if role_name in CONSTANT.ROLES_ID["sub"].keys():
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["sub"][role_name])
else:
result_msgs.append("Illegal role name for sub roles. ")
continue
elif role_type == 'unit':
if role_name in CONSTANT.ROLES_ID["unit"].keys():
role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["unit"][role_name])
else:
result_msgs.append("Illegal role name for unit roles. ")
continue
else:
await ctx.send("Invalid selection. ")
return
if role not in ctx.author.roles:
result_msgs.append("You don't have that role!")
else:
await ctx.author.remove_roles(role)
result_msgs.append("Role removed.")
else:
result_msgs.append("Illegal role name. Type `>help unrole` for a list of acceptable role names. ")
final_msg: str = ""
for name, result in zip(role_names, result_msgs):
final_msg += "**{}**: {} \n".format(name, result)
await ctx.reply(final_msg)
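# --- Added loading sketch (not part of the original cog) ---
# Hedged illustration of how this cog is typically registered as an extension,
# assuming discord.py 2.x where the extension setup hook is asynchronous.
async def setup(bot: commands.Bot):
    await bot.add_cog(Roles(bot))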
|
from django.db import models
from django.forms import ModelForm
from django.forms import TextInput
from .models import agendamento
#import datetime
#class frm_agendamento(forms.ModelForm):
#
# data_agendamento = forms.DateField(label="Data",initial=datetime.date.today)
# horario_inicio = forms.TimeField(label="Inicio",initial=datetime.datetime.now().strftime('%H:%M'))
# horario_fim = forms.TimeField(label="Fim", initial=datetime.datetime.now().strftime('%H:%M'))
#
# motivo = forms.CharField(
# label='Motivo', widget=forms.Textarea
# )
class frm_agendamento(ModelForm):  # model-based form
class Meta:
model = agendamento
        exclude = ('criado_em', 'google_link')  # fields that will not be used in the form
widgets = {
'data_agendamento': TextInput( attrs={'class':'form-control datepicker', 'data-date-format':'dd/mm/yyyy'}),
'horario_inicio': TextInput( attrs={'class':'form-control'}),
'horario_fim': TextInput( attrs={'class':'form-control'}),
'motivo': TextInput( attrs={'class':'form-control'})
}
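# --- Added usage sketch (not part of the original file) ---
# Hypothetical view showing how frm_agendamento is usually bound, validated and
# saved; in a real project this would live in views.py, and the URL name and
# template name below are assumptions for illustration only.
from django.shortcuts import redirect, render
def novo_agendamento(request):
    form = frm_agendamento(request.POST or None)
    if form.is_valid():
        form.save()
        return redirect('agendamento_list')  # hypothetical URL name
    return render(request, 'frm_agendamento.html', {'form': form})  # hypothetical template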
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods for working with entity types in the ontology."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import typing
from typing import Optional, Tuple
from yamlformat.validator import base_lib
from yamlformat.validator import config_folder_lib
from yamlformat.validator import field_lib
from yamlformat.validator import findings_lib
ENTITY_TYPE_NAME_REGEX = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*(?:_[a-zA-Z0-9]+)*$')
FIELD_INCREMENT_STRIPPER_REGEX = re.compile(
r'(^[a-z][a-z0-9]*(?:_[a-z][a-z0-9]*)*)((?:_[0-9]+)+)$')
FieldParts = typing.NamedTuple('FieldParts',
[('namespace', str), ('field', str),
('increment', str)])
OptWrapper = typing.NamedTuple('OptWrapper', [('field', FieldParts),
('optional', bool)])
TypeParts = typing.NamedTuple('TypeParts', [('namespace', str),
('typename', str)])
EntityIdByEntry = typing.NamedTuple('EntityIdByEntry', [('namespace', str),
('typename', str)])
def SeparateFieldNamespace(qualified_field_name: str) -> Tuple[str, str]:
"""Returns the namespace and its field name as separate values or an Error.
Args:
qualified_field_name: a qualified field string like `HVAC/run_status`
Throws:
TypeError: if the field is not qualified
"""
fqf_parsed = qualified_field_name.split('/')
if len(fqf_parsed) == 1:
raise TypeError('Type improperly formatted, a namespace is missing: ',
fqf_parsed)
if len(fqf_parsed) > 2:
raise ValueError('Type improperly formatted, too many separators: ',
fqf_parsed)
return fqf_parsed[0], fqf_parsed[1]
def SeparateFieldIncrement(field_name) -> Tuple[str, str]:
"""Takes as an input a field_name (string) and returns a tuple of strings.
The first element is the standard field name and its increment when available.
For example: zone_occupancy_status_1 -> [zone_occupancy_status, 1]
Args:
field_name: the field name to parse.
Returns:
A tuple of string, the standard field name and its increment if available.
"""
field_name_part = field_name
increment_part = ''
match = FIELD_INCREMENT_STRIPPER_REGEX.match(field_name)
if match:
field_name_part = match.group(1)
increment_part = match.group(2)
return field_name_part, increment_part
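# Added illustration (an assumption based only on the regex above, not original
# documentation): the increment is returned with its leading underscore, e.g.
#   SeparateFieldIncrement('zone_occupancy_status_1')    -> ('zone_occupancy_status', '_1')
#   SeparateFieldIncrement('supply_air_flowrate_sensor') -> ('supply_air_flowrate_sensor', '')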
class EntityTypeUniverse(findings_lib.Findings):
"""Helper class to represent the defined universe of EntityTypes.
Only contains valid EntityTypes.
  Attributes:
namespace_folder_map: a map of namespace names to EntityTypeFolders.
type_namespaces_map: a map of type names to TypeNamespaces.
type_ids_map: maps type IDs to entity types. Contains all valid types w/IDs.
"""
def __init__(self, entity_type_folders):
"""Init.
Args:
entity_type_folders: list of EntityTypeFolder objects parsed from files.
"""
super(EntityTypeUniverse, self).__init__()
self.namespace_folder_map = {}
self.type_namespaces_map = {}
self.type_ids_map = {}
self._BuildNamespaceFolderMap(entity_type_folders)
self._BuildTypeMaps(
[folder.local_namespace for folder in entity_type_folders])
def GetEntityType(self, namespace_name, typename):
"""Finds entity_type by namespace and typename and returns it or None."""
if namespace_name not in self.type_namespaces_map:
return None
return self.type_namespaces_map[namespace_name].GetType(typename)
def GetNamespace(self, namespace_name):
"""Finds namespace in the universe by name and returns it or None."""
return self.type_namespaces_map.get(namespace_name, None)
def GetNamespaces(self):
"""Get the entity type namespace objects in this universe.
Returns:
A list of EntityTypeNamespace objects
"""
return list(self.type_namespaces_map.values())
def _GetDynamicFindings(self, filter_old_warnings):
findings = []
for folder in self.namespace_folder_map.values():
findings += folder.GetFindings(filter_old_warnings)
return findings
def _BuildTypeMaps(self, type_namespaces):
"""Creates a dict mapping namespace strings to TypeNamespace objects.
Sets the self.type_namespaces_map attribute of the class.
Args:
type_namespaces: a list of TypeNamespace objects.
Raises:
RuntimeError: if assumptions about internal data structures are violated.
"""
for type_namespace in type_namespaces:
self.type_namespaces_map[type_namespace.namespace] = type_namespace
for entity_type in type_namespace.valid_types_map.values():
if entity_type.uid:
if entity_type.uid in self.type_ids_map:
dup_id_entry = self.type_ids_map[entity_type.uid]
dup_id_type = self.GetEntityType(dup_id_entry.namespace,
dup_id_entry.typename)
if dup_id_type is None:
raise RuntimeError('Duplicate type with uid ' + entity_type.uid +
' should always be mapped')
entity_type.AddFinding(
findings_lib.DuplicateIdsError(type_namespace.namespace,
entity_type, dup_id_type))
dup_id_type.AddFinding(
findings_lib.DuplicateIdsError(dup_id_entry.namespace,
dup_id_type, entity_type))
self.type_ids_map[entity_type.uid] = EntityIdByEntry(
namespace=type_namespace.namespace, typename=entity_type.typename)
def _BuildNamespaceFolderMap(self, type_folders):
"""Creates a dict mapping namespace strings to EntityTypeFolder objects.
Sets the self.namespace_folder_map attribute of the class.
Args:
type_folders: a list of EntityTypeFolder objects.
"""
for folder in type_folders:
self.namespace_folder_map[folder.local_namespace.namespace] = folder
class EntityTypeFolder(config_folder_lib.ConfigFolder):
"""Class representing a namespace folder of entity types.
Class fully validates all entity types defined within the namespace folder,
collects issues found, and stores all valid entity types.
Attributes:
local_namespace: TypeNamespace object representing this namespace.
"""
def __init__(self, folderpath, field_universe=None):
"""Init.
Args:
folderpath: required string with full path to the folder containing entity
type files. Path should be relative to google3/ and have no leading or
trailing /.
field_universe: optional FieldsUniverse object.
"""
super(EntityTypeFolder, self).__init__(folderpath,
base_lib.ComponentType.ENTITY_TYPE)
self.local_namespace = TypeNamespace(self._namespace_name, field_universe)
def Finalize(self):
"""Call to complete entity creation after all types are added."""
self.local_namespace.QualifyParentNames()
def _AddFromConfigHelper(self, document, context):
for type_name in document:
new_type = self._ConstructType(type_name, document[type_name],
context.filepath)
self._AddType(new_type)
def _ConstructField(self, local_field_names, optional, output_array):
for qualified_field_name in local_field_names:
field_ns, raw_field_name = field_lib.SplitFieldName(qualified_field_name)
std_field_name, increment = SeparateFieldIncrement(raw_field_name)
# Field will look local if undefined, but we'll catch the error later
      # Because we do explicit existence checks and it will fail
# TODO(berkoben) refactor so validation happens in an order that
# prevents this logic lint
field_ns = self.local_namespace.GetQualifiedNamespace(
field_ns, std_field_name)
output_array.append(
OptWrapper(
field=FieldParts(
namespace=field_ns, field=std_field_name,
increment=increment),
optional=optional))
def _ConstructType(self, type_name, type_contents, filepath):
"""Reads a entity type config block and generates an EntityType object."""
description = ''
parents = None
local_field_names = None
opt_local_field_names = None
is_abstract = False
is_canonical = False
uid = None
expected_keys = set([
'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'id',
'is_canonical'
])
if 'description' in type_contents:
description = type_contents['description']
if 'implements' in type_contents:
parents = type_contents['implements']
if 'uses' in type_contents:
local_field_names = type_contents['uses']
if 'opt_uses' in type_contents:
opt_local_field_names = type_contents['opt_uses']
if 'is_abstract' in type_contents:
is_abstract = type_contents['is_abstract']
if 'is_canonical' in type_contents:
is_canonical = type_contents['is_canonical']
if 'id' in type_contents:
uid = type_contents['id']
# Generate tuples to represent each field
fq_lfn = []
if local_field_names:
self._ConstructField(local_field_names, False, fq_lfn)
if opt_local_field_names:
self._ConstructField(opt_local_field_names, True, fq_lfn)
entity_type = EntityType(
filepath=filepath,
typename=type_name,
description=description,
parents=parents,
local_field_tuples=fq_lfn,
is_abstract=is_abstract,
inherited_fields_expanded=False,
is_canonical=is_canonical,
uid=uid,
namespace=self.local_namespace)
# Add errors to type if there's anything extra in the block. We add to the
# entity type because an extra key here is likely a typo in a real key name
# that would result in information being lost from the type.
for key in type_contents:
if key not in expected_keys:
entity_type.AddFinding(
findings_lib.UnrecognizedKeyError(key, entity_type.file_context))
return entity_type
def _AddType(self, entity_type):
"""Adds entity_type if it is fully valid.
If formatting is correct, continues on to field validation.
Records all findings in object.
Args:
entity_type: EntityType object.
Returns:
True if the entity type was successfully validated and added. False
otherwise.
"""
if not entity_type.IsValid():
self.AddFindings(entity_type.GetFindings())
return False
return self.local_namespace.InsertType(entity_type)
class TypeNamespace(findings_lib.Findings):
"""Class representing a namespace of entity types.
Attributes:
namespace: string
valid_types_map: Dict mapping typename strings to EntityType objects.
"""
def __init__(self, namespace, field_universe=None):
super(TypeNamespace, self).__init__()
self.namespace = namespace
self._field_universe = field_universe
self.valid_types_map = {}
self._parents_qualified = False
def _GetDynamicFindings(self, filter_old_warnings):
findings = []
for entity_type in self.valid_types_map.values():
findings += entity_type.GetFindings(filter_old_warnings)
return findings
def GetType(self, typename):
return self.valid_types_map.get(typename, None)
def InsertType(self, entity_type):
"""Validate that declared fields are defined.
Adds type if valid and unique.
Findings for non-validated fields are applied to this TypeNamespace.
Args:
entity_type: entity to attempt to add.
Returns:
True if entity was added successfully.
Raises:
RuntimeError: if this is called after qualifying parent names
"""
if self._parents_qualified:
raise RuntimeError('Cannot add types after Qualifying parents')
if self._ValidateFields(entity_type):
typename = entity_type.typename
mapped_entity_type = self.valid_types_map.get(typename)
if mapped_entity_type is None:
self.valid_types_map[typename] = entity_type
return True
# entity_type is a duplicate type
self.AddFinding(
findings_lib.DuplicateEntityTypeDefinitionError(
self, entity_type, mapped_entity_type.file_context))
return False
return False
def GetQualifiedNamespace(self, field_ns, field_name):
"""Returns the namespace name for this field.
Args:
field_ns: namespace of field as parsed from the config
field_name: unqualified field name string
Returns:
The fully qualified field string.
"""
if not field_ns and self.IsLocalField(field_name):
return self.namespace
return field_ns
def _BuildQualifiedParentTuple(self, parent_name):
"""Creates the two-part parent tuple with a fully-qualified namespace.
Args:
parent_name: string as specified in the config file.
Returns:
A TypeParts tuple representing this parent.
"""
namespace_name = self.namespace
split = parent_name.split('/')
if len(split) != 2:
if not self.GetType(parent_name):
# parent is in the global namespace
namespace_name = ''
else:
namespace_name = split[0]
parent_name = split[1]
return TypeParts(namespace=namespace_name, typename=parent_name)
def QualifyParentNames(self):
"""Sets parents attribute of this namespace with fully qualified names."""
if self._parents_qualified:
return
for entity_type in self.valid_types_map.values():
fq_tuplemap = {}
for parent in entity_type.unqualified_parent_names:
fq_tuple = self._BuildQualifiedParentTuple(parent)
fq_name = '{0}/{1}'.format(fq_tuple.namespace, fq_tuple.typename)
fq_tuplemap[fq_name] = fq_tuple
entity_type.parent_names = fq_tuplemap
self._parents_qualified = True
def IsLocalField(self, field_name):
"""Returns true if this unqualified field is defined in the namespace.
Args:
field_name: an unqualified field name with no leading '/'
"""
if not self._field_universe:
return False
return self._field_universe.IsFieldDefined(field_name, self.namespace)
def _ValidateFields(self, entity):
"""Validates that all fields declared by entity are defined."""
# if field_universe is not defined just return true
if not self._field_universe:
return True
valid = True
for field_tuple in entity.local_field_names.values():
if not self._ValidateField(field_tuple.field, entity):
valid = False
return valid
def _ValidateField(self, field_tuple, entity):
"""Validates that field declared by entity is defined.
Field formatting has already been validated.
Findings are saved on the TypeNamespace.
Args:
field_tuple: tuple representing a fully qualified field
entity: EntityType
Returns:
True if field is defined.
"""
if not self._field_universe.IsFieldDefined(field_tuple.field,
field_tuple.namespace):
self.AddFinding(
findings_lib.UndefinedFieldError(entity, field_tuple.field))
return False
return True
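# Editor's note: a hedged illustration (not part of the original module) of how
# parent names qualify in _BuildQualifiedParentTuple for a namespace named
# 'HVAC': a two-part name such as 'LIGHTING/FIXTURE' keeps its explicit
# namespace, a bare name that is defined locally resolves to 'HVAC/<name>', and
# a bare name that is not found locally falls back to the global namespace ''.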
def BuildQualifiedField(opt_tuple):
field_tuple = opt_tuple.field
return '{0}/{1}{2}'.format(field_tuple.namespace, field_tuple.field,
field_tuple.increment)
class EntityType(findings_lib.Findings):
"""Creates an EntityType object from a set of values describing the type.
Attributes:
file_context: FileContext object containing file info.
typename: string.
description: string.
parent_names: a list of parent typename strings.
local_field_names: the local set of standard field names
    inherited_field_names: the dict of inherited field names. Always initialized
      empty at init, to be expanded later.
inherited_fields_expanded: boolean.
is_canonical: boolean indicating if this is a curated canonical type.
uid: the database ID string of this type if uploaded
namespace: a reference to the namespace object the entity belongs to
Returns:
An instance of the EntityType class.
"""
def __init__(self,
begin_line_number=0,
filepath='',
typename='',
description='',
parents=None,
local_field_tuples=None,
is_abstract=False,
inherited_fields_expanded=False,
is_canonical=False,
uid=None,
namespace=None):
"""Init.
Args:
begin_line_number: int. Starting line number for the entity type
definition.
filepath: string. google3 path to the file defining the type.
typename: required string.
description: required string.
parents: list of parent typename strings.
local_field_tuples: list of OptWrapper tuples
is_abstract: boolean indicating if this is an abstract type.
inherited_fields_expanded: boolean. Should be false at init.
is_canonical: boolean indicating if this is a curated canonical type.
uid: the database ID string of this type if uploaded
namespace: a reference to the namespace object the entity belongs to
"""
super(EntityType, self).__init__()
self.file_context = findings_lib.FileContext(
begin_line_number=begin_line_number, filepath=filepath)
self.typename = typename
self.description = description
self.namespace = namespace
self.local_field_names = {}
local_field_names = []
if local_field_tuples:
local_field_names = [
BuildQualifiedField(opt_parts) for opt_parts in local_field_tuples
]
for i, lfn in enumerate(local_field_names):
self.local_field_names[lfn] = local_field_tuples[i]
self.inherited_field_names = {}
self.inherited_fields_expanded = inherited_fields_expanded
if parents is None:
parents = []
self.parent_names = None
self.parent_name_tuples = None
self.unqualified_parent_names = parents
self._all_fields = None
self._has_optional_fields = None
self.is_abstract = is_abstract
self.is_canonical = is_canonical
self.uid = uid
# TODO(berkoben) update this method to use tuples if possible
self._ValidateType(local_field_names)
def HasOptionalFields(self, run_unsafe=False):
if not (self.inherited_fields_expanded or run_unsafe):
raise RuntimeError('Type has not been expanded')
if self._has_optional_fields is not None:
return self._has_optional_fields
fields = self.GetAllFields()
for field in fields.values():
if field.optional:
self._has_optional_fields = True
return self._has_optional_fields
self._has_optional_fields = False
return self._has_optional_fields
def GetAllFields(self, run_unsafe=False):
"""Returns the expanded set of fields for this type.
Args:
run_unsafe: set true to run against a type before fields are fully
expanded. Running in this mode does not memoize the result.
Returns:
      A dictionary mapping fully qualified field name strings to OptWrapper
      tuples describing the contents of each field.
Raises:
RuntimeError: if fields have not yet been expanded.
"""
if not (self.inherited_fields_expanded or run_unsafe):
raise RuntimeError('Type {0} has not been expanded'.format(self.typename))
if self._all_fields is None:
tmp = self.local_field_names.copy()
tmp.update(self.inherited_field_names)
if run_unsafe:
return tmp
self._all_fields = tmp
return self._all_fields
def HasFieldAsWritten(self,
fieldname_as_written: str,
run_unsafe: bool = False) -> bool:
"""Returns true if a valid config file value maps to a field in the type.
Accepts a field name as written in a configuration file
referencing this type. The method applies context-aware namespace
omission (i.e. referencing a field without its namespace) to identify the
field regardless of the namespace and syntax variation.
    Note: to minimize redundancy, this method simply wraps
    `GetFieldFromConfigText()`. If your application also needs the `Field`, use
    that method instead to eliminate redundant processing.
Args:
fieldname_as_written: string verbatim from a building or ontology config
run_unsafe: set true to allow calls before parent type fields are expanded
Returns:
True if the Field is defined on the type. False otherwise.
"""
return self.GetFieldFromConfigText(fieldname_as_written,
run_unsafe) is not None
def GetFieldFromConfigText(self,
fieldname_as_written: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
"""Returns `OptWrapper` provided string validates against the entity.
Accepts a field name as written in a configuration file
referencing this type. The method applies all shorthanding rules to identify
the field regardless of the namespace and syntax variation.
Args:
fieldname_as_written: string verbatim from a building or ontology config
run_unsafe: set true to allow calls before parent type fields are expanded
Returns:
`OptWrapper` if field is present, None otherwise
"""
try:
# Check the field as if it's fully qualified.
return self.GetField(fieldname_as_written, run_unsafe)
except TypeError:
pass
# Field is unqualified so it is either global or type-namespace-local
# Check for a locally defined field first using type's namespace
field = self._GetField(
self.namespace.namespace + '/' + fieldname_as_written, run_unsafe)
if not field:
# Check field as if it's in the global namespace
field = self._GetField('/' + fieldname_as_written, run_unsafe)
return field
def HasField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> bool:
"""Returns True if field string validates against the entity's fields.
Args:
      fully_qualified_fieldname: a fully qualified field name, for example:
        "HVAC/run_status_1".
run_unsafe: set true to run against a type before fields are fully
expanded. Running in this mode does not memoize the result.
Throws:
TypeError: if the field is not fully qualified
"""
return self.GetField(fully_qualified_fieldname, run_unsafe) is not None
def GetField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
"""Returns `OptWrapper` if field string validates against the entity.
Args:
      fully_qualified_fieldname: a fully qualified field name, for example:
        "HVAC/run_status_1".
run_unsafe: set true to run against a type before fields are fully
expanded. Running in this mode does not memoize the result.
Returns:
`OptWrapper` if field is present, None otherwise
Throws:
TypeError: if the field is not fully qualified
"""
# Throws an error in the case that this isn't a fully qualified field
_, _ = SeparateFieldNamespace(fully_qualified_fieldname)
return self._GetField(fully_qualified_fieldname, run_unsafe)
def _GetField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
return self.GetAllFields(run_unsafe).get(fully_qualified_fieldname)
def _ValidateType(self, local_field_names):
"""Validates that the entity type is formatted correctly.
Checks for formatting and duplicate fields and parents.
Records any errors found.
Args:
local_field_names: list of local field names for the type.
"""
# Make sure the typename is non-empty.
if not self.typename:
self.AddFinding(findings_lib.MissingTypenameError(self))
elif not isinstance(self.typename, str):
self.AddFinding(
findings_lib.IllegalKeyTypeError(self.typename, self.file_context))
elif not ENTITY_TYPE_NAME_REGEX.match(self.typename):
self.AddFinding(
findings_lib.InvalidTypenameError(self.typename, self.file_context))
# Make sure the type description is non-empty.
if not self.description:
self.AddFinding(findings_lib.MissingEntityTypeDescriptionWarning(self))
# Check for duplicate local fields.
# this check is case insensitive to catch dupes earlier in the event that
# we stop explicitly rejecting upper case characters
check_fields = set()
for field in local_field_names:
field_lower = field.lower()
if field_lower in check_fields:
self.AddFinding(findings_lib.DuplicateFieldError(self, field))
continue
check_fields.add(field_lower)
# TODO(berkoben): Add more checks to validate fields in isolation
# (in case we don't have a field set to check against)
# (i.e. check for chirality, formatting. Could use actual Field objects)
# Check formatting of field name
if len(field.split('/')) > 2:
self.AddFinding(findings_lib.UnrecognizedFieldFormatError(self, field))
# Check for duplicate parent names.
parent_names_check = set()
for parent_name in self.unqualified_parent_names:
if parent_name in parent_names_check:
self.AddFinding(findings_lib.DuplicateParentError(self, parent_name))
continue
parent_names_check.add(parent_name)
# Check formatting of parent name
if len(parent_name.split('/')) > 2:
self.AddFinding(
findings_lib.UnrecognizedParentFormatError(self, parent_name))
# Enforce that the inherited_fields_expanded field is not set
if self.inherited_fields_expanded:
self.AddFinding(findings_lib.InheritedFieldsSetError(self))
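# Editor's note: the block below is a hedged illustration, not part of the
# original module. It shows the shape of a single entity type config block that
# _ConstructType expects; any key outside `expected_keys` would surface as an
# UnrecognizedKeyError finding, and every value here is invented.
EXAMPLE_TYPE_BLOCK = {
    'description': 'A hypothetical air handling unit type.',
    'implements': ['HVAC/AHU_PARENT'],
    'uses': ['run_status', 'supply_air_temperature_sensor'],
    'opt_uses': ['filter_alarm_1'],
    'is_abstract': False,
    'id': '1234567890123456',
}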
|
# a = 2
print("check this file")
|
# technical
from .base_output import BaseOutput
# default
from .matplotlib_plot import MatplotlibPlot
from .extrema_printer import ExtremaPrinter
# with external dependencies
# imports are done in the respective __init__ methods
# hack-ish, but works (and I am not aware of a more proper way to do so)
from .bokeh_plot import BokehPlot
from .neptune_logger import NeptuneLogger
from .tensorboard_logger import TensorboardLogger
from .tensorboard_tf_logger import TensorboardTFLogger
# with external dependencies
from . import matplotlib_subplots
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Sikacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import SikacoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
import shutil
class WalletHDTest(SikacoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']]
    def run_test(self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
self.stop_node(1)
self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet')
self.start_node(1)
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'")  # first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
        # Also send funds to each address
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'")  # second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete regtest directory
        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
shutil.rmtree(tmpdir + "/node1/regtest/blocks")
shutil.rmtree(tmpdir + "/node1/regtest/chainstate")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.start_node(1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'")
assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# Needs rescan
self.stop_node(1)
self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
        # send a tx and make sure it's using the internal chain for the change output
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:7], "m/0'/1'")
if __name__ == '__main__':
    WalletHDTest().main()
|
import torch.nn as nn
import torch
from torch.autograd import Variable
import math
import torch.utils.model_zoo as model_zoo
from commons.siam_mask.models.features import Features
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(Features):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
# padding = (2 - stride) + (dilation // 2 - 1)
padding = 2 - stride
        assert stride == 1 or dilation == 1, "at least one of stride and dilation must be 1"
if dilation > 1:
padding = dilation
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if out.size() != residual.size():
print(out.size(), residual.size())
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, layer4=False, layer3=False):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2) # 31x31, 15x15
self.feature_size = 128 * block.expansion
if layer3:
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) # 15x15, 7x7
self.feature_size = (256 + 128) * block.expansion
else:
self.layer3 = lambda x:x # identity
if layer4:
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) # 7x7, 3x3
self.feature_size = 512 * block.expansion
else:
self.layer4 = lambda x:x # identity
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
dd = dilation
if stride != 1 or self.inplanes != planes * block.expansion:
if stride == 1 and dilation == 1:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
else:
if dilation > 1:
dd = dilation // 2
padding = dd
else:
dd = 1
padding = 0
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=3, stride=stride, bias=False,
padding=padding, dilation=dd),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
# layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))
layers.append(block(self.inplanes, planes, stride, downsample, dilation=dd))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
p0 = self.relu(x)
x = self.maxpool(p0)
p1 = self.layer1(x)
p2 = self.layer2(p1)
p3 = self.layer3(p2)
return p0, p1, p2, p3
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
if __name__ == '__main__':
net = resnet50()
print(net)
net = net.cuda()
var = torch.FloatTensor(1,3,127,127).cuda()
var = Variable(var)
net(var)
print('*************')
var = torch.FloatTensor(1,3,255,255).cuda()
var = Variable(var)
net(var)
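# Editor's note: the function below is a hedged, CPU-only sketch added for
# illustration; it is not part of the original file. It assumes a 127x127
# exemplar input (the size used in the demo above) and enables the dilated
# layer3 so the last returned feature map keeps a larger spatial resolution.
def _cpu_feature_shape_check():
    net = resnet50(layer3=True)
    net.eval()
    with torch.no_grad():
        p0, p1, p2, p3 = net(torch.zeros(1, 3, 127, 127))
    # Prints the shapes of the stem output and the three returned stages.
    print([tuple(t.shape) for t in (p0, p1, p2, p3)])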
|
import tensorflow as tf
from tensorflow.python.client import device_lib
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
gpus = get_available_gpus()
def split_nest(nest, num_or_size_splits, axis=0):
"""Split nested structure.
Examples
--------
>>> split_nest({'a': shape(10, 20), 'b': shape(4, 15)}, 2, axis=0)
>>> [{'a': shape(5, 20), 'b': shape(2, 15)}, {'a': shape(5, 20), 'b': shape(2, 15)}]
"""
flatten = tf.nest.flatten(nest)
split_flatten = [tf.split(x, num_or_size_splits, axis=axis) for x in flatten]
return [tf.nest.pack_sequence_as(nest, x) for x in zip(*split_flatten)]
def parameter_server_strategy_run(devices, fn, split_args, split_kwargs=None):
split_kwargs = [{}] * len(devices) if split_kwargs is None else split_kwargs
assert len(devices) == len(split_args) == len(split_kwargs)
split_returns = []
for device, args, kwargs in zip(devices, split_args, split_kwargs):
with tf.device(device):
args = args if isinstance(args, (list, tuple)) else (args,)
split_returns.append(fn(*args, **kwargs))
return split_returns
parellel_run = parameter_server_strategy_run
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Parameters
----------
tower_grads:
List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns
-------
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
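# Editor's note: a hedged usage sketch, not part of the original module. It
# splits a nested batch across two (illustrative) device strings, runs a toy
# function per shard, and averages hand-built per-tower gradients. The shapes,
# values, and "/CPU:0" device names are assumptions for demonstration only and
# rely on TF2 eager execution.
if __name__ == "__main__":
    batch = {"x": tf.zeros([8, 4]), "y": tf.zeros([8, 1])}
    shards = split_nest(batch, 2, axis=0)  # two dicts, each with first dim 4
    sums = parameter_server_strategy_run(
        ["/CPU:0", "/CPU:0"],
        lambda shard: tf.reduce_sum(shard["x"]),
        shards)
    v = tf.Variable([1.0, 2.0])
    tower_grads = [
        [(tf.constant([0.1, 0.2]), v)],  # gradients computed on tower 0
        [(tf.constant([0.3, 0.4]), v)],  # gradients computed on tower 1
    ]
    averaged = average_gradients(tower_grads)
    print(sums, averaged[0][0])  # averaged gradient is [0.2, 0.3]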
|
# import gevent.monkey
# gevent.monkey.patch_socket()
from pyEtherCAT import MasterEtherCAT
import time
import os
#============================================================================#
# A simple EtherCAT package made for C95.
# Ideally the mechanism would be built after gaining a deeper understanding of
# the individual packets, but since neither the explanations nor the
# experiments have caught up yet, for now only the part that can switch GPIO
# on and off at high speed has been packaged.
# It runs with Python 3 on Linux (including Raspberry Pi).
# sudo python3 test03.py
#============================================================================#
# Start of the simple library
#============================================================================#
def EtherCAT_Init(nic):
    cat = MasterEtherCAT.MasterEtherCAT(nic)  # pass the name of the network interface card
return cat
def EtherCAT_SetUp(cat):
    cat.EEPROM_SetUp(cat.ADP)  # EEPROM setup; normally no changes needed
    cat.EEPROM_Stasus(enable=0x00, command=0x04)  # EEPROM setup; normally no changes needed
    ADDR = 0x0120  # AL control register
    data = 0x0002  # 2h: request the Pre-Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120  # AL control register
    data = 0x0002  # 2h: request the Pre-Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120  # AL control register
    data = 0x0004  # 4h: request the Safe-Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
    ADDR = 0x0120  # AL control register
    data = 0x0008  # 8h: request the Operational state
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
def EtherCAT_GPIOMode(cat, data):
    ADDR = 0x0F00  # digital I/O output data register
    # data = 0x00FF  # output data
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
data & 0xFF, (data >> 8) & 0xFF])
(DATA, WKC) = cat.socket_read()
print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))
def EtherCAT_GPIO_Out(cat, data):
ADDR = 0x0F10
cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[data & 0xFF, (data >> 8) & 0xFF])
#(DATA,WKC) = cat.socket_read()
#============================================================================#
# End of the simple library
#============================================================================#
def main():
cat = EtherCAT_Init("eth0") # EtherCATのネットワーク初期設定
#-- EtherCATのステートマシンを実行に移す処理
cat.ADP = 0x0000 # PCから1台目は0、2台目以降は-1していく
EtherCAT_SetUp(cat) # EtherCATスレーブの初期設定
EtherCAT_GPIOMode(cat, 0xFFFF) # EtherCATスレーブのGPIO方向設定 0:入力 1:出力
#-- EtherCATのステートマシンを実行に移す処理
cat.ADP = 0x0000 - 1 # 例 これは2台目 繋がってなければ必要ない
EtherCAT_SetUp(cat) # EtherCATスレーブの初期設定
EtherCAT_GPIOMode(cat, 0xFFFF) # EtherCATスレーブのGPIO方向設定 0:入力 1:出力
#-- EtherCATのステートマシンを実行に移す処理
cat.ADP = 0x0000 - 2 # 例 これは3台目 繋がってなければ必要ない
EtherCAT_SetUp(cat) # EtherCATスレーブの初期設定
EtherCAT_GPIOMode(cat, 0xFFFF) # EtherCATスレーブのGPIO方向設定 0:入力 1:出力
# -- 1台目のLEDをシフトする
TIME = 0.1
cat.ADP = 0x0000
flag = 0
CNT = 0
try:
while 1:
# time.sleep(TIME)
cat.ADP = 0x0000 - 0
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
cat.ADP = 0x0000 - 1
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
cat.ADP = 0x0000 - 2
EtherCAT_GPIO_Out(cat, 0xFFFF)
time.sleep(TIME)
# for i in range(16):
# time.sleep(TIME)
# EtherCAT_GPIO_Out(cat,0x0001<<i);
# for i in range(3):
cat.ADP = 0x0000 - 0
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
cat.ADP = 0x0000 - 1
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
cat.ADP = 0x0000 - 2
EtherCAT_GPIO_Out(cat, 0x0000)
time.sleep(TIME)
# EtherCAT_GPIO_Out(cat,0x0000);
# for i in range(0xFFFF):
# EtherCAT_GPIO_Out(cat,i);
except KeyboardInterrupt:
EtherCAT_GPIO_Out(cat, 0x0000)
print("")
print("End.")
if __name__ == "__main__":
main()
|
"""Porcupine is a simple editor.
You are probably reading this because you want to learn how Porcupine
works or write fun plugins for it. I recommend getting started with the
plugin API documentation:
https://akuli.github.io/porcupine/
"""
import sys
import appdirs
version_info = (0, 99, 2) # this is updated with scripts/release.py
__version__ = "%d.%d.%d" % version_info
__author__ = "Akuli"
__copyright__ = "Copyright (c) 2017-2022 Akuli"
__license__ = "MIT"
if sys.platform in {"win32", "darwin"}:
# these platforms like path names like "Program Files" or "Application Support"
dirs = appdirs.AppDirs("Porcupine", "Akuli")
else:
dirs = appdirs.AppDirs("porcupine", "akuli")
# Must be after creating dirs
from porcupine import _state
# TODO: document get_*_panedwindow
get_main_window = _state.get_main_window
get_parsed_args = _state.get_parsed_args
get_horizontal_panedwindow = _state.get_horizontal_panedwindow
get_vertical_panedwindow = _state.get_vertical_panedwindow
get_tab_manager = _state.get_tab_manager
filedialog_kwargs = _state.filedialog_kwargs
quit = _state.quit
|
from .PCA import PCA
from .InvariantsMiner import InvariantsMiner
from .LogClustering import LogClustering
from .LR import LR
from .SVM import SVM
from .DecisionTree import DecisionTree
from .IsolationForest import IsolationForest
from .DeepLog import DeepLog
from .Autoencoder import Autoencoder
from .AutoencoderLSTM import AutoencoderLSTM
from .AutoencoderCascade import AutoencoderCascade
from .AutoencoderConvolutional import AutoencoderConv
|
import abc
class AbstractClassifier:
""" Abstract class with specific methods for classifier models (training, validation and test) """
def __init__(self):
pass
@abc.abstractmethod
def train(self, config, train_data):
"""
Classifier training.
:param config: Model configuration.
:param train_data: Train dataset with the textual information of each item and its label.
:return: A model trained with train_data according to config.
"""
pass
@abc.abstractmethod
def validation(self, config, val_data):
"""
:param config: Model configuration.
:param val_data: Validation dataset with the textual information of each item and its label.
:return: Validation metrics
"""
pass
@abc.abstractmethod
def test(self, config, test_data):
"""
Classifier testing.
:param config: Model configuration.
:param test_data: Test dataset with the textual information of each item and its label.
:return: Predictions of the model in the test_data, according to config.
"""
pass
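# Editor's note: a hedged, minimal sketch of a concrete subclass (not part of
# the original project) showing the expected shape of the interface. It assumes
# each dataset is an iterable of (text, label) pairs, which is an illustration
# only; the class name and its behavior are hypothetical.
class MajorityClassifier(AbstractClassifier):
    """Predicts the most frequent training label and ignores the text."""
    def __init__(self):
        super().__init__()
        self.majority_label = None
    def train(self, config, train_data):
        labels = [label for _, label in train_data]
        self.majority_label = max(set(labels), key=labels.count)
        return self
    def validation(self, config, val_data):
        hits = sum(1 for _, label in val_data if label == self.majority_label)
        return {"accuracy": hits / len(val_data) if val_data else 0.0}
    def test(self, config, test_data):
        return [self.majority_label for _ in test_data]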
|
import frappe
def execute():
    # there is no longer a status called "Submitted"; an old issue used to set it
    # as Submitted, and that has been fixed in this commit
frappe.db.sql("""
update
`tabPurchase Receipt`
set
status = 'To Bill'
where
status = 'Submitted'""")
|
a = <warning descr="Python version 3.0, 3.1, 3.2, 3.3, 3.4, 3.5 do not support backquotes, use repr() instead">`imp.acquire_lock()`</warning>
|
import os
import unittest
import tempfile
from unittest import mock
import uuid
import mlflow
import mlflow.db
import mlflow.store.db.base_sql_model
from mlflow.entities.model_registry import (
RegisteredModel,
ModelVersion,
RegisteredModelTag,
ModelVersionTag,
)
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import (
ErrorCode,
RESOURCE_DOES_NOT_EXIST,
INVALID_PARAMETER_VALUE,
RESOURCE_ALREADY_EXISTS,
)
from mlflow.store.model_registry.sqlalchemy_store import SqlAlchemyStore
from tests.helper_functions import random_str
DB_URI = "sqlite:///"
class TestSqlAlchemyStoreSqlite(unittest.TestCase):
def _get_store(self, db_uri=""):
return SqlAlchemyStore(db_uri)
def setUp(self):
self.maxDiff = None # print all differences on assert failures
fd, self.temp_dbfile = tempfile.mkstemp()
# Close handle immediately so that we can remove the file later on in Windows
os.close(fd)
self.db_url = "%s%s" % (DB_URI, self.temp_dbfile)
self.store = self._get_store(self.db_url)
def tearDown(self):
mlflow.store.db.base_sql_model.Base.metadata.drop_all(self.store.engine)
os.remove(self.temp_dbfile)
def _rm_maker(self, name, tags=None, description=None):
return self.store.create_registered_model(name, tags, description)
def _mv_maker(
self,
name,
source="path/to/source",
run_id=uuid.uuid4().hex,
tags=None,
run_link=None,
description=None,
):
return self.store.create_model_version(
name, source, run_id, tags, run_link=run_link, description=description
)
def _extract_latest_by_stage(self, latest_versions):
return {mvd.current_stage: mvd.version for mvd in latest_versions}
def test_create_registered_model(self):
name = random_str() + "abCD"
rm1 = self._rm_maker(name)
self.assertEqual(rm1.name, name)
self.assertEqual(rm1.description, None)
# error on duplicate
with self.assertRaisesRegex(
MlflowException, rf"Registered Model \(name={name}\) already exists"
) as exception_context:
self._rm_maker(name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS)
# slightly different name is ok
for name2 in [name + "extra", name.lower(), name.upper(), name + name]:
rm2 = self._rm_maker(name2)
self.assertEqual(rm2.name, name2)
# test create model with tags
name2 = random_str() + "tags"
tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
rm2 = self._rm_maker(name2, tags)
rmd2 = self.store.get_registered_model(name2)
self.assertEqual(rm2.name, name2)
self.assertEqual(rm2.tags, {tag.key: tag.value for tag in tags})
self.assertEqual(rmd2.name, name2)
self.assertEqual(rmd2.tags, {tag.key: tag.value for tag in tags})
# create with description
name3 = random_str() + "-description"
description = "the best model ever"
rm3 = self._rm_maker(name3, description=description)
rmd3 = self.store.get_registered_model(name3)
self.assertEqual(rm3.name, name3)
self.assertEqual(rm3.description, description)
self.assertEqual(rmd3.name, name3)
self.assertEqual(rmd3.description, description)
# invalid model name will fail
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self._rm_maker(None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self._rm_maker("")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_get_registered_model(self):
name = "model_1"
tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
# use fake clock
with mock.patch("time.time") as mock_time:
mock_time.return_value = 1234
rm = self._rm_maker(name, tags)
self.assertEqual(rm.name, name)
rmd = self.store.get_registered_model(name=name)
self.assertEqual(rmd.name, name)
self.assertEqual(rmd.creation_timestamp, 1234000)
self.assertEqual(rmd.last_updated_timestamp, 1234000)
self.assertEqual(rmd.description, None)
self.assertEqual(rmd.latest_versions, [])
self.assertEqual(rmd.tags, {tag.key: tag.value for tag in tags})
def test_update_registered_model(self):
name = "model_for_update_RM"
rm1 = self._rm_maker(name)
rmd1 = self.store.get_registered_model(name=name)
self.assertEqual(rm1.name, name)
self.assertEqual(rmd1.description, None)
# update description
rm2 = self.store.update_registered_model(name=name, description="test model")
rmd2 = self.store.get_registered_model(name=name)
self.assertEqual(rm2.name, "model_for_update_RM")
self.assertEqual(rmd2.name, "model_for_update_RM")
self.assertEqual(rmd2.description, "test model")
def test_rename_registered_model(self):
original_name = "original name"
new_name = "new name"
self._rm_maker(original_name)
self._mv_maker(original_name)
self._mv_maker(original_name)
rm = self.store.get_registered_model(original_name)
mv1 = self.store.get_model_version(original_name, 1)
mv2 = self.store.get_model_version(original_name, 2)
self.assertEqual(rm.name, original_name)
self.assertEqual(mv1.name, original_name)
self.assertEqual(mv2.name, original_name)
# test renaming registered model also updates its model versions
self.store.rename_registered_model(original_name, new_name)
rm = self.store.get_registered_model(new_name)
mv1 = self.store.get_model_version(new_name, 1)
mv2 = self.store.get_model_version(new_name, 2)
self.assertEqual(rm.name, new_name)
self.assertEqual(mv1.name, new_name)
self.assertEqual(mv2.name, new_name)
# test accessing the model with the old name will fail
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={original_name} not found"
) as exception_context:
self.store.get_registered_model(original_name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
        # test that naming another model with the replaced name is ok
self._rm_maker(original_name)
# cannot rename model to conflict with an existing model
with self.assertRaisesRegex(
MlflowException, rf"Registered Model \(name={original_name}\) already exists"
) as exception_context:
self.store.rename_registered_model(new_name, original_name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS)
# invalid model name will fail
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.rename_registered_model(original_name, None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.rename_registered_model(original_name, "")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_delete_registered_model(self):
name = "model_for_delete_RM"
self._rm_maker(name)
self._mv_maker(name)
rm1 = self.store.get_registered_model(name=name)
mv1 = self.store.get_model_version(name, 1)
self.assertEqual(rm1.name, name)
self.assertEqual(mv1.name, name)
# delete model
self.store.delete_registered_model(name=name)
# cannot get model
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name} not found"
) as exception_context:
self.store.get_registered_model(name=name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
        # cannot update a deleted model
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name} not found"
) as exception_context:
self.store.update_registered_model(name=name, description="deleted")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# cannot delete it again
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name} not found"
) as exception_context:
self.store.delete_registered_model(name=name)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# model versions are cascade deleted with the registered model
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={name}, version=1\) not found"
) as exception_context:
self.store.get_model_version(name, 1)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
def _list_registered_models(self, page_token=None, max_results=10):
result = self.store.list_registered_models(max_results, page_token)
for idx in range(len(result)):
result[idx] = result[idx].name
return result
def test_list_registered_model(self):
self._rm_maker("A")
registered_models = self.store.list_registered_models(max_results=10, page_token=None)
self.assertEqual(len(registered_models), 1)
self.assertEqual(registered_models[0].name, "A")
self.assertIsInstance(registered_models[0], RegisteredModel)
self._rm_maker("B")
self.assertEqual(set(self._list_registered_models()), set(["A", "B"]))
self._rm_maker("BB")
self._rm_maker("BA")
self._rm_maker("AB")
self._rm_maker("BBC")
self.assertEqual(
set(self._list_registered_models()), set(["A", "B", "BB", "BA", "AB", "BBC"])
)
# list should not return deleted models
self.store.delete_registered_model(name="BA")
self.store.delete_registered_model(name="B")
self.assertEqual(set(self._list_registered_models()), set(["A", "BB", "AB", "BBC"]))
def test_list_registered_model_paginated_last_page(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
# test flow with fixed max_results
returned_rms = []
result = self._list_registered_models(page_token=None, max_results=25)
returned_rms.extend(result)
while result.token:
result = self._list_registered_models(page_token=result.token, max_results=25)
self.assertEqual(len(result), 25)
returned_rms.extend(result)
self.assertEqual(result.token, None)
self.assertEqual(set(rms), set(returned_rms))
def test_list_registered_model_paginated_returns_in_correct_order(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
# test that pagination will return all valid results in sorted order
# by name ascending
result = self._list_registered_models(max_results=5)
self.assertNotEqual(result.token, None)
self.assertEqual(result, rms[0:5])
result = self._list_registered_models(page_token=result.token, max_results=10)
self.assertNotEqual(result.token, None)
self.assertEqual(result, rms[5:15])
result = self._list_registered_models(page_token=result.token, max_results=20)
self.assertNotEqual(result.token, None)
self.assertEqual(result, rms[15:35])
result = self._list_registered_models(page_token=result.token, max_results=100)
# assert that page token is None
self.assertEqual(result.token, None)
self.assertEqual(result, rms[35:])
def test_list_registered_model_paginated_errors(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
# test that providing a completely invalid page token throws
with self.assertRaisesRegex(
MlflowException, r"Invalid page token, could not base64-decode"
) as exception_context:
self._list_registered_models(page_token="evilhax", max_results=20)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# test that providing too large of a max_results throws
with self.assertRaisesRegex(
MlflowException, r"Invalid value for request parameter max_results"
) as exception_context:
self._list_registered_models(page_token="evilhax", max_results=1e15)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# list should not return deleted models
self.store.delete_registered_model(name="RM{0:03}".format(0))
self.assertEqual(set(self._list_registered_models(max_results=100)), set(rms[1:]))
def test_get_latest_versions(self):
name = "test_for_latest_versions"
self._rm_maker(name)
rmd1 = self.store.get_registered_model(name=name)
self.assertEqual(rmd1.latest_versions, [])
mv1 = self._mv_maker(name)
self.assertEqual(mv1.version, 1)
rmd2 = self.store.get_registered_model(name=name)
self.assertEqual(self._extract_latest_by_stage(rmd2.latest_versions), {"None": 1})
# add a bunch more
mv2 = self._mv_maker(name)
self.assertEqual(mv2.version, 2)
self.store.transition_model_version_stage(
name=mv2.name, version=mv2.version, stage="Production", archive_existing_versions=False
)
mv3 = self._mv_maker(name)
self.assertEqual(mv3.version, 3)
self.store.transition_model_version_stage(
name=mv3.name, version=mv3.version, stage="Production", archive_existing_versions=False
)
mv4 = self._mv_maker(name)
self.assertEqual(mv4.version, 4)
self.store.transition_model_version_stage(
name=mv4.name, version=mv4.version, stage="Staging", archive_existing_versions=False
)
# test that correct latest versions are returned for each stage
rmd4 = self.store.get_registered_model(name=name)
self.assertEqual(
self._extract_latest_by_stage(rmd4.latest_versions),
{"None": 1, "Production": 3, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=None)),
{"None": 1, "Production": 3, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=[])),
{"None": 1, "Production": 3, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["Production"])
),
{"Production": 3},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["production"])
),
{"Production": 3},
) # The stages are case insensitive.
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["pROduction"])
),
{"Production": 3},
) # The stages are case insensitive.
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["None", "Production"])
),
{"None": 1, "Production": 3},
)
# delete latest Production, and should point to previous one
self.store.delete_model_version(name=mv3.name, version=mv3.version)
rmd5 = self.store.get_registered_model(name=name)
self.assertEqual(
self._extract_latest_by_stage(rmd5.latest_versions),
{"None": 1, "Production": 2, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=None)),
{"None": 1, "Production": 2, "Staging": 4},
)
self.assertEqual(
self._extract_latest_by_stage(
self.store.get_latest_versions(name=name, stages=["Production"])
),
{"Production": 2},
)
def test_set_registered_model_tag(self):
name1 = "SetRegisteredModelTag_TestMod"
name2 = "SetRegisteredModelTag_TestMod 2"
initial_tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
self._rm_maker(name1, initial_tags)
self._rm_maker(name2, initial_tags)
new_tag = RegisteredModelTag("randomTag", "not a random value")
self.store.set_registered_model_tag(name1, new_tag)
rm1 = self.store.get_registered_model(name=name1)
all_tags = initial_tags + [new_tag]
self.assertEqual(rm1.tags, {tag.key: tag.value for tag in all_tags})
# test overriding a tag with the same key
overriding_tag = RegisteredModelTag("key", "overriding")
self.store.set_registered_model_tag(name1, overriding_tag)
all_tags = [tag for tag in all_tags if tag.key != "key"] + [overriding_tag]
rm1 = self.store.get_registered_model(name=name1)
self.assertEqual(rm1.tags, {tag.key: tag.value for tag in all_tags})
# does not affect other models with the same key
rm2 = self.store.get_registered_model(name=name2)
self.assertEqual(rm2.tags, {tag.key: tag.value for tag in initial_tags})
        # can not set tag on deleted (non-existent) registered model
self.store.delete_registered_model(name1)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name1} not found"
) as exception_context:
self.store.set_registered_model_tag(name1, overriding_tag)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# test cannot set tags that are too long
long_tag = RegisteredModelTag("longTagKey", "a" * 5001)
with self.assertRaisesRegex(
MlflowException,
r"Registered model value '.+' had length \d+, which exceeded length limit of 5000",
) as exception_context:
self.store.set_registered_model_tag(name2, long_tag)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# test can set tags that are somewhat long
long_tag = RegisteredModelTag("longTagKey", "a" * 4999)
self.store.set_registered_model_tag(name2, long_tag)
# can not set invalid tag
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.set_registered_model_tag(name2, RegisteredModelTag(key=None, value=""))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# can not use invalid model name
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.set_registered_model_tag(None, RegisteredModelTag(key="key", value="value"))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_delete_registered_model_tag(self):
name1 = "DeleteRegisteredModelTag_TestMod"
name2 = "DeleteRegisteredModelTag_TestMod 2"
initial_tags = [
RegisteredModelTag("key", "value"),
RegisteredModelTag("anotherKey", "some other value"),
]
self._rm_maker(name1, initial_tags)
self._rm_maker(name2, initial_tags)
new_tag = RegisteredModelTag("randomTag", "not a random value")
self.store.set_registered_model_tag(name1, new_tag)
self.store.delete_registered_model_tag(name1, "randomTag")
rm1 = self.store.get_registered_model(name=name1)
self.assertEqual(rm1.tags, {tag.key: tag.value for tag in initial_tags})
# testing deleting a key does not affect other models with the same key
self.store.delete_registered_model_tag(name1, "key")
rm1 = self.store.get_registered_model(name=name1)
rm2 = self.store.get_registered_model(name=name2)
self.assertEqual(rm1.tags, {"anotherKey": "some other value"})
self.assertEqual(rm2.tags, {tag.key: tag.value for tag in initial_tags})
        # deleting a tag that is already deleted does nothing
self.store.delete_registered_model_tag(name1, "key")
rm1 = self.store.get_registered_model(name=name1)
self.assertEqual(rm1.tags, {"anotherKey": "some other value"})
        # can not delete tag on deleted (non-existent) registered model
self.store.delete_registered_model(name1)
with self.assertRaisesRegex(
MlflowException, rf"Registered Model with name={name1} not found"
) as exception_context:
self.store.delete_registered_model_tag(name1, "anotherKey")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# can not delete tag with invalid key
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.delete_registered_model_tag(name2, None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# can not use invalid model name
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.delete_registered_model_tag(None, "key")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_create_model_version(self):
name = "test_for_update_MV"
self._rm_maker(name)
run_id = uuid.uuid4().hex
with mock.patch("time.time") as mock_time:
mock_time.return_value = 456778
mv1 = self._mv_maker(name, "a/b/CD", run_id)
self.assertEqual(mv1.name, name)
self.assertEqual(mv1.version, 1)
mvd1 = self.store.get_model_version(mv1.name, mv1.version)
self.assertEqual(mvd1.name, name)
self.assertEqual(mvd1.version, 1)
self.assertEqual(mvd1.current_stage, "None")
self.assertEqual(mvd1.creation_timestamp, 456778000)
self.assertEqual(mvd1.last_updated_timestamp, 456778000)
self.assertEqual(mvd1.description, None)
self.assertEqual(mvd1.source, "a/b/CD")
self.assertEqual(mvd1.run_id, run_id)
self.assertEqual(mvd1.status, "READY")
self.assertEqual(mvd1.status_message, None)
self.assertEqual(mvd1.tags, {})
# new model versions for same name autoincrement versions
mv2 = self._mv_maker(name)
mvd2 = self.store.get_model_version(name=mv2.name, version=mv2.version)
self.assertEqual(mv2.version, 2)
self.assertEqual(mvd2.version, 2)
# create model version with tags return model version entity with tags
tags = [ModelVersionTag("key", "value"), ModelVersionTag("anotherKey", "some other value")]
mv3 = self._mv_maker(name, tags=tags)
mvd3 = self.store.get_model_version(name=mv3.name, version=mv3.version)
self.assertEqual(mv3.version, 3)
self.assertEqual(mv3.tags, {tag.key: tag.value for tag in tags})
self.assertEqual(mvd3.version, 3)
self.assertEqual(mvd3.tags, {tag.key: tag.value for tag in tags})
# create model versions with runLink
run_link = "http://localhost:3000/path/to/run/"
mv4 = self._mv_maker(name, run_link=run_link)
mvd4 = self.store.get_model_version(name, mv4.version)
self.assertEqual(mv4.version, 4)
self.assertEqual(mv4.run_link, run_link)
self.assertEqual(mvd4.version, 4)
self.assertEqual(mvd4.run_link, run_link)
# create model version with description
description = "the best model ever"
mv5 = self._mv_maker(name, description=description)
mvd5 = self.store.get_model_version(name, mv5.version)
self.assertEqual(mv5.version, 5)
self.assertEqual(mv5.description, description)
self.assertEqual(mvd5.version, 5)
self.assertEqual(mvd5.description, description)
# create model version without runId
mv6 = self._mv_maker(name, run_id=None)
mvd6 = self.store.get_model_version(name, mv6.version)
self.assertEqual(mv6.version, 6)
self.assertEqual(mv6.run_id, None)
self.assertEqual(mvd6.version, 6)
self.assertEqual(mvd6.run_id, None)
def test_update_model_version(self):
name = "test_for_update_MV"
self._rm_maker(name)
mv1 = self._mv_maker(name)
mvd1 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd1.name, name)
self.assertEqual(mvd1.version, 1)
self.assertEqual(mvd1.current_stage, "None")
# update stage
self.store.transition_model_version_stage(
name=mv1.name, version=mv1.version, stage="Production", archive_existing_versions=False
)
mvd2 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd2.name, name)
self.assertEqual(mvd2.version, 1)
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd2.description, None)
# update description
self.store.update_model_version(
name=mv1.name, version=mv1.version, description="test model version"
)
mvd3 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd3.name, name)
self.assertEqual(mvd3.version, 1)
self.assertEqual(mvd3.current_stage, "Production")
self.assertEqual(mvd3.description, "test model version")
# only valid stages can be set
with self.assertRaisesRegex(
MlflowException, r"Invalid Model Version stage unknown"
) as exception_context:
self.store.transition_model_version_stage(
mv1.name, mv1.version, stage="unknown", archive_existing_versions=False
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# stages are case-insensitive and auto-corrected to system stage names
for stage_name in ["STAGING", "staging", "StAgInG"]:
self.store.transition_model_version_stage(
name=mv1.name,
version=mv1.version,
stage=stage_name,
archive_existing_versions=False,
)
mvd5 = self.store.get_model_version(name=mv1.name, version=mv1.version)
self.assertEqual(mvd5.current_stage, "Staging")
def test_transition_model_version_stage_when_archive_existing_versions_is_false(self):
name = "model"
self._rm_maker(name)
mv1 = self._mv_maker(name)
mv2 = self._mv_maker(name)
mv3 = self._mv_maker(name)
# test that when `archive_existing_versions` is False, transitioning a model version
# to the inactive stages ("Archived" and "None") does not throw.
for stage in ["Archived", "None"]:
self.store.transition_model_version_stage(name, mv1.version, stage, False)
self.store.transition_model_version_stage(name, mv1.version, "Staging", False)
self.store.transition_model_version_stage(name, mv2.version, "Production", False)
self.store.transition_model_version_stage(name, mv3.version, "Staging", False)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Staging")
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd3.current_stage, "Staging")
self.store.transition_model_version_stage(name, mv3.version, "Production", False)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Staging")
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd3.current_stage, "Production")
def test_transition_model_version_stage_when_archive_existing_versions_is_true(self):
name = "model"
self._rm_maker(name)
mv1 = self._mv_maker(name)
mv2 = self._mv_maker(name)
mv3 = self._mv_maker(name)
msg = (
r"Model version transition cannot archive existing model versions "
r"because .+ is not an Active stage"
)
# test that when `archive_existing_versions` is True, transitioning a model version
# to the inactive stages ("Archived" and "None") throws.
for stage in ["Archived", "None"]:
with self.assertRaisesRegex(MlflowException, msg):
self.store.transition_model_version_stage(name, mv1.version, stage, True)
self.store.transition_model_version_stage(name, mv1.version, "Staging", False)
self.store.transition_model_version_stage(name, mv2.version, "Production", False)
self.store.transition_model_version_stage(name, mv3.version, "Staging", True)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Archived")
self.assertEqual(mvd2.current_stage, "Production")
self.assertEqual(mvd3.current_stage, "Staging")
self.assertEqual(mvd1.last_updated_timestamp, mvd3.last_updated_timestamp)
self.store.transition_model_version_stage(name, mv3.version, "Production", True)
mvd1 = self.store.get_model_version(name=name, version=mv1.version)
mvd2 = self.store.get_model_version(name=name, version=mv2.version)
mvd3 = self.store.get_model_version(name=name, version=mv3.version)
self.assertEqual(mvd1.current_stage, "Archived")
self.assertEqual(mvd2.current_stage, "Archived")
self.assertEqual(mvd3.current_stage, "Production")
self.assertEqual(mvd2.last_updated_timestamp, mvd3.last_updated_timestamp)
for uncanonical_stage_name in ["STAGING", "staging", "StAgInG"]:
self.store.transition_model_version_stage(mv1.name, mv1.version, "Staging", False)
self.store.transition_model_version_stage(mv2.name, mv2.version, "None", False)
# stage names are case-insensitive and auto-corrected to system stage names
self.store.transition_model_version_stage(
mv2.name, mv2.version, uncanonical_stage_name, True
)
mvd1 = self.store.get_model_version(name=mv1.name, version=mv1.version)
mvd2 = self.store.get_model_version(name=mv2.name, version=mv2.version)
self.assertEqual(mvd1.current_stage, "Archived")
self.assertEqual(mvd2.current_stage, "Staging")
def test_delete_model_version(self):
name = "test_for_delete_MV"
initial_tags = [
ModelVersionTag("key", "value"),
ModelVersionTag("anotherKey", "some other value"),
]
self._rm_maker(name)
mv = self._mv_maker(name, tags=initial_tags)
mvd = self.store.get_model_version(name=mv.name, version=mv.version)
self.assertEqual(mvd.name, name)
self.store.delete_model_version(name=mv.name, version=mv.version)
# cannot get a deleted model version
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.get_model_version(name=mv.name, version=mv.version)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# cannot update a deleted model version
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.update_model_version(mv.name, mv.version, description="deleted!")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# cannot delete it again
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.delete_model_version(name=mv.name, version=mv.version)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
def test_delete_model_version_redaction(self):
name = "test_for_delete_MV_redaction"
run_link = "http://localhost:5000/path/to/run"
run_id = "12345"
source = "path/to/source"
self._rm_maker(name)
mv = self._mv_maker(name, source=source, run_id=run_id, run_link=run_link)
mvd = self.store.get_model_version(name=name, version=mv.version)
self.assertEqual(mvd.run_link, run_link)
self.assertEqual(mvd.run_id, run_id)
self.assertEqual(mvd.source, source)
# delete the MV now
self.store.delete_model_version(name, mv.version)
# verify that the relevant fields are redacted
mvd_deleted = self.store._get_sql_model_version_including_deleted(
name=name, version=mv.version
)
self.assertIn("REDACTED", mvd_deleted.run_link)
self.assertIn("REDACTED", mvd_deleted.source)
self.assertIn("REDACTED", mvd_deleted.run_id)
def test_get_model_version_download_uri(self):
name = "test_for_update_MV"
self._rm_maker(name)
source_path = "path/to/source"
mv = self._mv_maker(name, source=source_path, run_id=uuid.uuid4().hex)
mvd1 = self.store.get_model_version(name=mv.name, version=mv.version)
self.assertEqual(mvd1.name, name)
self.assertEqual(mvd1.source, source_path)
# download location points to source
self.assertEqual(
self.store.get_model_version_download_uri(name=mv.name, version=mv.version), source_path
)
# download URI does not change even if model version is updated
self.store.transition_model_version_stage(
name=mv.name, version=mv.version, stage="Production", archive_existing_versions=False
)
self.store.update_model_version(
name=mv.name, version=mv.version, description="Test for Path"
)
mvd2 = self.store.get_model_version(name=mv.name, version=mv.version)
self.assertEqual(mvd2.source, source_path)
self.assertEqual(
self.store.get_model_version_download_uri(name=mv.name, version=mv.version), source_path
)
# cannot retrieve download URI for deleted model versions
self.store.delete_model_version(name=mv.name, version=mv.version)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found"
) as exception_context:
self.store.get_model_version_download_uri(name=mv.name, version=mv.version)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
def test_search_model_versions(self):
# create some model versions
name = "test_for_search_MV"
self._rm_maker(name)
run_id_1 = uuid.uuid4().hex
run_id_2 = uuid.uuid4().hex
run_id_3 = uuid.uuid4().hex
mv1 = self._mv_maker(name=name, source="A/B", run_id=run_id_1)
self.assertEqual(mv1.version, 1)
mv2 = self._mv_maker(name=name, source="A/C", run_id=run_id_2)
self.assertEqual(mv2.version, 2)
mv3 = self._mv_maker(name=name, source="A/D", run_id=run_id_2)
self.assertEqual(mv3.version, 3)
mv4 = self._mv_maker(name=name, source="A/D", run_id=run_id_3)
self.assertEqual(mv4.version, 4)
def search_versions(filter_string):
return [mvd.version for mvd in self.store.search_model_versions(filter_string)]
# search using name should return all 4 versions
self.assertEqual(set(search_versions("name='%s'" % name)), set([1, 2, 3, 4]))
# search using run_id_1 should return version 1
self.assertEqual(set(search_versions("run_id='%s'" % run_id_1)), set([1]))
# search using run_id_2 should return versions 2 and 3
self.assertEqual(set(search_versions("run_id='%s'" % run_id_2)), set([2, 3]))
# search using the IN operator should return all versions
self.assertEqual(
set(
search_versions(
"run_id IN ('{run_id_1}','{run_id_2}')".format(
run_id_1=run_id_1, run_id_2=run_id_2
)
)
),
set([1, 2, 3]),
)
# search using the IN operator with bad lists should raise exceptions
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected string value or punctuation, "
r"but got different type in list"
),
) as exception_context:
search_versions("run_id IN (1,2,3)")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# search using the IN operator with empty lists should raise exceptions
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected a non-empty list of string values, "
r"but got empty list"
),
) as exception_context:
search_versions("run_id IN ()")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# search using an ill-formed IN operator correctly throws exception
with self.assertRaisesRegex(
MlflowException, r"Invalid clause\(s\) in filter string"
) as exception_context:
search_versions("run_id IN (")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(MlflowException, r"Invalid filter '.+'") as exception_context:
search_versions("run_id IN")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected a non-empty list of string values, "
r"but got ill-formed list"
),
) as exception_context:
search_versions("run_id IN (,)")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException,
(
r"While parsing a list in the query, "
r"expected a non-empty list of string values, "
r"but got ill-formed list"
),
) as exception_context:
search_versions("run_id IN ('runid1',,'runid2')")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# search using the IN operator is not allowed with other additional filters
with self.assertRaisesRegex(
MlflowException, r"Search filter '.+' contains multiple expressions"
) as exception_context:
search_versions(
"name='{name}]' AND run_id IN ('{run_id_1}','{run_id_2}')".format(
name=name, run_id_1=run_id_1, run_id_2=run_id_2
)
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# search using source_path "A/D" should return version 3 and 4
self.assertEqual(set(search_versions("source_path = 'A/D'")), set([3, 4]))
# search using source_path "A" should not return anything
self.assertEqual(len(search_versions("source_path = 'A'")), 0)
self.assertEqual(len(search_versions("source_path = 'A/'")), 0)
self.assertEqual(len(search_versions("source_path = ''")), 0)
# delete mv4. search should not return version 4
self.store.delete_model_version(name=mv4.name, version=mv4.version)
self.assertEqual(set(search_versions("")), set([1, 2, 3]))
self.assertEqual(set(search_versions(None)), set([1, 2, 3]))
self.assertEqual(set(search_versions("name='%s'" % name)), set([1, 2, 3]))
self.assertEqual(set(search_versions("source_path = 'A/D'")), set([3]))
self.store.transition_model_version_stage(
name=mv1.name, version=mv1.version, stage="production", archive_existing_versions=False
)
self.store.update_model_version(
name=mv1.name, version=mv1.version, description="Online prediction model!"
)
mvds = self.store.search_model_versions("run_id = '%s'" % run_id_1)
assert 1 == len(mvds)
assert isinstance(mvds[0], ModelVersion)
assert mvds[0].current_stage == "Production"
assert mvds[0].run_id == run_id_1
assert mvds[0].source == "A/B"
assert mvds[0].description == "Online prediction model!"
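# Editor's note (summary added for readability, derived from the assertions
# above): search_model_versions accepts a single filter clause such as
# "name='<model>'", "run_id='<id>'", "source_path='<path>'", or
# "run_id IN ('<id1>','<id2>')". Combining clauses with AND, malformed or
# empty IN lists, and non-string list members are all rejected with
# INVALID_PARAMETER_VALUE, and deleted versions are excluded from results.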
def _search_registered_models(
self, filter_string, max_results=10, order_by=None, page_token=None
):
result = self.store.search_registered_models(
filter_string=filter_string,
max_results=max_results,
order_by=order_by,
page_token=page_token,
)
return [registered_model.name for registered_model in result], result.token
def test_search_registered_models(self):
# create some registered models
prefix = "test_for_search_"
names = [prefix + name for name in ["RM1", "RM2", "RM3", "RM4", "RM4A", "RM4a"]]
for name in names:
self._rm_maker(name)
# search with no filter should return all registered models
rms, _ = self._search_registered_models(None)
self.assertEqual(rms, names)
# equality search using name should return exactly the 1 name
rms, _ = self._search_registered_models("name='{}'".format(names[0]))
self.assertEqual(rms, [names[0]])
# equality search using name that is not valid should return nothing
rms, _ = self._search_registered_models("name='{}'".format(names[0] + "cats"))
self.assertEqual(rms, [])
# case-sensitive prefix search using LIKE should return all the RMs
rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix))
self.assertEqual(rms, names)
# case-sensitive substring search using LIKE with surrounding % should return all the RMs
rms, _ = self._search_registered_models("name LIKE '%RM%'")
self.assertEqual(rms, names)
# case-sensitive wildcard search using LIKE with '_' and '%' should return all the RMs
# _e% matches test_for_search_ , so all RMs should match
rms, _ = self._search_registered_models("name LIKE '_e%'")
self.assertEqual(rms, names)
# case-sensitive prefix search using LIKE should return just RM4A
rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix + "RM4A"))
self.assertEqual(rms, [names[4]])
# case-sensitive prefix search using LIKE should return no models if no match
rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix + "cats"))
self.assertEqual(rms, [])
# confirm that the LIKE keyword itself is accepted in any letter case
rms, _ = self._search_registered_models("name lIkE '%blah%'")
self.assertEqual(rms, [])
rms, _ = self._search_registered_models("name like '{}%'".format(prefix + "RM4A"))
self.assertEqual(rms, [names[4]])
# case-insensitive prefix search using ILIKE should return both RM4A and RM4a
rms, _ = self._search_registered_models("name ILIKE '{}%'".format(prefix + "RM4A"))
self.assertEqual(rms, names[4:])
# case-insensitive postfix search with ILIKE
rms, _ = self._search_registered_models("name ILIKE '%RM4a'")
self.assertEqual(rms, names[4:])
# case-insensitive prefix search using ILIKE should return both RM4A and RM4a
rms, _ = self._search_registered_models("name ILIKE '{}%'".format(prefix + "cats"))
self.assertEqual(rms, [])
# confirm that the ILIKE keyword itself is accepted in any letter case
rms, _ = self._search_registered_models("name iLike '%blah%'")
self.assertEqual(rms, [])
# confirm that ILIKE works for empty query
rms, _ = self._search_registered_models("name iLike '%%'")
self.assertEqual(rms, names)
rms, _ = self._search_registered_models("name ilike '%RM4a'")
self.assertEqual(rms, names[4:])
# cannot search by invalid comparator types
with self.assertRaisesRegex(
MlflowException, r"Expected a quoted string value for attributes"
) as exception_context:
self._search_registered_models("name!=something")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# cannot search by run_id
with self.assertRaisesRegex(
MlflowException, r"Invalid attribute key '.+' specified"
) as exception_context:
self._search_registered_models("run_id='%s'" % "somerunID")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# cannot search by source_path
with self.assertRaisesRegex(
MlflowException, r"Invalid attribute key '.+' specified"
) as exception_context:
self._search_registered_models("source_path = 'A/D'")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# cannot search by other params
with self.assertRaisesRegex(
MlflowException, r"Invalid clause\(s\) in filter string"
) as exception_context:
self._search_registered_models("evilhax = true")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# delete last registered model. search should not return the first 5
self.store.delete_registered_model(name=names[-1])
self.assertEqual(self._search_registered_models(None, max_results=1000), (names[:-1], None))
# equality search using name should return no names
self.assertEqual(self._search_registered_models("name='{}'".format(names[-1])), ([], None))
# case-sensitive prefix search using LIKE should return all the remaining RMs
self.assertEqual(
self._search_registered_models("name LIKE '{}%'".format(prefix)), (names[0:5], None)
)
# case-insensitive prefix search using ILIKE should now return only RM4A
self.assertEqual(
self._search_registered_models("name ILIKE '{}%'".format(prefix + "RM4A")),
([names[4]], None),
)
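# Editor's note (summary added for readability, derived from the assertions
# above): search_registered_models only filters on the `name` attribute,
# using `=` with a quoted string, `LIKE` (case-sensitive match), or `ILIKE`
# (case-insensitive match); the comparator keywords themselves may be written
# in any letter case. Filtering on run_id, source_path, or unknown keys is
# rejected with INVALID_PARAMETER_VALUE.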
def test_parse_search_registered_models_order_by(self):
# test that "registered_models.name ASC" is returned by default
parsed = SqlAlchemyStore._parse_search_registered_models_order_by([])
self.assertEqual([str(x) for x in parsed], ["registered_models.name ASC"])
# test that the given 'name' replaces the default one ('registered_models.name ASC')
parsed = SqlAlchemyStore._parse_search_registered_models_order_by(["name DESC"])
self.assertEqual([str(x) for x in parsed], ["registered_models.name DESC"])
# test that an exception is raised when order_by contains duplicate fields
msg = "`order_by` contains duplicate fields:"
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["last_updated_timestamp", "last_updated_timestamp"]
)
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(["timestamp", "timestamp"])
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["timestamp", "last_updated_timestamp"],
)
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["last_updated_timestamp ASC", "last_updated_timestamp DESC"],
)
with self.assertRaisesRegex(MlflowException, msg):
SqlAlchemyStore._parse_search_registered_models_order_by(
["last_updated_timestamp", "last_updated_timestamp DESC"],
)
def test_search_registered_model_pagination(self):
rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)]
# test flow with fixed max_results
returned_rms = []
query = "name LIKE 'RM%'"
result, token = self._search_registered_models(query, page_token=None, max_results=5)
returned_rms.extend(result)
while token:
result, token = self._search_registered_models(query, page_token=token, max_results=5)
returned_rms.extend(result)
self.assertEqual(rms, returned_rms)
# test that pagination will return all valid results in sorted order
# by name ascending
result, token1 = self._search_registered_models(query, max_results=5)
self.assertNotEqual(token1, None)
self.assertEqual(result, rms[0:5])
result, token2 = self._search_registered_models(query, page_token=token1, max_results=10)
self.assertNotEqual(token2, None)
self.assertEqual(result, rms[5:15])
result, token3 = self._search_registered_models(query, page_token=token2, max_results=20)
self.assertNotEqual(token3, None)
self.assertEqual(result, rms[15:35])
result, token4 = self._search_registered_models(query, page_token=token3, max_results=100)
# assert that page token is None
self.assertEqual(token4, None)
self.assertEqual(result, rms[35:])
# test that providing a completely invalid page token throws
with self.assertRaisesRegex(
MlflowException, r"Invalid page token, could not base64-decode"
) as exception_context:
self._search_registered_models(query, page_token="evilhax", max_results=20)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# test that providing too large of a max_results throws
with self.assertRaisesRegex(
MlflowException, r"Invalid value for request parameter max_results"
) as exception_context:
self._search_registered_models(query, page_token="evilhax", max_results=1e15)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
self.assertIn(
"Invalid value for request parameter max_results", exception_context.exception.message
)
def test_search_registered_model_order_by(self):
rms = []
# explicitly mock the creation_timestamps because timestamps seem to be unstable in Windows
for i in range(50):
with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=i):
rms.append(self._rm_maker("RM{:03}".format(i)).name)
# test flow with fixed max_results and order_by (test stable order across pages)
returned_rms = []
query = "name LIKE 'RM%'"
result, token = self._search_registered_models(
query, page_token=None, order_by=["name DESC"], max_results=5
)
returned_rms.extend(result)
while token:
result, token = self._search_registered_models(
query, page_token=token, order_by=["name DESC"], max_results=5
)
returned_rms.extend(result)
# name descending should be the opposite order of the current order
self.assertEqual(rms[::-1], returned_rms)
# last_updated_timestamp descending should have the newest RMs first
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp DESC"], max_results=100
)
self.assertEqual(rms[::-1], result)
# timestamp returns same result as last_updated_timestamp
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp DESC"], max_results=100
)
self.assertEqual(rms[::-1], result)
# last_updated_timestamp ascending should have the oldest RMs first
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp ASC"], max_results=100
)
self.assertEqual(rms, result)
# timestamp returns same result as last_updated_timestamp
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp ASC"], max_results=100
)
self.assertEqual(rms, result)
# timestamp returns same result as last_updated_timestamp
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp"], max_results=100
)
self.assertEqual(rms, result)
# name ascending should have the original order
result, _ = self._search_registered_models(
query, page_token=None, order_by=["name ASC"], max_results=100
)
self.assertEqual(rms, result)
# test that no ASC/DESC defaults to ASC
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp"], max_results=100
)
self.assertEqual(rms, result)
with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=1):
rm1 = self._rm_maker("MR1").name
rm2 = self._rm_maker("MR2").name
with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=2):
rm3 = self._rm_maker("MR3").name
rm4 = self._rm_maker("MR4").name
query = "name LIKE 'MR%'"
# test with multiple clauses
result, _ = self._search_registered_models(
query,
page_token=None,
order_by=["last_updated_timestamp ASC", "name DESC"],
max_results=100,
)
self.assertEqual([rm2, rm1, rm4, rm3], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp ASC", "name DESC"], max_results=100
)
self.assertEqual([rm2, rm1, rm4, rm3], result)
# confirm that name ascending is the default, even if ties exist on other fields
result, _ = self._search_registered_models(
query, page_token=None, order_by=[], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
# test default tiebreak with descending timestamps
result, _ = self._search_registered_models(
query, page_token=None, order_by=["last_updated_timestamp DESC"], max_results=100
)
self.assertEqual([rm3, rm4, rm1, rm2], result)
# test timestamp parsing
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp\tASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp\r\rASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp\nASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp ASC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
# validate order by key is case-insensitive
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp asc"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp aSC"], max_results=100
)
self.assertEqual([rm1, rm2, rm3, rm4], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp desc", "name desc"], max_results=100
)
self.assertEqual([rm4, rm3, rm2, rm1], result)
result, _ = self._search_registered_models(
query, page_token=None, order_by=["timestamp deSc", "name deSc"], max_results=100
)
self.assertEqual([rm4, rm3, rm2, rm1], result)
def test_search_registered_model_order_by_errors(self):
query = "name LIKE 'RM%'"
# test that invalid columns throw even if they come after valid columns
with self.assertRaisesRegex(
MlflowException, r"Invalid order by key '.+' specified"
) as exception_context:
self._search_registered_models(
query,
page_token=None,
order_by=["name ASC", "creation_timestamp DESC"],
max_results=5,
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# test that invalid columns with random text throw even if they come after valid columns
with self.assertRaisesRegex(
MlflowException, r"Invalid order_by clause '.+'"
) as exception_context:
self._search_registered_models(
query,
page_token=None,
order_by=["name ASC", "last_updated_timestamp DESC blah"],
max_results=5,
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_set_model_version_tag(self):
name1 = "SetModelVersionTag_TestMod"
name2 = "SetModelVersionTag_TestMod 2"
initial_tags = [
ModelVersionTag("key", "value"),
ModelVersionTag("anotherKey", "some other value"),
]
self._rm_maker(name1)
self._rm_maker(name2)
run_id_1 = uuid.uuid4().hex
run_id_2 = uuid.uuid4().hex
run_id_3 = uuid.uuid4().hex
self._mv_maker(name1, "A/B", run_id_1, initial_tags)
self._mv_maker(name1, "A/C", run_id_2, initial_tags)
self._mv_maker(name2, "A/D", run_id_3, initial_tags)
new_tag = ModelVersionTag("randomTag", "not a random value")
self.store.set_model_version_tag(name1, 1, new_tag)
all_tags = initial_tags + [new_tag]
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in all_tags})
# test overriding a tag with the same key
overriding_tag = ModelVersionTag("key", "overriding")
self.store.set_model_version_tag(name1, 1, overriding_tag)
all_tags = [tag for tag in all_tags if tag.key != "key"] + [overriding_tag]
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in all_tags})
# does not affect other model versions with the same key
rm1mv2 = self.store.get_model_version(name1, 2)
rm2mv1 = self.store.get_model_version(name2, 1)
self.assertEqual(rm1mv2.tags, {tag.key: tag.value for tag in initial_tags})
self.assertEqual(rm2mv1.tags, {tag.key: tag.value for tag in initial_tags})
# cannot set a tag on a deleted (non-existent) model version
self.store.delete_model_version(name1, 2)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={name1}, version=2\) not found"
) as exception_context:
self.store.set_model_version_tag(name1, 2, overriding_tag)
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# test cannot set tags that are too long
long_tag = ModelVersionTag("longTagKey", "a" * 5001)
with self.assertRaisesRegex(
MlflowException,
r"Model version value '.+' had length \d+, which exceeded length limit of 5000",
) as exception_context:
self.store.set_model_version_tag(name1, 1, long_tag)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# test can set tags that are somewhat long
long_tag = ModelVersionTag("longTagKey", "a" * 4999)
self.store.set_model_version_tag(name1, 1, long_tag)
# can not set invalid tag
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.set_model_version_tag(name2, 1, ModelVersionTag(key=None, value=""))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# can not use invalid model name or version
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.set_model_version_tag(None, 1, ModelVersionTag(key="key", value="value"))
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Model version must be an integer"
) as exception_context:
self.store.set_model_version_tag(
name2, "I am not a version", ModelVersionTag(key="key", value="value")
)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
def test_delete_model_version_tag(self):
name1 = "DeleteModelVersionTag_TestMod"
name2 = "DeleteModelVersionTag_TestMod 2"
initial_tags = [
ModelVersionTag("key", "value"),
ModelVersionTag("anotherKey", "some other value"),
]
self._rm_maker(name1)
self._rm_maker(name2)
run_id_1 = uuid.uuid4().hex
run_id_2 = uuid.uuid4().hex
run_id_3 = uuid.uuid4().hex
self._mv_maker(name1, "A/B", run_id_1, initial_tags)
self._mv_maker(name1, "A/C", run_id_2, initial_tags)
self._mv_maker(name2, "A/D", run_id_3, initial_tags)
new_tag = ModelVersionTag("randomTag", "not a random value")
self.store.set_model_version_tag(name1, 1, new_tag)
self.store.delete_model_version_tag(name1, 1, "randomTag")
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in initial_tags})
# test that deleting a key does not affect other model versions with the same key
self.store.delete_model_version_tag(name1, 1, "key")
rm1mv1 = self.store.get_model_version(name1, 1)
rm1mv2 = self.store.get_model_version(name1, 2)
rm2mv1 = self.store.get_model_version(name2, 1)
self.assertEqual(rm1mv1.tags, {"anotherKey": "some other value"})
self.assertEqual(rm1mv2.tags, {tag.key: tag.value for tag in initial_tags})
self.assertEqual(rm2mv1.tags, {tag.key: tag.value for tag in initial_tags})
# deleting a tag that is already deleted does nothing
self.store.delete_model_version_tag(name1, 1, "key")
rm1mv1 = self.store.get_model_version(name1, 1)
self.assertEqual(rm1mv1.tags, {"anotherKey": "some other value"})
# cannot delete a tag on a deleted (non-existent) model version
self.store.delete_model_version(name2, 1)
with self.assertRaisesRegex(
MlflowException, rf"Model Version \(name={name2}, version=1\) not found"
) as exception_context:
self.store.delete_model_version_tag(name2, 1, "key")
assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
# can not delete tag with invalid key
with self.assertRaisesRegex(
MlflowException, r"Tag name cannot be None"
) as exception_context:
self.store.delete_model_version_tag(name1, 2, None)
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
# can not use invalid model name or version
with self.assertRaisesRegex(
MlflowException, r"Registered model name cannot be empty"
) as exception_context:
self.store.delete_model_version_tag(None, 2, "key")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
with self.assertRaisesRegex(
MlflowException, r"Model version must be an integer"
) as exception_context:
self.store.delete_model_version_tag(name1, "I am not a version", "key")
assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
|
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import itertools
import os
import shutil
import tempfile
import mock
from nose.tools import raises, assert_raises
try:
from . import parse_s3
from digits.tools.mock_s3_walker import MockS3Walker
import_failed = False
except ImportError:
import_failed = True
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestUnescape():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_hello(self):
assert parse_s3.unescape('hello') == 'hello'
def test_space(self):
assert parse_s3.unescape('%20') == ' '
class TestValidateS3():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
cls.mock_walker = MockS3Walker()
def test_non_existent_bucket(self):
result = parse_s3.validate_s3(self.mock_walker, 'nonexistentbucket', '')
assert not result
def test_empty_bucket(self):
result = parse_s3.validate_s3(self.mock_walker, 'emptybucket', '')
assert not result
def test_valid_endpoint(self):
result = parse_s3.validate_s3(self.mock_walker, 'validbucket', '')
assert result
class TestValidateOutputFile():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
cls.tmpdir = tempfile.mkdtemp()
_handle, cls.tmpfile = tempfile.mkstemp(dir=cls.tmpdir)
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.tmpdir)
except IOError:
pass
def test_missing_file(self):
assert parse_s3.validate_output_file(None) is True, 'all new files should be valid'
def test_file(self):
assert parse_s3.validate_output_file(os.path.join(self.tmpdir, 'output.txt')) is True
@mock.patch('os.access')
def test_local_file(self, mock_access):
mock_access.return_value = True
assert parse_s3.validate_output_file('not-a-file.txt') is True, 'relative paths should be accepted'
@mock.patch('os.access')
def test_not_writeable(self, mock_access):
mock_access.return_value = False
assert parse_s3.validate_output_file(self.tmpfile) is False, 'should not succeed without write permission'
def test_existing_file(self):
assert parse_s3.validate_output_file(self.tmpfile) is False
def test_nonexistent_dir(self):
assert parse_s3.validate_output_file(
os.path.join(
os.path.abspath('not-a-dir'),
'output.txt'
)
) is False
class TestValidateInputFile():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
_handle, cls.tmpfile = tempfile.mkstemp()
os.close(_handle)
@classmethod
def tearDownClass(cls):
os.remove(cls.tmpfile)
def test_missing_file(self):
assert parse_s3.validate_input_file('not-a-file.txt') is False, 'should not pass on missing file'
@mock.patch('os.access')
def test_not_readable(self, mock_access):
mock_access.return_value = False
assert parse_s3.validate_input_file(self.tmpfile) is False, 'should not succeed without read permission'
class TestValidateRange():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_no_range(self):
assert parse_s3.validate_range(0) is True
def test_min_less(self):
assert parse_s3.validate_range(-1, min_value=0) is False
def test_min_equal(self):
assert parse_s3.validate_range(0, min_value=0) is True
def test_min_more(self):
assert parse_s3.validate_range(1, min_value=0) is True
def test_max_less(self):
assert parse_s3.validate_range(9, max_value=10) is True
def test_max_equal(self):
assert parse_s3.validate_range(10, max_value=10) is True
def test_max_more(self):
assert parse_s3.validate_range(11, max_value=10) is False
def test_allow_none_true(self):
assert parse_s3.validate_range(None, allow_none=True) is True
def test_allow_none_false(self):
assert parse_s3.validate_range(None, allow_none=False) is False
def test_string(self):
assert parse_s3.validate_range('foo') is False
@mock.patch('digits.tools.parse_s3.validate_output_file')
@mock.patch('digits.tools.parse_s3.validate_input_file')
class TestCalculatePercentages():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
@raises(AssertionError)
def test_making_0(self, mock_input, mock_output):
parse_s3.calculate_percentages(None, None, None, None, None, None, None)
def test_making_1(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected_outputs = [
('train_file', (100, 0, 0)),
('val_file', (0, 100, 0)),
('test_file', (0, 0, 100))
]
for supplied, expected in expected_outputs:
args = {k: None for k in ['labels_file', 'train_file', 'percent_train',
'val_file', 'percent_val', 'test_file', 'percent_test']}
args.update({supplied: ''})
output = parse_s3.calculate_percentages(**args)
assert output == expected, 'expected output of {}, got {}'.format(expected, output)
def test_making_2(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
permutes = itertools.combinations(['train', 'val', 'test'], 2)
expected_outputs = itertools.izip(permutes, itertools.repeat((32, 68)))
for supplied, expected in expected_outputs:
args = {k: None for k in ['labels_file', 'train_file', 'percent_train',
'val_file', 'percent_val', 'test_file', 'percent_test']}
args.update({k + '_file': '' for k in supplied})
args.update({'percent_' + k: v for k, v in itertools.izip(supplied, expected)})
# Tricky line. itertools returns combinations in sorted order, always.
# The order of the returned non-zero values should always be correct.
output = [x for x in parse_s3.calculate_percentages(**args) if x != 0]
assert output == list(expected), 'expected output of {}, got {}'.format(expected, output)
def test_making_3_all_given(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected = (25, 30, 45)
assert parse_s3.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=25,
val_file='not-a-file.txt', percent_val=30,
test_file='not-a-file.txt', percent_test=45
) == expected, 'Calculate percentages should return identical values of {}'.format(expected)
def test_making_3_2_given(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected = 45
assert parse_s3.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=25,
val_file='not-a-file.txt', percent_val=30,
test_file='not-a-file.txt', percent_test=None
)[2] == expected, 'Calculate percentages should calculate third value of {}'.format(expected)
@raises(AssertionError)
def test_making_out_of_range(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
# should raise AssertionError because percentages not between 0-100 are invalid
parse_s3.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=-1,
val_file=None, percent_val=None,
test_file=None, percent_test=None
)
class TestParseWebListing():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_non_url(self):
for url in ['not-a-url', 'http://not-a-url', 'https://not-a-url']:
yield self.check_url_raises, url
def check_url_raises(self, url):
assert_raises(Exception, parse_s3.parse_web_listing, url)
def test_mock_url(self):
for content, dirs, files in [
# Nothing
('', [], []),
# Apache 2.2.22
(
'<head></head><body><table>\n \
<tr><td><a href="/home/">Parent</a></td></tr>\n \
<tr><td><a href="cat1/">cat1/</a></td><td>01-Jan-2015 12:34</td><td> - </td></tr>\n \
<tr><td><a href="cat2/">cat2/</a></td><td>02-Feb-2015 23:45</td><td> - </td></tr>\n \
<tr><td><a href="cat.jpg">cat.jpg</a></td><td>03-Mar-2015 1:23</td><td> 1 </td></tr>\n \
</table</body>\n',
['cat1/', 'cat2/'],
['cat.jpg'],
),
# Apache 2.4.7
(
'<html><head></head><body><table>\n \
<tr><td><a href="/home/">Parent</a></td></tr>\n \
<tr><td><a href="dog/">dog/</a></td><td>01-01-2015 12:34</td><td> - </td></tr>\n \
<tr><td><a href="dog1.jpeg">dog1.jpeg</a></td><td>02-02-2015 23:45</td><td> 1 </td></tr>\n \
<tr><td><a href="dog2.png">dog2.png</a></td><td>03-03-2015 1:23</td><td> 2 </td></tr>\n \
</table</body></html>\n',
['dog/'],
['dog1.jpeg', 'dog2.png'],
),
# Nginx
(
'<html><head></head><body>\n \
<a href="bird.jpg">bird.jpg</a> 01-Jan-1999 01:23 1\n \
<a href="birds/">birds/</a> 02-Feb-1999 12:34 -',
['birds/'],
['bird.jpg'],
),
]:
with mock.patch('digits.tools.parse_s3.requests') as mock_requests:
response = mock.Mock()
response.status_code = mock_requests.codes.ok
response.content = content
mock_requests.get.return_value = response
yield self.check_listing, (dirs, files)
def check_listing(self, rc):
assert parse_s3.parse_web_listing('any_url') == rc
class TestSplitIndices():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_indices(self):
for size in [5, 22, 32]:
for percent_b in range(0, 100, 31):
for percent_c in range(0, 100 - percent_b, 41):
yield self.check_split, size, percent_b, percent_c
def check_split(self, size, pct_b, pct_c):
ideala = size * float(100 - pct_b - pct_c) / 100.0
idealb = size * float(100 - pct_c) / 100.0
idxa, idxb = parse_s3.three_way_split_indices(size, pct_b, pct_c)
assert abs(ideala - idxa) <= 2, 'split should be close to {}, is {}'.format(ideala, idxa)
assert abs(idealb - idxb) <= 2, 'split should be close to {}, is {}'.format(idealb, idxb)
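# Editor's note (worked example, not part of the original test): for
# size=32, pct_b=31, pct_c=41 the ideal split points are
# 32 * (100 - 31 - 41) / 100 = 8.96 and 32 * (100 - 41) / 100 = 18.88,
# so three_way_split_indices is expected to return indices near 9 and 19
# (within the +/-2 tolerance asserted above).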
class TestParseS3():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_all_train(self):
classes = range(10)
mock_walker = MockS3Walker(classes)
try:
tmpdir = tempfile.mkdtemp()
labels_file = tempfile.mkstemp(dir=tmpdir)
train_file = tempfile.mkstemp(dir=tmpdir)
parse_s3.parse_s3(mock_walker, 'validbucket', 'train/', labels_file[1],
percent_train=100, train_file=train_file[1], percent_val=0, percent_test=0)
with open(labels_file[1]) as infile:
parsed_classes = [line.strip() for line in infile]
expected_classes = [str(i) for i in classes]
assert parsed_classes == expected_classes, '%s != %s' % (parsed_classes, expected_classes)
finally:
shutil.rmtree(tmpdir)
def test_neg_all_train(self):
try:
classes = range(1)
mock_walker = MockS3Walker(classes)
tmpdir = tempfile.mkdtemp()
labels_file = tempfile.mkstemp(dir=tmpdir)
train_file = tempfile.mkstemp(dir=tmpdir)
assert not parse_s3.parse_s3(mock_walker, 'invalidbucket', 'train/', labels_file[1], percent_train=100,
train_file=train_file[1], percent_val=0, percent_test=0)
finally:
shutil.rmtree(tmpdir)
|
import re
import time
import typing
import logging
from calendar import monthrange
from datetime import datetime
from collections.abc import Iterable
from heapq import heappush, heappop
from . import types # noqa
from . exceptions import BrokerError
from . interfaces import App, Plugin, Logger
from . utils import cached_property, rewind
class MasterLogger(Plugin):
def __init__(self,
logger: typing.Union[logging.Logger, Logger],
**kwargs) -> None:
self.logger = logger
def register_master_handlers(self):
level = self.logger.level
ret = {}
if level <= logging.ERROR:
ret['task_exception'] = self.on_task_exception
ret['task_unknown'] = self.on_task_unknown
ret['worker_error'] = self.on_worker_error
ret['broker_error'] = self.on_broker_error
if level <= logging.INFO:
ret['worker_start'] = self.on_worker_start
ret['task_start'] = self.on_task_start
ret['task_done'] = self.on_task_done
ret['task_interrupt'] = self.on_task_interrupt
if level <= logging.DEBUG:
ret['task_expires'] = self.on_task_expires
return ret
def on_worker_start(self, w, **kwargs):
self.logger.info('worker process [%d] started.', w.pid)
def on_task_start(self, w, task_name, task_request, **kwargs):
self.logger.info('[%d] - received task: %s[%s].',
w.pid, task_name, task_request['id'])
def on_task_done(self, w, task_name, task_request, running_time, **kwargs):
self.logger.info('[%d] - task %s[%s] succeeded in %ss.',
w.pid, task_name, task_request['id'], running_time)
def on_task_interrupt(self, w, task_name, task_request, running_time,
**kwargs):
self.logger.info('[%d] - task %s[%s] killed in %ss.',
w.pid, task_name, task_request['id'], running_time)
def on_task_expires(self, w, task_name, task_request, **kwargs):
self.logger.debug('[%d] - task %s[%s] expires.',
w.pid, task_name, task_request['id'])
def on_task_unknown(self, w, task_name, **kwargs):
self.logger.error('[%d] - received unregistered task `%s`.',
w.pid, task_name)
def on_task_exception(self, w, task_name, task_request, exc, traceback,
running_time, **kwargs):
self.logger.error('[%d] - task %s[%s] raised exception: %s\n%s',
w.pid, task_name, task_request['id'], repr(exc),
traceback)
def on_broker_error(self, w, **kwargs):
self.logger.error('[%d] - broker error', w.pid)
def on_worker_error(self, w, exc, traceback, **kwargs):
self.logger.error('[%d] - got exception: %s\n%s',
w.pid, repr(exc), traceback)
class TaskKiller(Plugin):
def __init__(self,
app: App,
logger: typing.Union[logging.Logger, Logger],
**kwargs) -> None:
self.app = app
self.logger = logger
self.running_tasks = set() # type: typing.Set[types.TaskId]
self.heap = [] # type: typing.List[typing.Tuple[float, types.TaskId]]
def run_in_master(self, curtime):
if not self.heap:
return
while self.heap and self.heap[0][0] <= curtime:
tm, task_id = heappop(self.heap)
if task_id not in self.running_tasks:
continue
self.logger.debug('[taskkiller] - kill task %s due to time limit',
task_id)
if self.heap:
return self.heap[0][0] - time.time()
def on_task_start(self, w,
task_name,
task_id,
task_headers,
start_time,
**kwargs):
limit = task_headers.get('time_limit')
if limit is None:
return
self.running_tasks.add(task_id)
heappush(self.heap, (start_time + limit, task_id))
def on_task_done(self, w, task_name, task_id):
if task_id not in self.running_tasks:
return
self.running_tasks.remove(task_id)
class CronBeat(Plugin):
def __init__(self,
app: App,
schedule: str,
error_timeout: int,
logger: typing.Union[logging.Logger, Logger],
**kwargs) -> None:
self.app = app
self.logger = logger
self.error_timeout = error_timeout
self.schedule = schedule
self.next_run = 0
@classmethod
def add_console_args(cls, parser) -> None:
parser.add_argument('--schedule',
dest='schedule',
default='schedule.py',
help='Schedule rules')
def get_applied_conf(self):
return {
'schedule': self.schedule
}
@cached_property
def heap(self):
dct = {'crontab': crontab}
with open(self.schedule, 'rt') as f:
rules = eval(f.read(), dct)
if not isinstance(rules, dict):
raise TypeError('Must be a dict')
start = datetime.now()
heap = []
for key, entry in rules.items():
if not entry.get('task'):
raise TypeError('`task` must be set')
schedule = entry.get('schedule')
if not isinstance(schedule, crontab):
raise TypeError('`schedule` must be a crontab')
schedule = schedule.start(start)
heappush(heap, (next(schedule).timestamp(), schedule, entry))
return heap
def master_idle(self, curtime):
if not self.heap:
return
if self.next_run > curtime:
return self.next_run - curtime
task_sent = False
while self.heap and self.heap[0][0] <= curtime:
_, schedule, entry = self.heap[0]
try:
self.app.send_task(entry['task'],
args=entry.get('args', ()),
kwargs=entry.get('kwargs', {}))
except BrokerError:
self.logger.error('[beat] - cannot send task, retrying in %ss.',
self.error_timeout)
self.next_run = self.error_timeout + curtime
return self.error_timeout
else:
self.logger.debug('[beat] - %s sent.', entry['task'])
heappop(self.heap)
heappush(self.heap, (
next(schedule).timestamp(), schedule, entry))
task_sent = True
if self.heap:
self.next_run = self.heap[0][0]
timeout = self.next_run - curtime
if task_sent:
self.logger.debug('[beat] - next task in %fs.', timeout)
return timeout
class crontab_parser:
"""Parser for Crontab expressions."""
_range = r'(\d+?)-(\d+)'
_steps = r'/(\d+)'
_number = r'(\d+)'
_star = r'\*'
def __init__(self, min_, max_):
self.max_ = max_
self.min_ = min_
self.pats = (
(re.compile('^' + self._range + self._steps + '$'),
self._range_steps),
(re.compile('^' + self._range + '$'),
self._expand_range),
(re.compile('^' + self._star + self._steps + '$'),
self._star_steps),
(re.compile('^' + self._star + '$'),
self._expand_star),
(re.compile('^' + self._number + '$'),
self._expand_range)
)
def parse(self, spec):
acc = set()
for part in spec.split(','):
if not part:
raise ValueError('empty part')
acc |= set(self._parse_part(part))
return sorted(acc)
def _parse_part(self, part):
for regex, handler in self.pats:
m = regex.match(part)
if m:
return handler(m.groups())
raise ValueError('invalid filter: %r' % part)
def _expand_range(self, toks):
fr = self._expand_number(toks[0])
if len(toks) > 1:
to = self._expand_number(toks[1])
if to < fr:
raise ValueError('invalid range')
return list(range(fr, to + 1))
return [fr]
def _range_steps(self, toks):
if len(toks) != 3 or not toks[2]:
raise ValueError('empty filter')
return self._expand_range(toks[:2])[::int(toks[2])]
def _star_steps(self, toks):
if not toks or not toks[0]:
raise ValueError('empty filter')
return self._expand_star()[::int(toks[0])]
def _expand_star(self, *args):
return list(range(self.min_, self.max_ + 1))
def _expand_number(self, s):
try:
i = int(s)
except ValueError:
raise ValueError('invalid number: %r' % s)
if i > self.max_:
raise ValueError(
'invalid end range: {0} > {1}.'.format(i, self.max_))
if i < self.min_:
raise ValueError(
'invalid beginning range: {0} < {1}.'.format(i, self.min_))
return i
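# Editor's sketch (illustrative, not part of the original module): the parser
# expands one crontab field into the sorted integers it matches within
# [min_, max_], both inclusive. Usage looks like:
#
#   crontab_parser(0, 59).parse('*/15')    # -> [0, 15, 30, 45]
#   crontab_parser(0, 23).parse('1-5,22')  # -> [1, 2, 3, 4, 5, 22]
#   crontab_parser(1, 12).parse('*')       # -> [1, 2, ..., 12]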
class crontab:
def __init__(self,
minute='*',
hour='*',
day_of_month='*',
month_of_year='*',
day_of_week='*'):
self._orig_minute = minute
self._orig_hour = hour
self._orig_day_of_week = day_of_week
self._orig_day_of_month = day_of_month
self._orig_month_of_year = month_of_year
self.hour = self._expand_spec(hour, 0, 23)
self.minute = self._expand_spec(minute, 0, 59)
self.day_of_week = self._expand_spec(day_of_week, 0, 6)
self.day_of_month = self._expand_spec(day_of_month, 1, 31)
self.month_of_year = self._expand_spec(month_of_year, 1, 12)
def __repr__(self):
return ('<crontab: {0._orig_minute} {0._orig_hour} '
'{0._orig_day_of_week} {0._orig_day_of_month} '
'{0._orig_month_of_year}>').format(self)
@staticmethod
def _expand_spec(cronspec, min_, max_):
"""Expand cron specification."""
if isinstance(cronspec, int):
result = [cronspec]
elif isinstance(cronspec, str):
result = crontab_parser(min_, max_).parse(cronspec)
elif isinstance(cronspec, (list, tuple, set)):
result = sorted(cronspec)
elif isinstance(cronspec, Iterable):
result = sorted(cronspec)
else:
raise TypeError("Argument cronspec needs to be of any of the "
"following types: int, str, or an iterable type. "
"%r was given." % type(cronspec))
for number in result:
if not isinstance(number, int):
raise ValueError("Argument cronspec needs to be an int: "
"%r was given." % type(number))
for number in [result[0], result[-1]]:
if number < min_ or number > max_:
raise ValueError(
"Invalid crontab pattern. Valid range is %d-%d. "
"'%r' was found." % (min_, max_, number))
return result
def start(self, start_date=None):
y = start_date.year
complete, (month_of_year, day_of_month, hour, minute) = rewind(
start_date.timetuple()[1:5], (
self.month_of_year,
self.day_of_month,
self.hour,
self.minute
))
if complete:
y += 1
while 1:
for m in month_of_year:
max_d = monthrange(y, m)[1]
for d in day_of_month:
if d > max_d:
break
for h in hour:
for mi in minute:
yield datetime(y, m, d, h, mi)
minute = self.minute
hour = self.hour
day_of_month = self.day_of_month
month_of_year = self.month_of_year
y += 1
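# Editor's sketch (illustrative, not part of the original module): CronBeat
# evaluates the file given by --schedule with `crontab` in scope and expects
# it to evaluate to a dict of entries, each with a task name and a crontab
# schedule (args/kwargs are optional). A minimal schedule file might contain:
#
#   {
#       'nightly-cleanup': {                  # arbitrary entry name
#           'task': 'tasks.cleanup',          # hypothetical task name
#           'schedule': crontab(minute=0, hour=3),
#           'args': (),
#           'kwargs': {},
#       },
#   }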
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .api import RendezvousHandler, RendezvousParameters
from .api import rendezvous_handler_registry as handler_registry
from .dynamic_rendezvous import create_handler
def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:
from . import static_tcp_rendezvous
return static_tcp_rendezvous.create_rdzv_handler(params)
def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler:
from . import etcd_rendezvous
return etcd_rendezvous.create_rdzv_handler(params)
def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler:
from .etcd_rendezvous_backend import create_backend
from .etcd_store import EtcdStore
backend = create_backend(params)
store = EtcdStore(backend.client, "/torch/elastic/store")
return create_handler(store, backend, params)
def _create_c10d_handler(params: RendezvousParameters) -> RendezvousHandler:
from .c10d_rendezvous_backend import create_backend
backend = create_backend(params)
return create_handler(backend.store, backend, params)
def _register_default_handlers() -> None:
handler_registry.register("etcd", _create_etcd_handler)
handler_registry.register("etcd-v2", _create_etcd_v2_handler)
handler_registry.register("c10d", _create_c10d_handler)
handler_registry.register("static", _create_static_handler)
def get_rendezvous_handler(params: RendezvousParameters) -> RendezvousHandler:
"""
This method is used to obtain a reference to a :py:class:`RendezvousHandler`.
Custom rendezvous handlers can be registered by
::
from torch.distributed.elastic.rendezvous import rendezvous_handler_registry
from torch.distributed.elastic.rendezvous.registry import get_rendezvous_handler
def create_my_rdzv(params: RendezvousParameters):
return MyCustomRdzv(params)
rendezvous_handler_registry.register("my_rdzv_backend_name", create_my_rdzv)
my_rdzv_handler = get_rendezvous_handler("my_rdzv_backend_name", RendezvousParameters)
"""
return handler_registry.create_handler(params)
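# Editor's sketch (illustrative, not part of the original file): once the
# default handlers are registered, a handler is normally obtained by building
# RendezvousParameters and passing it to get_rendezvous_handler. The keyword
# names below follow the upstream API as understood here and may need
# adjustment for a given torch version.
#
#   params = RendezvousParameters(
#       backend="c10d",
#       endpoint="localhost:29400",
#       run_id="job-42",
#       min_nodes=1,
#       max_nodes=2,
#   )
#   handler = get_rendezvous_handler(params)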
|
#!/usr/bin/env python3
from app import app, db, functions
from app.functions import Color
import subprocess
import os
import shlex
import shutil
from config import Config
from datetime import datetime
from cryptography.fernet import InvalidToken
from app.cipher import CipherTest, Cipher, new_cipher_key, encrypt, decrypt
from app.workspace import Workspace
from app.role import Role
from app.user import User
from app.profile import Profile
from app.list import List, Person
from app.email import Email
from app.page import Page
from app.domain import Domain
from app.campaign import Campaign, Campaignpages, WorkerCampaignSchema
from app.result import Result, Form, Event
from app.server import Server
from app.apikey import APIKey
import urllib3
# suppress insecure requests warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# objects to initialize 'flask shell' with
@app.shell_context_processor
def make_shell_context():
return {
'db': db,
'User': User,
'Profile': Profile,
'Role': Role,
'Workspace': Workspace,
'List': List,
'Person': Person,
'Email': Email,
'Page': Page,
'Domain': Domain,
'Campaign': Campaign,
'Result': Result,
'Server': Server,
'APIKey': APIKey,
'Form': Form,
'Campaignpages': Campaignpages,
'Event': Event
}
def init_cipher():
passphrase = ''
print(f'{Color.gray}[*] redlure encrypts sensitive database fields{Color.end}')
print(f'{Color.gray}[*] Enter a passphrase that will be used in generating the key\n{Color.end}')
while passphrase == '':
passphrase = input(f'{Color.gray}[+] Passphrase: {Color.red}').encode()
print(f'\n[!] WARNING: Do not lose your passphrase - doing so will result in losing access to parts of your database{Color.end}')
new_cipher_key(passphrase)
input(f'\n{Color.gray}[+] Press enter to continue: {Color.end}')
def get_cipher():
cipher_text = CipherTest.query.first().value
cipher_str = cipher_text.decode()
print(f'{Color.gray}{cipher_str[:len(cipher_str)//2]}\n{cipher_str[len(cipher_str)//2:]}{Color.end}\n')
passphrase = input(f'{Color.gray}[+] Enter the cipher passphrase: {Color.red}').encode()
new_cipher_key(passphrase)
try:
plain_text = decrypt(cipher_text)
print(f'[+] {plain_text.decode()}\n{Color.end}')
except InvalidToken:
print(f'\n[!] Decryption failed - invalid passphrase{Color.end}')
exit()
def init_db():
if os.path.isdir('migrations'):
shutil.rmtree('migrations')
print(f'\n{Color.red}[*] Creating database{Color.end}')
proc = subprocess.Popen(shlex.split('flask db init'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
proc.wait()
proc = subprocess.Popen(shlex.split('flask db migrate'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
proc.wait()
proc = subprocess.Popen(shlex.split('flask db upgrade'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
proc.wait()
print(f'{Color.red}[+] Initializing database values\n{Color.end}')
general_ws = Workspace(name='General')
db.session.add(general_ws)
db.session.commit()
administrator = Role(name='redlure admin', role_type='Administrator')
general_ws = Workspace.query.filter_by(id=1, name='General').first()
if general_ws is not None:
administrator.workspaces.append(general_ws)
db.session.add(administrator)
db.session.commit()
admin = User(username='admin', role_id=1)
admin.set_password('redlure')
db.session.add(admin)
db.session.commit()
encrypted_val = encrypt(b'Bingo. Welcome to redlure')
cipher_test = CipherTest(value=encrypted_val)
db.session.add(cipher_test)
db.session.commit()
key = APIKey()
# check for scheduled campaigns that need to be re-entered into the queue
def check_campaigns():
campaigns = Campaign.query.filter_by(status='Scheduled').all()
for campaign in campaigns:
if datetime.now() < campaign.start_time:
#schema = WorkerCampaignSchema()
#campaign_data = schema.dump(campaign)
campaign.cast()
else:
campaign.status = 'Start time missed (server outage)'
db.session.commit()
def gen_certs():
proc = subprocess.Popen(shlex.split('openssl req -x509 -newkey rsa:4096 -nodes -subj "/" -out redlure-cert.pem -keyout redlure-key.pem -days 365'))
proc.wait()
def banner():
print(f'''
{Color.red} .___{Color.gray}.__ {Color.end}
{Color.red}_______ ____ __| _/{Color.gray}| | __ _________ ____ {Color.end}
{Color.red}\_ __ \_/ __ \ / __ | {Color.gray}| | | | \_ __ \_/ __ \ {Color.end}
{Color.red} | | \/\ ___// /_/ | {Color.gray}| |_| | /| | \/\ ___/ {Color.end}
{Color.red} |__| \___ >____ | {Color.gray}|____/____/ |__| \___ >{Color.end}
{Color.red} \/ \/ {Color.gray} \/ {Color.end}
''')
if __name__ == '__main__':
banner()
# SECRET_KEY is required
if Config.SECRET_KEY == '':
print('[!] A secret key is required - set the SECRET_KEY attribute in config.py')
print(f'[!] New suggested random secret key: {os.urandom(24)}')
exit()
# check if db exists yet
if not os.path.isfile('redlure.db'):
init_cipher()
init_db()
else:
get_cipher()
check_campaigns()
    # generate certs if they don't exist
if Config.CERT_PATH == 'redlure-cert.pem' and Config.KEY_PATH == 'redlure-key.pem':
if not os.path.isfile('redlure-cert.pem') or not os.path.isfile('redlure-key.pem'):
gen_certs()
# start the server
app.logger.info('redlure-console starting up')
#server = subprocess.Popen(['gunicorn', 'app:app', '-b 0.0.0.0:5000', '--certfile', Config.CERT_PATH, '--keyfile', Config.KEY_PATH])
#server.wait()
app.run(host='0.0.0.0', ssl_context=(Config.CERT_PATH, Config.KEY_PATH), use_reloader=False)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, Kei Okada
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Copyright holder. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import argparse
import sys
import time
import rospy
import rostest
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from std_msgs.msg import Float64
CLASSNAME = 'rwt_robot_monitor'
class TestRwtRobotMonitor(unittest.TestCase):
def sin_cb(self, msg):
self.sin_msg = msg
self.sin_msg_received = self.sin_msg_received + 1
def __init__(self, *args):
super(TestRwtRobotMonitor, self).__init__(*args)
rospy.init_node('test_rwt_robot_monitor')
def setUp(self):
parser = argparse.ArgumentParser()
parser.add_argument('--no-headless', action='store_true',
help='start webdriver with headless mode')
args, unknown = parser.parse_known_args()
self.sin_msg = None
self.sin_msg_received = 0
rospy.Subscriber('/sin', Float64, self.sin_cb)
self.url_base = rospy.get_param("url_roswww_testserver")
opts = webdriver.firefox.options.Options()
if not args.no_headless:
opts.add_argument('-headless')
self.browser = webdriver.Firefox(options=opts)
self.wait = webdriver.support.ui.WebDriverWait(self.browser, 10)
# maximize screen
self.browser.find_element_by_tag_name("html").send_keys(Keys.F11)
def tearDown(self):
try:
self.browser.close()
self.browser.quit()
except:
pass
def set_ros_websocket_port_settings(self):
self.wait.until(EC.presence_of_element_located((By.ID, "button-ros-master-settings")))
settings = self.browser.find_element_by_id("button-ros-master-settings")
self.assertIsNotNone(settings, "Object id=button-ros-master-settings not found")
settings.click()
self.wait.until(EC.presence_of_element_located((By.ID, "input-ros-master-uri")))
uri = self.browser.find_element_by_id("input-ros-master-uri")
self.assertIsNotNone(uri, "Object id=input-ros-master-uri not found")
uri.clear();
uri.send_keys('ws://localhost:9090/')
self.wait.until(EC.presence_of_element_located((By.ID, "button-ros-master-connect")))
connect = self.browser.find_element_by_id("button-ros-master-connect")
self.assertIsNotNone(connect, "Object id=button-ros-master-connect")
connect.click()
def test_rwt_robot_monitor_plotter(self):
url = '%s/rwt_robot_monitor/plotter.html' % (self.url_base)
rospy.logwarn("Accessing to %s" % url)
self.browser.get(url)
# check settings
self.set_ros_websocket_port_settings()
# wait for /First/pref1a topic
topic_text = ''
while topic_text == '':
time.sleep(1)
self.wait.until(EC.presence_of_element_located((By.ID, "name-select")))
topic = self.browser.find_element_by_id("name-select")
self.assertIsNotNone(topic, "Object id=name-select not found")
topic_text = topic.text
self.assertTrue(u'/First/pref1a' in topic_text)
Select(topic).select_by_value('/First/pref1a')
# wait for test topic
topic_text = ''
while topic_text == '':
time.sleep(1)
self.wait.until(EC.presence_of_element_located((By.ID, "plot-field-select")))
topic = self.browser.find_element_by_id("plot-field-select")
self.assertIsNotNone(topic, "Object id=plot-field-select not found")
topic_text = topic.text
self.assertTrue(u'test' in topic_text)
Select(topic).select_by_value('test')
self.wait.until(EC.presence_of_element_located((By.ID, "add-button")))
add = self.browser.find_element_by_id("add-button")
self.assertIsNotNone(add, "Object id=add-button")
add.click()
# check plot is updated
self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "g.y")))
y_axis = self.browser.find_element_by_css_selector("g.y")
self.assertIsNotNone(y_axis, "Object id=y_axis")
y_axis_value = y_axis.text
loop = 0
y_axis_value_updated = 0
while loop < 60:
loop = loop + 1
time.sleep(1)
y_axis = self.browser.find_element_by_css_selector("g.y")
rospy.logwarn("check if tick updated {} < {} ({})".format(y_axis_value, y_axis.text, y_axis_value_updated))
if y_axis_value != y_axis.text:
y_axis_value_updated = y_axis_value_updated + 1
if y_axis_value_updated >= 2:
break
y_axis_value = y_axis.text
self.assertNotEqual(y_axis_value, y_axis.text)
if __name__ == '__main__':
try:
rostest.run('rwt_robot_monitor', CLASSNAME, TestRwtRobotMonitor, sys.argv)
except KeyboardInterrupt:
pass
print("{} exiting".format(CLASSNAME))
|
import unittest
from almdrlib.session import Session
import re
MOCK_AUTH = {
"authentication": {
"user": {
"id": "589B64BB-AE91-4FA9-A6D8-37AC6759BB5D",
"account_id": "2",
"created": {
"at": 1443713420,
"by": "693BA145-78C0-4C77-AC1A-5385461839CD"
},
"modified": {
"at": 1610707251,
"by": "system"
}
},
"account": {
"id": "2",
"name": "Alert Logic, Inc."
},
"token": "123",
}
}
class NameSpace:
def __init__(self, **kwargs):
self.__dict__ = kwargs
class MockResponse():
elapsed = NameSpace(total_seconds=lambda: 123)
def __init__(self, code_body):
(self.code, self.body) = code_body
def json(self):
return self.body
def status_code(self):
return self.code
def raise_for_status(self):
return None
class MockSession():
def __init__(self, map):
self.map = map
def post(self, url):
return MockResponse(self.get_status_body(url))
def request(self, method, url, **kwargs):
print("URL", url)
return MockResponse(self.get_status_body(url))
def get_status_body(self, url):
for k, v in self.map.items():
if re.match(k, url):
return v
return 200, {}
class TestConf(unittest.TestCase):
def test_globalep(self):
session = Session(global_endpoint="http://api.aesolo.com:8100")
assert session.get_url("aetuner", "1234567") == "http://api.aesolo.com:8100"
session = Session(global_endpoint="production")
session._session = MockSession({r".*aims/v1/authenticate":
(200, MOCK_AUTH),
r".*residency/default/services/aetuner/endpoint":
(200, {"aetuner":"api.alertlogic.com"})})
assert session.get_url("aetuner", "1234567") == "https://api.alertlogic.com"
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-9 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a reference implementation of a Matrix homeserver.
"""
import json
import os
import sys
# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 5):
print("Synapse requires Python 3.5 or above.")
sys.exit(1)
# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to
# actually start Synapse will import these libraries fine.
try:
from twisted.internet import protocol
from twisted.internet.protocol import Factory
from twisted.names.dns import DNSDatagramProtocol
protocol.Factory.noisy = False
Factory.noisy = False
DNSDatagramProtocol.noisy = False
except ImportError:
pass
# Use the standard library json implementation instead of simplejson.
try:
from canonicaljson import set_json_library
set_json_library(json)
except ImportError:
pass
__version__ = "1.18.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
# running the packaging tox test.
from synapse.util.patch_inline_callbacks import do_patch
do_patch()
|
import numpy as np
import pandas as pd                      # used for collecting results below
import sys
import gpflow
import VFF
from time import time
from sklearn.cluster import KMeans       # used to choose inducing points for the sparse models
from config import *
dim = int(sys.argv[1])
rep = int(sys.argv[2])
print('vff: dimension {}, replicate {}'.format(dim, rep))
# data
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, 0))
# full_gp
def prodkern(dim):
return gpflow.kernels.Prod([gpflow.kernels.Matern32(1, active_dims=[i], lengthscales=lengthscale)
for i in range(dim)])
k = prodkern(dim)
m = gpflow.gpr.GPR(data['Xtrain'], data['Ytrain'], kern=k)
m.likelihood.variance = noise_var
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, rep))
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
file = open("results/full.csv","a")
file.write("{}, {}, {}, {}\n".format(dim, rep, marg_lik, mean_log_pred))
file.close()
##########################
# kron
results = pd.DataFrame()
for dim in dimensions:
a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)
k = prodkern(dim)
for r in range(repeats):
print('kron replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
for M in num_freqs:
if (2*M-1)**dim:
a, b = -0.5 * np.ones(dim), 1.5 * np.ones(dim)
m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,
kerns=prodkern(dim).kern_list,
likelihood=gpflow.likelihoods.Gaussian(),
use_two_krons=True)
m.likelihood.variance = noise_var
# only optimize q(u)
m.kerns.fixed = True
m.likelihood.fixed = True
start = time()
m.optimize()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
# do this inside the loop so we can get partial results if something crashes
results.to_csv('results/kron.csv')
##########################
# kron_opt
results = pd.DataFrame()
for dim in dimensions:
a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim)
k = prodkern(dim)
for r in range(repeats):
print('kron_opt replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
for M in num_freqs:
if (2*M-1)**dim:
m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b,
kerns=k.kern_list,
likelihood=gpflow.likelihoods.Gaussian(),
use_two_krons=True)
m.likelihood.variance = noise_var
# build kronecker GP model
start = time()
m.optimize()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
results.to_csv('results/kron_opt.csv')
##########################
# Sparse
results = pd.DataFrame()
for dim in dimensions:
for r in range(repeats):
print('Sparse replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
num_inducing = (2*num_freqs-1)**dim
for M in num_inducing:
if M < 500:
# build sparse GP model
Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_
m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))
m.likelihood.variance = noise_var
start = time()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
# do this inside the loop so we can get partial results if something crashes
results.to_csv('results/sparse_kmeans.csv')
##########################
# Sparse GP opt
results = pd.DataFrame()
for dim in dimensions:
for r in range(repeats):
print('sparse opt replicate ',r,'/',repeats)
data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r))
num_inducing = (2*num_freqs-1)**dim
for M in num_inducing:
if M < 500:
# build sparse GP model
Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_
m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim))
m.likelihood.variance = noise_var
# only optimize Z
m.kern.fixed = True
m.likelihood.fixed = True
start = time()
m.optimize()
marg_lik = m.compute_log_likelihood().squeeze()
mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest']))
t = time() - start
results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik,
mean_log_pred=mean_log_pred, time=t,
num_inducing=M),
ignore_index=True)
# do this inside the loop so we can get partial results if something crashes
results.to_csv('results/sparse_opt.csv')
##########################
#
|
import pandas as pd
from datanator.util import rna_halflife_util
import datetime
import datanator.config.core
from pymongo.collation import Collation, CollationStrength
class Halflife(rna_halflife_util.RnaHLUtil):
def __init__(self, cache_dir=None, server=None, src_db=None, protein_col=None,
authDB=None, readPreference=None, username=None, password=None,
verbose=None, max_entries=None, des_db=None, rna_col=None):
"""Init
Args:
cache_dir (:obj:`str`, optional): Cache directory for logs. Defaults to None.
server (:obj:`str`, optional): MongoDB server address. Defaults to None.
            src_db (:obj:`str`, optional): Database where the initial uniprot collection resides. Defaults to None.
            protein_col (:obj:`str`, optional): Name of the uniprot (protein) collection. Defaults to None.
authDB (:obj:`str`, optional): MongoDB authentication database. Defaults to None.
readPreference (:obj:`str`, optional): MongoDB read preference. Defaults to None.
username (:obj:`str`, optional): MongoDB username. Defaults to None.
password (:obj:`str`, optional): MongoDB password. Defaults to None.
            verbose (:obj:`bool`, optional): Whether to display verbose messages. Defaults to None.
max_entries (:obj:`int`, optional): Number of records to be processed. Defaults to None.
            des_db (:obj:`str`, optional): Destination database for processed records. Defaults to None.
            rna_col (:obj:`str`, optional): Name of the rna_halflife collection in the destination database. Defaults to None.
"""
super().__init__(server=server, username=username, password=password, src_db=src_db,
des_db=des_db, protein_col=protein_col, rna_col=rna_col, authDB=authDB, readPreference=readPreference,
max_entries=max_entries, verbose=verbose)
self.collation = Collation('en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
    def fill_uniprot(self, url, sheet_name, usecols='B:D', skiprows=[0,1,2],
                     insertion=True):
        """Fill uniprot collection with ordered_locus_name
from excel sheet
Args:
url (:obj:`str`): URL for Excel sheet.
sheet_name (:obj:`str`): sheet name within Excel.
usecols (:obj:`int` or :obj:`list` or :obj:`str`): Return a subset of the columns.
skiprows (:obj:`list`): rows to skip (0-indexed)
insertion (:obj:`bool`): whether to insert new records to uniprot collection.
Return:
(:obj:`pandas.DataFrame`): Dataframe
"""
        df = self.make_df(url, sheet_name, usecols=usecols, skiprows=skiprows,
                          names=['ordered_locus_name', 'half_life', 'r_squared'])
row_count = len(df.index)
if insertion:
for index, row in df.iterrows():
if index == self.max_entries:
break
if index % 10 == 0 and self.verbose:
print("Inserting locus {} out of {} into uniprot collection.".format(index, row_count))
oln = row['ordered_locus_name']
self.fill_uniprot_by_oln(oln)
return df
def fill_rna_halflife(self, df, species):
"""load data into rna_halflife collection
Args:
df (:obj:`pandas.DataFrame`): dataframe to be loaded into the database
species (:obj:`list`): species name and ncbi_id
"""
row_count = len(df.index)
for i, row in df.iterrows():
if i == self.max_entries:
break
if i % 10 == 0 and self.verbose:
print("Processing locus {} out {}".format(i, row_count))
halflives = {}
oln = row['ordered_locus_name']
halflives['halflife'] = row['half_life'] * 60
halflives['r_squared'] = row['r_squared']
halflives['unit'] = 's'
halflives['reference'] = [{'doi': '10.1093/nar/gks1019', 'pubmed_id': '23125364'}]
            halflives['growth_medium'] = 'Middlebrook 7H9 with the ADC supplement (Difco) and 0.05% Tween80, at 37 degrees celsius.'
halflives['ordered_locus_name'] = oln
halflives['species'] = species[0]
halflives['ncbi_taxonomy_id'] = species[1]
gene_name, protein_name = self.uniprot_query_manager.get_gene_protein_name_by_oln(oln)
if gene_name is not None: # record exists in uniprot collection with gene_name
self.rna_hl_collection.update_one({'gene_name': gene_name},
{'$set': {'modified': datetime.datetime.utcnow()},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
elif (gene_name is None and protein_name is not None and
protein_name != 'Uncharacterized protein'): # record exists in uniprot collection with non-filler protein_name
self.rna_hl_collection.update_one({'protein_name': protein_name},
{'$set': {'modified': datetime.datetime.utcnow(),
'gene_name': gene_name},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
else:
query = {'halflives.ordered_locus_name': oln}
doc = self.rna_hl_collection.find_one(filter=query, collation=self.collation)
if doc is not None:
self.rna_hl_collection.update_one({'halflives.ordered_locus_name': oln},
{'$set': {'modified': datetime.datetime.utcnow(),
'gene_name': gene_name},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
else:
doc = {'halflives': [halflives], 'modified': datetime.datetime.utcnow(),
'gene_name': gene_name, 'protein_name': protein_name}
self.rna_hl_collection.insert_one(doc)
def main():
src_db = 'datanator'
des_db = 'datanator'
rna_col = 'rna_halflife'
protein_col = 'uniprot'
username = datanator.config.core.get_config()[
'datanator']['mongodb']['user']
password = datanator.config.core.get_config(
)['datanator']['mongodb']['password']
server = datanator.config.core.get_config(
)['datanator']['mongodb']['server']
src = Halflife(server=server, src_db=src_db,
protein_col=protein_col, authDB='admin', readPreference='nearest',
username=username, password=password, verbose=True, max_entries=float('inf'),
des_db=des_db, rna_col=rna_col)
url = 'https://oup.silverchair-cdn.com/oup/backfile/Content_public/Journal/nar/41/1/10.1093/nar/gks1019/2/gks1019-nar-00676-a-2012-File003.xlsx?Expires=1578425844&Signature=ZRFUxLdn4-vaBt5gQci~0o56KqyR9nJj9i32ig5X6YcfqiJeV3obEq8leHGdDxx6w~KABgewiQ66HTB7gmuG~2GL-YgxPKYSjt17WrYMkc-0ibw6TMlTvWZZfvw-lPe~wvpmVfNEXnTbP7jHyNLu9jeJ6yhoXvgIyQtzA5PbEI1fyXEgeZzOKMltmITqL3g3APsPsagCTC66rwrBT23Aghh6D314uilT2DZHCc68MH2nyV~qAhFqIQiOj-7VTEKqkDPvPYvuE2KNKXdvW23gk100YV~58ozbt8ijRz5Gr5gPtE~f1Ab5l260EIbWHJNabMRleInJQqUIDPFN4C38PQ__&Key-Pair-Id=APKAIE5G5CRDK6RD3PGA'
# df = src.fill_uniprot(url, 'Supplementary Table 1', insertion=False)
# src.fill_rna_halflife(df, ['Mycobacterium tuberculosis H37Rv', 83332])
df = src.fill_uniprot(url, 'Supplementary Table 2', skiprows=list(range(0,6)))
src.fill_rna_halflife(df, ['Mycolicibacterium smegmatis MC2 155', 246196])
if __name__ == '__main__':
main()
|
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Flow, Document
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.parsers import set_client_cli_parser
from typing import Dict
from jina import DocumentArray, Executor, requests
class DumpExecutor(Executor):
@requests
def dump(self, docs: DocumentArray, parameters: Dict, **kwargs):
shards = int(parameters['shards'])
dump_path = parameters['dump_path']
shard_size = len(docs) / shards
os.makedirs(dump_path, exist_ok=True)
for i in range(shards):
dump_file = f'{dump_path}/{i}.ndjson'
docs_to_be_dumped = docs[int(i * shard_size) : int((i + 1) * shard_size)]
docs_to_be_dumped.save(dump_file)
class ErrorExecutor(Executor):
@requests
def dump(self, docs: DocumentArray, **kwargs):
if len(docs) > 0:
assert False
class ReloadExecutor(Executor):
def __init__(self, dump_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# backwards compatibility
assert 'dump_path' in kwargs['runtime_args'].keys()
if dump_path is not None:
shard_id = getattr(self.runtime_args, 'pea_id', None)
shard_dump_path = os.path.join(dump_path, f'{shard_id}.ndjson')
self._docs = DocumentArray.load(shard_dump_path)
else:
self._docs = DocumentArray()
@requests
def search(self, docs: DocumentArray, **kwargs):
docs.clear()
docs.extend(self._docs)
class MergeExecutor(Executor):
@requests
def merge(self, docs_matrix: DocumentArray, **kwargs):
merged_docs = DocumentArray()
for docs in docs_matrix:
merged_docs.extend(docs)
return merged_docs
def get_client(port):
args = set_client_cli_parser().parse_args(
['--host', 'localhost', '--port', str(port)]
)
return Client(args)
def get_documents(count=10, emb_size=7):
for i in range(count):
yield Document(
id=i,
text=f'hello world {i}',
embedding=np.random.random(emb_size),
tags={'tag_field': f'tag data {i}'},
)
def path_size(dump_path):
return (
sum(
f.stat().st_size
for f in Path(dump_path).glob('**/*')
if f.is_file()
)
/ 1e6
)
@pytest.mark.repeat(20)
@pytest.mark.parametrize('shards', [5, 3, 1])
@pytest.mark.parametrize('nr_docs', [7])
@pytest.mark.parametrize('emb_size', [10])
def test_dump_reload(tmpdir, shards, nr_docs, emb_size, times_to_index=2):
"""showcases using replicas + dump + rolling update with independent clients"""
with Flow().add(uses=DumpExecutor, name='dump_exec').add(
uses=ErrorExecutor, name='error_exec'
) as flow_dump:
merge_executor = MergeExecutor if shards > 1 else None
with Flow().add(
uses=ReloadExecutor,
name='reload_exec',
replicas=2,
shards=shards,
uses_after=merge_executor,
) as flow_reload:
for run_number in range(times_to_index):
dump_path = os.path.join(tmpdir, f'dump-{run_number}')
client_dbms = get_client(flow_dump.port_expose)
client_query = get_client(flow_reload.port_expose)
docs = list(
get_documents(
count=nr_docs * (run_number + 1),
emb_size=emb_size,
)
)
with TimeContext(f'### dumping {len(docs)} docs'):
client_dbms.post(
on='/dump',
inputs=docs,
target_peapod='dump_exec',
parameters={'dump_path': dump_path, 'shards': shards},
)
print(f'### dump path size: {path_size(dump_path)} MBs')
with TimeContext(f'### rolling update on {len(docs)}'):
# flow object is used for ctrl requests
flow_reload.rolling_update('reload_exec', dump_path)
for _ in range(5):
result = client_query.post(
on='/search', inputs=[Document()], return_results=True
)
assert len(docs) == len(result[0].docs)
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from threading import Thread
import functools
_thread_by_func = {}
class TimeoutException(Exception):
"""
Raised when a function runtime exceeds the limit set.
"""
pass
class ThreadMethod(Thread):
"""
Descendant of `Thread` class.
Run the specified target method with the specified arguments.
Store result and exceptions.
From: https://code.activestate.com/recipes/440569/
"""
def __init__(self, target, args, kwargs):
Thread.__init__(self)
self.setDaemon(True)
self.target, self.args, self.kwargs = target, args, kwargs
self.start()
def run(self):
try:
self.result = self.target(*self.args, **self.kwargs)
except Exception as e:
self.exception = e
else:
self.exception = None
def timeout(timeout):
"""
A decorator to timeout a function. Decorated method calls are executed in a separate new thread
with a specified timeout.
Also check if a thread for the same function already exists before creating a new one.
Note: Compatible with Windows (thread based).
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = "{0}:{1}:{2}:{3}".format(id(func), func.__name__, args, kwargs)
if key in _thread_by_func:
# A thread for the same function already exists.
worker = _thread_by_func[key]
else:
worker = ThreadMethod(func, args, kwargs)
_thread_by_func[key] = worker
worker.join(timeout)
if worker.is_alive():
raise TimeoutException()
del _thread_by_func[key]
if worker.exception:
raise worker.exception
else:
return worker.result
return wrapper
return decorator
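# Hedged usage sketch (not part of the original module): decorating a slow call with a
# one-second limit. The function name and sleep duration below are illustrative only.
if __name__ == '__main__':
    import time

    @timeout(1)
    def _slow_call():
        time.sleep(5)
        return 'done'

    try:
        print(_slow_call())
    except TimeoutException:
        print('call exceeded the 1 second limit')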
|
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate import exceptions
from designate import objects
from designate import policy
from designate.scheduler.filters import base
LOG = logging.getLogger(__name__)
class PoolIDAttributeFilter(base.Filter):
"""This allows users with the correct role to specify the exact pool_id
to schedule the supplied zone to.
This is supplied as an attribute on the zone
.. code-block:: python
:emphasize-lines: 3
{
"attributes": {
"pool_id": "794ccc2c-d751-44fe-b57f-8894c9f5c842"
},
"email": "user@example.com",
"name": "example.com."
}
The pool is loaded to ensure it exists, and then a policy check is
performed to ensure the user has the correct role.
.. warning::
This should only be enabled if required, as it will raise a
403 Forbidden if a user without the correct role uses it.
"""
name = 'pool_id_attribute'
"""Name to enable in the ``[designate:central:scheduler].filters`` option
list
"""
def filter(self, context, pools, zone):
"""Attempt to load and set the pool to the one provided in the
Zone attributes.
:param context: :class:`designate.context.DesignateContext` - Context
Object from request
:param pools: :class:`designate.objects.pool.PoolList` - List of pools
to choose from
:param zone: :class:`designate.objects.zone.Zone` - Zone to be created
        :return: :class:`designate.objects.pool.PoolList` -- A PoolList
            containing a single pool.
:raises: Forbidden, PoolNotFound
"""
try:
if zone.attributes.get('pool_id'):
pool_id = zone.attributes.get('pool_id')
try:
pool = self.storage.get_pool(context, pool_id)
except Exception:
return objects.PoolList()
policy.check('zone_create_forced_pool', context, pool)
if pool in pools:
pools = objects.PoolList()
pools.append(pool)
return pools
else:
return pools
except exceptions.RelationNotLoaded:
return pools
|
from .operation import Operation
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Utilities related to the ORM."""
__all__ = ('load_code', 'load_computer', 'load_group', 'load_node')
def load_entity(
entity_loader=None, identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True
):
# pylint: disable=too-many-arguments
"""
Load an entity instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Code
:param pk: pk of a Code
:param uuid: uuid of a Code, or the beginning of the uuid
:param label: label of a Code
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:returns: the Code instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Code is found
:raise aiida.common.MultipleObjectsError: if more than one Code was found
"""
from aiida.orm.utils.loaders import OrmEntityLoader, IdentifierType
if entity_loader is None or not issubclass(entity_loader, OrmEntityLoader):
        raise TypeError(f'entity_loader should be a sub class of {OrmEntityLoader}')
inputs_provided = [value is not None for value in (identifier, pk, uuid, label)].count(True)
if inputs_provided == 0:
raise ValueError("one of the parameters 'identifier', pk', 'uuid' or 'label' has to be specified")
elif inputs_provided > 1:
raise ValueError("only one of parameters 'identifier', pk', 'uuid' or 'label' has to be specified")
if pk is not None:
if not isinstance(pk, int):
raise TypeError('a pk has to be an integer')
identifier = pk
identifier_type = IdentifierType.ID
elif uuid is not None:
if not isinstance(uuid, str):
raise TypeError('uuid has to be a string type')
identifier = uuid
identifier_type = IdentifierType.UUID
elif label is not None:
if not isinstance(label, str):
raise TypeError('label has to be a string type')
identifier = label
identifier_type = IdentifierType.LABEL
else:
identifier = str(identifier)
identifier_type = None
return entity_loader.load_entity(
identifier, identifier_type, sub_classes=sub_classes, query_with_dashes=query_with_dashes
)
def load_code(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a Code instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Code
:param pk: pk of a Code
:param uuid: uuid of a Code, or the beginning of the uuid
:param label: label of a Code
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:return: the Code instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Code is found
:raise aiida.common.MultipleObjectsError: if more than one Code was found
"""
from aiida.orm.utils.loaders import CodeEntityLoader
return load_entity(
CodeEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_computer(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a Computer instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Computer
:param pk: pk of a Computer
:param uuid: uuid of a Computer, or the beginning of the uuid
:param label: label of a Computer
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:return: the Computer instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Computer is found
:raise aiida.common.MultipleObjectsError: if more than one Computer was found
"""
from aiida.orm.utils.loaders import ComputerEntityLoader
return load_entity(
ComputerEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_group(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a Group instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Group
:param pk: pk of a Group
:param uuid: uuid of a Group, or the beginning of the uuid
:param label: label of a Group
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:return: the Group instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Group is found
:raise aiida.common.MultipleObjectsError: if more than one Group was found
"""
from aiida.orm.utils.loaders import GroupEntityLoader
return load_entity(
GroupEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_node(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a node by one of its identifiers: pk or uuid. If the type of the identifier is unknown
simply pass it without a keyword and the loader will attempt to infer the type
:param identifier: pk (integer) or uuid (string)
:param pk: pk of a node
:param uuid: uuid of a node, or the beginning of the uuid
:param label: label of a Node
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:returns: the node instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Node is found
:raise aiida.common.MultipleObjectsError: if more than one Node was found
"""
from aiida.orm.utils.loaders import NodeEntityLoader
return load_entity(
NodeEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
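# Hedged usage sketch (not part of the original module): these loaders assume a
# configured AiiDA profile and existing database entities; the pk/label/uuid values
# below are placeholders only.
#
#   from aiida import load_profile
#   load_profile()
#
#   node = load_node(pk=123)                    # load by integer pk
#   code = load_code(label='my-code-label')     # load by label
#   computer = load_computer(uuid='4f8f4b7c')   # load by (partial) uuid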
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides tools to compute class activation map.
"""
# Imports
import logging
import skimage
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as func
# Global parameters
logger = logging.getLogger("pynet")
class FeatureExtractor(object):
""" Class for extracting activations and registering gradients from
    targeted intermediate layers.
"""
def __init__(self, model, target_layers):
self.model = model
self.target_layers = target_layers
self.gradients = []
def save_gradient(self, grad):
self.gradients.append(grad)
def __call__(self, x):
outputs = []
self.gradients = []
for name, module in self.model._modules.items():
x = module(x)
if name in self.target_layers:
x.register_hook(self.save_gradient)
outputs += [x]
return outputs, x
class ModelOutputs(object):
""" Class for making a forward pass, and getting:
1- the network output.
    2- activations from intermediate targeted layers.
    3- gradients from intermediate targeted layers.
"""
def __init__(self, model, target_layers):
self.model = model
self.feature_extractor = FeatureExtractor(
self.model.features, target_layers)
def get_activations_gradient(self):
return self.feature_extractor.gradients
def get_activations(self, x):
return self.feature_extractor(x)
def __call__(self, x):
if hasattr(self.model, "pre"):
x = self.model.pre(x)
target_activations, output = self.feature_extractor(x)
if hasattr(self.model, "pool"):
output = self.model.pool(output)
output = output.view(output.size(0), -1)
output = self.model.classifier(output)
return target_activations, output
class GradCam(object):
""" Class for computing class activation map.
"""
def __init__(self, model, target_layers, labels, top=1):
self.model = model
self.labels = labels
self.top = top
self.model.eval()
self.extractor = ModelOutputs(self.model, target_layers)
def forward(self, input):
return self.model(input)
def __call__(self, input):
features, output = self.extractor(input)
pred_prob = func.softmax(output, dim=1).data.squeeze()
probs, indices = pred_prob.sort(0, True)
probs = probs.data.numpy()
indices = indices.data.numpy()
heatmaps = {}
for cnt, (prob, index) in enumerate(zip(probs, indices)):
if cnt == self.top:
break
label = self.labels[str(index)][1]
line = "{0:.3f} -> {1}".format(prob, label)
logger.info(line)
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
one_hot = torch.sum(one_hot * output)
self.model.features.zero_grad()
self.model.classifier.zero_grad()
one_hot.backward(retain_graph=True)
gradients = self.extractor.get_activations_gradient()[-1]
gradients = gradients.cpu().data.numpy()
pooled_gradients = np.mean(gradients, axis=(0, 2, 3))
activations = features[-1]
activations = activations.cpu().data.numpy()
for cnt, weight in enumerate(pooled_gradients):
activations[:, cnt] *= weight
heatmap = np.mean(activations, axis=1).squeeze()
heatmap = np.maximum(heatmap, 0)
heatmap -= np.min(heatmap)
heatmap /= np.max(heatmap)
heatmap_highres = skimage.transform.resize(
heatmap, input.shape[2:])
heatmaps[label] = (input, heatmap, heatmap_highres)
return heatmaps
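# Hedged usage sketch (not part of the original module): GradCam expects a model that
# exposes `features` and `classifier` sub-modules (VGG-style torchvision networks fit
# this layout) and a labels dict mapping str(index) -> (class_id, class_name), e.g. the
# ImageNet class-index JSON. The layer name "29" and the input shape are assumptions.
#
#   import json
#   import torch
#   import torchvision.models as models
#
#   model = models.vgg16(pretrained=True)
#   labels = json.load(open("imagenet_class_index.json"))
#   gradcam = GradCam(model, target_layers=["29"], labels=labels, top=1)
#   input_tensor = torch.randn(1, 3, 224, 224)
#   heatmaps = gradcam(input_tensor)  # {class_name: (input, heatmap, heatmap_highres)}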
|
# -*- coding: utf-8 -*-
from PySide2 import QtCore, QtGui, QtWidgets
import json
import core_functions as cf
import numpy as np
from UI_labeled_slider import LabeledSlider
class Ui_AssignGroup(object):
def setupUi(self, AssignGroups):
# Note: this is not how it should be done but currently I don't know
# how to do it differently. This is only needed to be able to emit
# signals to the main window
AssignGroups.setObjectName("AssignGroups")
        AssignGroups.setWindowTitle("Group Assignment Dialog")
AssignGroups.resize(509, 317)
AssignGroups.setStyleSheet(
"QWidget {\n"
" background-color: rgb(44, 49, 60);\n"
" color: rgb(255, 255, 255);\n"
' font: 63 10pt "Segoe UI";\n'
"}\n"
"QPushButton {\n"
" border: 2px solid rgb(52, 59, 72);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QPushButton:hover {\n"
" background-color: rgb(57, 65, 80);\n"
" border: 2px solid rgb(61, 70, 86);\n"
"}\n"
"QPushButton:pressed {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(43, 50, 61);\n"
"}\n"
"QPushButton:checked {\n"
" background-color: rgb(35, 40, 49);\n"
" border: 2px solid rgb(85, 170, 255);\n"
"}"
"QLineEdit {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QDoubleSpinBox {\n"
" border: 2px solid rgb(61, 70, 86);\n"
" border-radius: 5px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
)
self.verticalLayout = QtWidgets.QVBoxLayout(AssignGroups)
self.verticalLayout.setContentsMargins(25, 10, 25, 10)
self.verticalLayout.setObjectName("verticalLayout")
# # Device settings
# self.device_settings_header_label = QtWidgets.QLabel(AssignGroups)
# self.device_settings_header_label.setMinimumSize(QtCore.QSize(0, 20))
# self.device_settings_header_label.setStyleSheet(
# 'font: 75 bold 10pt "Segoe UI";'
# )
# self.device_settings_header_label.setObjectName("device_settings_header_label")
# self.verticalLayout.addWidget(self.device_settings_header_label)
# self.header_line_1 = QtWidgets.QFrame()
# self.header_line_1.setFrameShape(QtWidgets.QFrame.HLine)
# self.header_line_1.setFrameShadow(QtWidgets.QFrame.Sunken)
# self.verticalLayout.addWidget(self.header_line_1)
# self.header_line_1.setStyleSheet(
# "QFrame {\n" " border: 2px solid rgb(52, 59, 72);\n" "}\n"
# )
# self.manualRowCountGridLayout = 1
# Define dialog in which parameters should be entered
# dialog = QtWidgets.QDialog()
# dialog.setWindowTitle("Group Assignement Dialog")
# Select the scan that shall be evaluated
if not self.include_all_scans:
self.select_scan_number_label = QtWidgets.QLabel()
self.select_scan_number_label.setObjectName("select_scan_number_label")
self.verticalLayout.addWidget(self.select_scan_number_label)
self.select_scan_number_ComboBox = QtWidgets.QComboBox()
self.select_scan_number_ComboBox.setObjectName(
"select_scan_number_ComboBox"
)
for i in range(self.parameters["no_of_scans"]):
self.select_scan_number_ComboBox.addItem(str(int(i + 1)))
self.select_scan_number_ComboBox.setCurrentIndex(0)
self.verticalLayout.addWidget(self.select_scan_number_ComboBox)
# Select the number of groups to define
self.no_groups_label = QtWidgets.QLabel()
self.verticalLayout.addWidget(self.no_groups_label)
self.no_groups_LabeledSlider = LabeledSlider(
1,
int(np.size(np.unique(self.parameters["device_number"]))),
interval=1,
orientation=QtCore.Qt.Horizontal,
)
self.verticalLayout.addWidget(self.no_groups_LabeledSlider)
self.available_devices_label = QtWidgets.QLabel()
self.verticalLayout.addWidget(self.available_devices_label)
# if np.size(self.paths) == 1:
# verticalLayout.addWidget(self.no_groups_LabeledSlider)
# Define the group assignement fields
self.group_definition_gridLayout = QtWidgets.QGridLayout()
self.group_definition_gridLayout.setSpacing(10)
# Group names and its container
self.group_name_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(self.group_name_label, 1, 0, 1, 1)
self.group_name_LineEdit_container = np.empty(0, dtype="object")
self.group_name_LineEdit_container = np.append(
self.group_name_LineEdit_container, QtWidgets.QLineEdit()
)
self.group_definition_gridLayout.addWidget(
self.group_name_LineEdit_container[0], 2, 0
)
# Enter device numbers and its container
self.device_assignment_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(
self.device_assignment_label, 1, 1, 1, 1
)
self.device_assignment_LineEdit_container = np.empty(0, dtype="object")
self.device_assignment_LineEdit_container = np.append(
self.device_assignment_LineEdit_container, QtWidgets.QLineEdit()
)
self.group_definition_gridLayout.addWidget(
self.device_assignment_LineEdit_container[0], 2, 1
)
# Assign a spectrum file to the group
if not self.autodetect_spectrum:
self.spectrum_file_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(
self.spectrum_file_label, 1, 2, 1, 1
)
self.group_spectrum_PushButton_container = np.empty(0, dtype="object")
self.group_spectrum_PushButton_container = np.append(
self.group_spectrum_PushButton_container, QtWidgets.QPushButton("")
)
self.group_spectrum_PushButton_container[0].setStyleSheet(
"background-color: red"
)
self.group_definition_gridLayout.addWidget(
self.group_spectrum_PushButton_container[0], 2, 2
)
# Definition of a plotting color for the group
self.group_color_label = QtWidgets.QLabel()
self.group_definition_gridLayout.addWidget(self.group_color_label, 1, 3, 1, 1)
self.group_colors_PushButton_container = np.empty(0, dtype="object")
self.group_colors_PushButton_container = np.append(
self.group_colors_PushButton_container, QtWidgets.QPushButton("")
)
self.group_colors_PushButton_container[0].setStyleSheet(
"background-color: " + str(self.group_color[0])
)
self.group_definition_gridLayout.addWidget(
self.group_colors_PushButton_container[0], 2, 3
)
# Define the bottom pushbuttons that allows to close and save the dialog
self.leave_horizontalLayout = QtWidgets.QHBoxLayout()
self.close_pushButton = QtWidgets.QPushButton("Close")
self.save_pushButton = QtWidgets.QPushButton("Save")
self.leave_horizontalLayout.addWidget(self.close_pushButton)
self.leave_horizontalLayout.addWidget(self.save_pushButton)
self.verticalLayout.addLayout(self.group_definition_gridLayout)
self.verticalLayout.addLayout(self.leave_horizontalLayout)
self.setLayout(self.verticalLayout)
self.retranslateUi(AssignGroups)
QtCore.QMetaObject.connectSlotsByName(AssignGroups)
def retranslateUi(self, AssignGroups):
_translate = QtCore.QCoreApplication.translate
AssignGroups.setWindowTitle(_translate("AssignGroups", "Assign Groups"))
if not self.include_all_scans:
self.select_scan_number_label.setText(
_translate("AssignGroups", "Select Scan")
)
self.no_groups_label.setText(
_translate("AssignGroups", "Select Number of Groups")
)
self.available_devices_label.setText(
_translate(
"AssignGroups",
"Available Devices for Assignment "
+ str(self.parameters["device_number"]),
)
)
self.group_name_label.setText(_translate("AssignGroups", "Group Name"))
self.device_assignment_label.setText(
_translate("AssignGroups", "Assign Devices (seperated by ,)")
)
self.group_color_label.setText(_translate("AssignGroups", "Color"))
if not self.autodetect_spectrum:
self.spectrum_file_label.setText(_translate("AssignGroups", "Spectrum"))
|
# Dictionary
class Dict_word_jumbler(object):
def __init__(self):
self.dict = self.build_dict()
def build_dict(self):
""""Build a dictionary to hold all of the words/letters"""
dic = {}
f = open("/usr/share/dict/words", "r")
word_list = f.readlines()
for word in word_list:
word = word.strip().lower()
words = ''.join(sorted(word))
dic[words] = word
return dic
    def unscramble(self, words):
        """Unscramble each word and print the dictionary match if one exists."""
        for word in words:
            word = word.strip().lower()
            word_sorted = ''.join(sorted(word))
            if word_sorted in self.dict:
                unscrambled = self.dict[word_sorted]
                print(unscrambled)
            else:
                # No match found; continue with the remaining words instead of returning early.
                print("No match found for '{}'".format(word))
if __name__ == '__main__':
# Cartoon prompt for final jumble:
# "Farley rolled on the barn floor because of his __-______."
words = ['tefon', 'sokik', 'niumem', 'siconu']
jumble = Dict_word_jumbler()
jumble.unscramble(words)
# # "A bad way for a lawyer to learn the criminal justice system: _____ and _____."
# words = ['laisa', 'laurr', 'bureek', 'prouot']
# jumble = Dict_word_jumbler()
# jumble.unscramble(words)
# # Cartoon prompt for final jumble: "What a dog house is: A ____ ___."
# words = ['TARFD', 'JOBUM', 'TENJUK', 'LETHEM']
# jumble = Dict_word_jumbler()
# jumble.unscramble(words)
|
import pickle
dict1 = {'Python ':90,'Java ':95,'C++ ':85}
f = open('bin)file.dat','wb')
pickle.dump(dict1,f)
f.close()
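# Hedged companion sketch: reading the pickled dictionary back from the same file.
f = open('bin)file.dat', 'rb')
dict2 = pickle.load(f)
f.close()
print(dict2)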
|
"""
Testing ResGraphNet
"""
import datetime
import numpy as np
import pandas as pd
import torch
import os
import os.path as osp
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
import func.cal as cal
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# device = "cpu"
l_x = 60 # Data sequence length
l_y = 1 # Label sequence length
lr = 0.0001 # Learning rate
weight_decay = 5e-4
epochs = 4000
hidden_dim = 64
gnn_style = "ResGraphNet"
save_fig = True # Whether to save picture
save_txt = False # Whether to save txt
save_np = True # Whether to save np file
save_model = True # Whether to save network model
ratio_train = 0.5 # Proportion of training datasets
fig_size = (16, 12)
ts_name_all = ["cli_dash", "HadCRUT5", "temp_month", "temp_year", "elect", "traffic", "sales"]
ts_name_folder = "HadCRUT5" # Name of the folder where the data resides
ts_name = "HadCRUT5_global" # Name of the selected time series
iv = 1 # sampling interval, used for plotting curves
way = "mean" # The style of plot curves of real data and predict results
x_address = osp.join("../datasets", ts_name_folder, ts_name + ".npy")
x = np.load(x_address)
num = x.shape[0] # The length of time series
result_address = osp.join("../result", ts_name, "ResGraphNet")
if not(osp.exists(result_address)):
os.makedirs(result_address)
num_train = int(ratio_train * num)
data_train, data_test = x[:num_train], x[num_train:num] # get training dataset and test dataset
len_interp = l_y + 6
data_test_ = np.array(data_test[:-l_y].tolist() + data_test[-len_interp-l_y:-l_y].tolist() + data_test[-l_y:].tolist())
# Using Graph Neural network, prepare data information
x_train, y_train = cal.create_inout_sequences(data_train, l_x, l_y, style="arr")
x_test, y_test = cal.create_inout_sequences(data_test_, l_x, l_y, style="arr")
x_train = torch.from_numpy(x_train).float().to(device)
x_test = torch.from_numpy(x_test).float().to(device)
y_train = torch.from_numpy(y_train).float().to(device)
y_test = torch.from_numpy(y_test).float().to(device)
num_nodes = x_train.shape[0] + x_test.shape[0]
num_train = x_train.shape[0]
x = torch.cat((x_train, x_test), dim=0)
y = torch.cat((y_train, y_test), dim=0)
adm = cal.path_graph(num_nodes)
# adm = cal.ts_un(num_nodes, 6)
edge_index, edge_weight = cal.tran_adm_to_edge_index(adm)
train_index = torch.arange(num_train, dtype=torch.long)
test_index = torch.arange(num_train, num_nodes, dtype=torch.long)
train_mask = cal.index_to_mask(train_index, num_nodes).to(device)
test_mask = cal.index_to_mask(test_index, num_nodes).to(device)
# Using ResGraphNet, predicting time series (The Proposed Network Model)
model = cal.GNNTime(l_x, hidden_dim, l_y, edge_weight, gnn_style, num_nodes).to(device)
criterion = torch.nn.MSELoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
edge_index = edge_index.to(device)
start_time = datetime.datetime.now()
print("Running, {}".format(gnn_style))
for epoch in range(epochs):
model.train()
optimizer.zero_grad()
output = model(x, edge_index)
output_train, y_train = output[train_mask], y[train_mask]
train_loss = criterion(output_train[:, -1], y_train[:, -1])
train_loss.backward()
optimizer.step()
model.eval()
y_test_1 = y[test_mask][:-len_interp-l_y, :]
y_test_2 = y[test_mask][-l_y:, :]
y_test = torch.cat((y_test_1, y_test_2), dim=0)
output_test = output[test_mask][:-len_interp, :]
test_loss = criterion(output_test[:, -1], y_test[:, -1])
train_true = y_train.detach().cpu().numpy()[:, -1]
train_predict = output_train.detach().cpu().numpy()[:, -1]
test_true = y_test.detach().cpu().numpy()[:, -1]
test_predict = output_test.detach().cpu().numpy()[:, -1]
r2_train = cal.get_r2_score(train_predict, train_true, axis=1)
r2_test = cal.get_r2_score(test_predict, test_true, axis=1)
if (epoch + 1) % 100 == 0:
print("Epoch: {:05d} Loss_Train: {:.5f} Loss_Test: {:.5f} R2_Train: {:.7f} R2_Test: {:.7f}".
format(epoch + 1, train_loss.item(), test_loss.item(), r2_train, r2_test))
# predict and plot future time series
plot_predict = test_predict[-l_y:]
plot_true = test_true[-l_y:]
mse_plot = np.mean(np.square(plot_predict - plot_true))
print("mse_plot: {}".format(mse_plot))
cal.plot_spiral(plot_predict) # predict results in the coming year
if save_fig:
plt.savefig(osp.join(result_address, "future_predict.png"))
cal.plot_spiral(plot_true) # true data in the coming year
if save_fig:
plt.savefig(osp.join(result_address, "future_true.png"))
# calculate running time
end_time = datetime.datetime.now()
run_time = end_time - start_time # The running time of program
# save model and numpy.file
if save_model:
torch.save(model, osp.join(result_address, "{}.pkl".format(gnn_style)))
if save_np:
np.save(osp.join(result_address, "train_true.npy"), train_true)
np.save(osp.join(result_address, "test_true.npy"), test_true)
np.save(osp.join(result_address, "train_predict_{}.npy".format(gnn_style)), train_predict)
np.save(osp.join(result_address, "test_predict_{}.npy".format(gnn_style)), test_predict)
# plot the error and results
e_gnn = test_true - test_predict
cal.plot_distribute(e_gnn, 40, 4, x_name="e")
if save_fig:
plt.savefig(osp.join(result_address, ts_name + "_" + gnn_style + "_error_distribution.png"))
cal.plot_result(train_true, test_true, train_predict, test_predict, iv, way, fig_size)
if save_fig:
plt.savefig(osp.join(result_address, ts_name + "_" + gnn_style + ".png"))
# print indicators
rmse_train = cal.get_rmse(train_predict, train_true)
rmse_test = cal.get_rmse(test_predict, test_true)
r2_train = cal.get_r2_score(train_predict, train_true, axis=1)
r2_test = cal.get_r2_score(test_predict, test_true, axis=1)
print("{}: RMSE_Train={:.5f} RMSE_Test={:.5f} R2_Train={:.7f} R2_Test={:.7f}".
format(gnn_style, rmse_train, rmse_test, r2_train, r2_test))
# The output results of each model are appended to the file
if save_txt:
info_txt_address = osp.join(result_address, "ResGraphNet_result.txt") # txt file address for saving parameter information
info_df_address = osp.join(result_address, "ResGraphNet_result.csv") # csv file address for saving parameter information
f = open(info_txt_address, 'a')
if osp.getsize(info_txt_address) == 0: # add the name of each feature in the first line of the text
f.write("gnn_style r2_test r2_train run_time l_x l_y hidden_dim lr epochs\n")
f.write(str(gnn_style) + " ")
f.write(str(r2_test) + " ")
f.write(str(r2_train) + " ")
f.write(str(run_time) + " ")
f.write(str(l_x) + " ")
f.write(str(l_y) + " ")
f.write(str(hidden_dim) + " ")
f.write(str(lr) + " ")
f.write(str(epochs) + " ")
f.write("\n") # Prepare for next running
f.close() # close file
info = np.loadtxt(info_txt_address, dtype=str)
columns = info[0, :].tolist()
values = info[1:, :]
info_df = pd.DataFrame(values, columns=columns)
info_df.to_csv(info_df_address)
print()
plt.show()
print()
|
# -*- coding: utf-8 -*-
"""
===================== OpenStereotaxy module for FreeCAD =======================
This Python module for FreeCAD allows the user to calculate the chamber-centered
coordinates of the target structure(s). Based on this data, the module will
generate surface meshes (exported in .stl format ready for 3D-printing) of the
following custom parts:
1) a drill guide for performing craniotomy
2) a guide tube guide grid
3) a microdrive system
Written by Aidan Murphy, PhD (murphyap@nih.gov)
"""
import numpy as np
from scipy.io import loadmat
# ================= Load data from Slicer files
def LoadChamberCoords(TransformFile, TargetsFile):
    x = loadmat(TransformFile, squeeze_me=True)                     # Load transform matrix
    data = x['AffineTransform_double_3_3']
    TransformMatrix = np.reshape(data[0:9], [3, 3])                 # Reshape rotation/scale block
    Translation = np.reshape(data[9:12], [3, 1])                    # Remaining 3 values assumed to be the translation
    TransformMatrix = np.vstack([np.hstack([TransformMatrix, Translation]), [0, 0, 0, 1]])
    Ras2lps = np.array([-1, -1, 1, 1])
    Tform = TransformMatrix * Ras2lps                               # Convert transform matrix from LPS to RAS
    ChamberCoords = []
    with open(TargetsFile, 'r') as fid:                             # Load target coordinate data (.fcsv)
        for line in fid.readlines()[3:]:                            # For each target...
            fields = line.strip().split(",")
            XYZ_RAS = np.array([float(v) for v in fields[1:4]])     # Get the raw MR-volume coordinates
            XYZ_Chamber = Tform.dot(np.append(XYZ_RAS, 1.0))        # Apply transform
            ChamberCoords.append({'Name': fields[11],
                                  'Description': fields[12],
                                  'TformFile': TransformFile,
                                  'TformMatrix': Tform,
                                  'XYZ_RAS': XYZ_RAS,
                                  'XYZ_Chamber': -XYZ_Chamber[0:3]})  # Chamber-centered coordinates
    return ChamberCoords
# ================= Move electrode holes
def UpdateHoleLocations(ChamberCoords):
TransformFile = '/Volumes/RAWDATA/murphya/MRI/StevieRay/Surgery2_Plan/ManualTransform_LH_V2.mat'
TargetsFile = '/Volumes/RAWDATA/murphya/MRI/StevieRay/Surgery2_Plan/SurgicalTargets.fcsv'
ChamberCoords = LoadChamberCoords(TransformFile, TargetsFile)
|
import numpy as np
def FNS(scores):
domination = np.all(scores[:, None, :] <= scores[None, :, :], axis=2)  # domination[i, j] is True when solution i dominates solution j
domination &= np.any(scores[:, None, :] < scores[None, :, :], axis=2)
Nx = domination.sum(0)
Pf = []
ranks = np.zeros(scores.shape[0])
r = 0
Q = np.nonzero(Nx == 0)[0]
while Q.size > 0:
Nx[Q] = -1
Pf.append(Q)
ranks[Q] = r
r += 1
for i in Q:
Nx[domination[i, :]] -= 1
Q = np.nonzero(Nx == 0)[0]
return Pf, ranks
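# Illustrative example (my own toy data, assuming every column of `scores` is minimised):
#   FNS(np.array([[1., 4.], [2., 3.], [3., 5.]]))
# returns fronts [array([0, 1]), array([2])] and ranks [0., 0., 1.], since the first two
# points are mutually non-dominated and the third is dominated by both of them.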
def crowding_distance(scores):
indices = np.argsort(scores, 0)
sorted_scores = np.take_along_axis(scores, indices, 0)
cd = np.zeros(scores.shape[0])
for k in range(scores.shape[1]):
if sorted_scores[-1, k] != sorted_scores[0, k]:
cd[indices[[0, -1], k]] = np.inf
cd[indices[1:-1, k]] += (sorted_scores[2:, k] - sorted_scores[:-2, k]) / (
sorted_scores[-1, k] - sorted_scores[0, k])
return cd
def random_population(d, n, x_min, x_max):
return np.random.uniform(x_min, x_max, (n, d))
def tournament_selection(ranks, dists, n):
candidates = np.random.choice(n, (n, 2), replace=True)
mask = np.where(
ranks[candidates[:, 0]] == ranks[candidates[:, 1]],
dists[candidates[:, 0]] > dists[candidates[:, 1]],
ranks[candidates[:, 0]] < ranks[candidates[:, 1]]
)
result = candidates[:, 1]
result[mask] = candidates[mask, 0]
return result
def crossover(x, p, eta): # simulated binary crossover
n, d = x.shape
l = n // 2
mask = np.random.random((l, d)) <= p
m = np.sum(mask)
mi = np.random.random(m)
beta = np.where(
mi < 0.5,
np.power(2 * mi, 1. / (eta + 1.)),
np.power(1. / (2. * (1 - mi)), 1. / (eta + 1.))
)
c1 = x[:l, :].copy()
c2 = x[l:, :].copy()
c1[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask]
c2[mask] = 0.5 * (1 - beta) * x[:l, :][mask] + 0.5 * (1 + beta) * x[l:, :][mask]
return np.vstack([c1, c2])
def mutation(x, x_min, x_max, p, eta): # polynomial mutation
n, d = x.shape
mask = np.random.random((n, d)) <= p
if isinstance(x_min, np.ndarray):
x_min = np.repeat(x_min[None, :], n, axis=0)
x_min = x_min[mask]
if isinstance(x_max, np.ndarray):
x_max = np.repeat(x_max[None, :], n, axis=0)
x_max = x_max[mask]
m = np.sum(mask)
mi = np.random.random(m)
beta = np.where(
mi < 0.5,
np.power(2 * mi, 1. / (eta + 1.)) - 1.,
1. - np.power(2. * (1 - mi), 1. / (eta + 1.))
)
y = x.copy()
y[mask] = np.where(
mi < 0.5,
x[mask] + beta * (x[mask] - x_min),
x[mask] + beta * (x_max - x[mask])
)
return y
def elitist_selection(fronts, dists, to_take):
taken = []
for front in fronts:
if len(front) <= to_take:
taken += list(front)
if len(front) == to_take:
break
to_take -= len(front)
else:
indices = np.argsort(-dists[front])[:to_take]
taken += list(front[indices])
break
return taken
def constraint_violation(constraints):
n, d = constraints.shape
sort_indices = np.argsort(constraints, 0)
violations = np.zeros(n)
for i in range(d):
values, counts = np.unique(constraints[:, i], return_counts=True)  # unique values are returned sorted
counts = np.cumsum(counts)
counts = list(counts)
if values[0] != 0:
counts = [0] + counts
for rank, (j, k) in enumerate(zip([0] + counts, counts + [len(counts)])):
violations[sort_indices[j:k, i]] += rank
return violations
def evaluation(objective, n_constraints, population):
obj_results = objective(population)
constraint_values = obj_results[:, -n_constraints:]
violation_measure = constraint_violation(constraint_values)
scores = np.concatenate([obj_results[:, :-n_constraints], violation_measure[:, None]], 1)
return scores
def split_and_select(population, scores, n_f, n_inf):
dists = crowding_distance(scores)
mask_f = scores[:, -1] == 0
population_f = population[mask_f, :]
scores_f = scores[mask_f, :]
dists_f = dists[mask_f]
population_inf = population[~mask_f, :]
scores_inf = scores[~mask_f, :]
dists_inf = dists[~mask_f]
s_f = population_f.shape[0]
s_inf = population_inf.shape[0]
n = n_f + n_inf
if s_f < n_f:
to_take_f = s_f
to_take_inf = n - s_f
elif s_inf < n_inf:
to_take_inf = s_inf
to_take_f = n - s_inf
else:
to_take_f = n_f
to_take_inf = n_inf
fronts_f, ranks_f = FNS(scores_f)
taken_f = elitist_selection(fronts_f, dists_f, to_take_f)
fronts_inf, ranks_inf = FNS(scores_inf)
taken_inf = elitist_selection(fronts_inf, dists_inf, to_take_inf)
return population_f[taken_f, :], population_inf[taken_inf, :], scores_f[taken_f, :], scores_inf[taken_inf, :]
def IDEA(objective, n_constraints, x_min, x_max, d, n, *args, **kwargs):
population = random_population(d, n, x_min, x_max)
return sub_IDEA(population, objective, n_constraints, x_min, x_max, n, *args, **kwargs)
def dynamic_IDEA(objective, n_constraints, T, x_min, x_max, d, n, alpha_inf,
*args, num_iterations_init, num_iterations, n_immigrants=0, **kwargs):
population = random_population(d, n, x_min, x_max)
print("=" * 80)
print("t=0")
print("=" * 80)
t = 0
def round_objective(round_population):
return objective(t, round_population)
p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args,
num_iterations=num_iterations_init, **kwargs)
population_history = [p]
score_history = [s]
n_to_keep = n - n_immigrants
n_inf = int(n_to_keep * alpha_inf)
n_f = n_to_keep - n_inf
for t in range(1, T):
print("=" * 80)
print(f"t={t}")
print("=" * 80)
population = p[-1, :, :]
scores = s[-1, :, :]
if n_immigrants > 0:
population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
immigrants = random_population(d, n_immigrants, x_min, x_max)
population = np.vstack([population_f, population_inf, immigrants])
assert population.shape[0] == n
p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args,
num_iterations=num_iterations, **kwargs)
population_history.append(p)
score_history.append(s)
return population_history, score_history
def sub_IDEA(population, objective, n_constraints, x_min, x_max, n, alpha_inf,
eta_c, eta_m, p_c, p_m, num_iterations, log_interval=10):
n_inf = int(n * alpha_inf)
n_f = n - n_inf
populations = []
scores = evaluation(objective, n_constraints, population)
scores_hist = []
fronts, ranks = FNS(scores)
dists = crowding_distance(scores)
def log_message():
count_f = population_f.shape[0]
count_inf = population_inf.shape[0]
print(
f"Iteration {iter_}, " +
f"#feasible: {count_f}, best: {scores_f[:, :-1].min(0) if count_f > 0 else '-'}, " +
f"#infeasible: {count_inf}, best: {scores_inf.min(0) if count_inf > 0 else '-'}"
)
for iter_ in range(num_iterations):
parent_indices = tournament_selection(ranks, dists, n)
offspring = crossover(population[parent_indices, :], p_c, eta_c)
offspring = np.clip(offspring, x_min, x_max)
offspring = mutation(offspring, x_min, x_max, p_m, eta_m)
offspring_scores = evaluation(objective, n_constraints, offspring)
population = np.vstack([population, offspring])
scores = np.vstack([scores, offspring_scores])
population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
population = np.vstack([population_f, population_inf])
scores = np.vstack([scores_f, scores_inf])
fronts, ranks = FNS(scores)
dists = crowding_distance(scores)
populations.append(population.copy())
scores_hist.append(scores.copy())
if iter_ % log_interval == 0:
log_message()
log_message()
return np.stack(populations, 0), np.stack(scores_hist, 0)
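# Minimal usage sketch (my own toy problem, not part of the original module): minimise two
# objectives of a 2-D decision vector with a single constraint. The objective callable must
# return an array whose last n_constraints columns hold constraint violations (0 = feasible),
# which is what evaluation() expects; all parameter values below are illustrative.
if __name__ == "__main__":
    def toy_objective(x):
        f1 = np.sum(x ** 2, axis=1)
        f2 = np.sum((x - 1.0) ** 2, axis=1)
        g = np.maximum(0.0, x[:, 0] - 0.8)   # violated whenever x[0] > 0.8
        return np.stack([f1, f2, g], axis=1)
    pops, scores = IDEA(toy_objective, n_constraints=1, x_min=0.0, x_max=1.0, d=2, n=40,
                        alpha_inf=0.2, eta_c=15.0, eta_m=20.0, p_c=0.9, p_m=0.1,
                        num_iterations=50, log_interval=10)
    print(pops.shape, scores.shape)          # (50, 40, 2) (50, 40, 3)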
|
"""
Misc functions.
"""
import ipaddress
import datetime
import hashlib
import json
import netaddr
import netifaces
import os
import re
import requests
import scapy.all as sc
import subprocess
import sys
import threading
import time
import traceback
import uuid
import server_config
IPv4_REGEX = re.compile(r'[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}')
sc.conf.verb = 0
# If non-empty, then only devices with the following MAC addresses will be
# inspected. Do not populate this list in production. For internal testing.
TEST_OUI_LIST = [
# 'd83134', # Roku
# '74f61c', # Danny's Pixel phone
]
# Make sure Inspector's directory exists
home_dir = os.path.join(os.path.expanduser('~'), 'princeton-iot-inspector')
if not os.path.isdir(home_dir):
os.mkdir(home_dir)
def is_ipv4_addr(value):
return IPv4_REGEX.match(value)
def get_user_config():
"""Returns the user_config dict."""
user_config_file = os.path.join(
os.path.expanduser('~'),
'princeton-iot-inspector',
'iot_inspector_config.json'
)
try:
with open(user_config_file) as fp:
return json.load(fp)
except Exception:
pass
while True:
user_key = requests.get(server_config.NEW_USER_URL).text.strip()
# Make sure we're not getting server's error messages
if len(user_key) == 32:
break
time.sleep(1)
user_key = user_key.replace('-', '')
secret_salt = str(uuid.uuid4())
with open(user_config_file, 'w') as fp:
config_dict = {
'user_key': user_key,
'secret_salt': secret_salt
}
json.dump(config_dict, fp)
return config_dict
class TimeoutError(Exception):
pass
_lock = threading.Lock()
def log(*args):
log_str = '[%s] ' % datetime.datetime.today()
log_str += ' '.join([str(v) for v in args])
log_file_path = os.path.join(
os.path.expanduser('~'),
'princeton-iot-inspector',
'iot_inspector_logs.txt'
)
print(log_str)
with open(log_file_path, 'a') as fp:
fp.write(log_str + '\n')
def get_gateway_ip(timeout=10):
"""Returns the IP address of the gateway."""
return get_default_route()[0]
def get_host_ip(timeout=10):
"""Returns the host's local IP (where IoT Inspector client runs)."""
return get_default_route()[2]
def _get_routes():
while True:
sc.conf.route.resync()
routes = sc.conf.route.routes
if routes:
return routes
time.sleep(1)
def get_default_route():
"""Returns (gateway_ip, iface, host_ip)."""
while True:
routes = _get_routes()
# Look for network = 0.0.0.0, netmask = 0.0.0.0
for default_route in routes:
if default_route[0] == 0 and default_route[1] == 0:
#return default_route[2:5]
return ('192.168.5.1', 'wlan0', '192.168.5.7')
log('get_default_route: retrying')
time.sleep(1)
def get_network_ip_range_windows():
    """Returns the set of IPs on the default interface's subnet (Windows), mirroring gget_network_ip_range below."""
    default_route = get_default_route()
    iface_ip = default_route[2]
    iface_guid = sc.conf.iface.guid  # on Windows, scapy exposes the interface GUID
    ip_set = set()
    netmask = None
    for k, v in netifaces.ifaddresses(iface_guid).items():
        if v[0].get('addr') == iface_ip:
            netmask = v[0].get('netmask')
            break
    if netmask is None:
        return ip_set
    network = netaddr.IPAddress(iface_ip)
    cidr = netaddr.IPAddress(netmask).netmask_bits()
    subnet = netaddr.IPNetwork('{}/{}'.format(network, cidr))
    for ip in subnet:
        ip_set.add(str(ip))
    return ip_set
def get_network_ip_range():
return set(['192.168.5.1', '192.168.5.6', '192.168.5.14', '192.168.5.15', '192.168.5.19'])
def gget_network_ip_range():
"""
Gets network IP range for the default interface specified
by scapy.conf.iface
"""
ip_set = set()
default_route = get_default_route()
iface_str = ''
if sys.platform.startswith('win'):
iface_info = sc.conf.iface
iface_str = iface_info.guid
else:
iface_str = sc.conf.iface
netmask = None
for k, v in netifaces.ifaddresses(iface_str).items():
if v[0]['addr'] == default_route[2]:
netmask = v[0]['netmask']
break
# Netmask is None when user runs VPN.
if netmask is None:
return set()
gateway_ip = netaddr.IPAddress(default_route[0])
cidr = netaddr.IPAddress(netmask).netmask_bits()
subnet = netaddr.IPNetwork('{}/{}'.format(gateway_ip, cidr))
for ip in subnet:
ip_set.add(str(ip))
return ip_set
def get_my_mac():
"""Returns the MAC addr of the default route interface."""
mac_set = get_my_mac_set(iface_filter=get_default_route()[1])
return mac_set.pop()
def get_my_mac_set(iface_filter=None):
"""Returns a set of MAC addresses of the current host."""
out_set = set()
if sys.platform.startswith("win"):
from scapy.arch.windows import NetworkInterface
if type(iface_filter) == NetworkInterface:
out_set.add(iface_filter.mac)
for iface in sc.get_if_list():
if iface_filter is not None and iface != iface_filter:
continue
try:
mac = sc.get_if_hwaddr(iface)
except Exception as e:
continue
else:
out_set.add(mac)
return out_set
class _SafeRunError(object):
"""Used privately to denote error state in safe_run()."""
def __init__(self):
pass
def restart_upon_crash(func, args=[], kwargs={}):
"""Restarts func upon unexpected exception and logs stack trace."""
while True:
result = safe_run(func, args, kwargs)
if isinstance(result, _SafeRunError):
time.sleep(1)
continue
return result
def safe_run(func, args=[], kwargs={}):
"""Returns _SafeRunError() upon failure and logs stack trace."""
try:
return func(*args, **kwargs)
except Exception as e:
err_msg = '=' * 80 + '\n'
err_msg += 'Time: %s\n' % datetime.datetime.today()
err_msg += 'Function: %s %s %s\n' % (func, args, kwargs)
err_msg += 'Exception: %s\n' % e
err_msg += str(traceback.format_exc()) + '\n\n\n'
with _lock:
sys.stderr.write(err_msg + '\n')
log(err_msg)
return _SafeRunError()
def get_device_id(device_mac, host_state):
device_mac = str(device_mac).lower().replace(':', '')
s = device_mac + str(host_state.secret_salt)
return 's' + hashlib.sha256(s.encode('utf-8')).hexdigest()[0:10]
def smart_max(v1, v2):
"""
Returns max value even if one value is None.
Python cannot compare None and int, so build a wrapper
around it.
"""
if v1 is None:
return v2
if v2 is None:
return v1
return max(v1, v2)
def smart_min(v1, v2):
"""
Returns the min value even if one of the values is None.
Python cannot compare None and int, so build a wrapper around min().
"""
if v1 is None:
return v2
if v2 is None:
return v1
return min(v1, v2)
def get_min_max_tuple(min_max_tuple, value):
"""
Returns a new min_max_tuple with value considered.
For example:
min_max_tuple = (2, 3)
print(get_min_max_tuple(min_max_tuple, 4))
We get back (2, 4).
"""
min_v, max_v = min_max_tuple
min_v = smart_min(min_v, value)
max_v = smart_max(max_v, value)
return (min_v, max_v)
def get_oui(mac):
return mac.replace(':', '').lower()[0:6]
def get_os():
"""Returns 'mac', 'linux', or 'windows'. Raises RuntimeError otherwise."""
os_platform = sys.platform
if os_platform.startswith('darwin'):
return 'mac'
if os_platform.startswith('linux'):
return 'linux'
if os_platform.startswith('win'):
return 'windows'
raise RuntimeError('Unsupported operating system.')
def open_browser_on_windows(url):
try:
subprocess.call(['start', '', url], shell=True)
except Exception:
pass
|
# Project: py-trans
# Author: Itz-fork
import aiohttp
from .language_codes import _get_full_lang_name, _get_lang_code
from .errors import check_internet_connection, UnknownErrorOccurred
class Async_PyTranslator:
"""
Async PyTranslator Class
Note:
Before trying to translate, create an instance of this class with a provider (the default provider is google)
Providers:
google - Google Translate
libre - LibreTranslate Engine
translate.com - translate.com Translate
my_memory - MyMemory Translate
translate_dict - Translate Dict
Argument(s):
provider - Provider of Translator. (Must be a supported provider)
Example(s):
async_pytranslator = Async_PyTranslator(provider="google")
"""
def __init__(self, provider="google"):
# Checking internet connection
check_internet_connection()
self.providers = ["google", "libre", "translate.com", "my_memory", "translate_dict"]
if provider in self.providers:
self.provider = provider
else:
self.provider = "google"
# Headers
self.lheader = {"Origin": "https://libretranslate.com", "Host": "libretranslate.com", "Referer": "https://libretranslate.com/"}
# aiohttp session for translation purpose
self.t_session = aiohttp.ClientSession()
# aiohttp session for detecting source lang (This represents the laziness of me)
self.d_session = aiohttp.ClientSession()
async def translate(self, text, dest_lang="en"):
"""
Translator Function
Argument(s):
text - Source Text (Text that need to be translated)
dest_lang - Destination Language
Example(s):
await async_pytranslator.translate(text="Hi, How are you?", dest_lang="si")
"""
if self.provider == "google":
return await self.google_translate(text, dest_lang)
elif self.provider == "libre":
return await self.libre_translate(text, dest_lang)
elif self.provider == "translate.com":
return await self.translate_com(text, dest_lang)
elif self.provider == "my_memory":
return await self.my_memory(text, dest_lang)
elif self.provider == "translate_dict":
return await self.translate_dict(text, dest_lang)
else:
return
# Google Translate
async def google_translate(self, text, dest_lang):
r_url = f"https://clients5.google.com/translate_a/t?client=dict-chrome-ex&sl=auto&tl={dest_lang}&q={text}"
try:
async with self.t_session as tr_ses:
async with tr_ses.get(r_url) as get_req:
request_resp = await get_req.json()
translation = ""
for tr in request_resp["sentences"]:
try:
translation += tr["trans"]
except KeyError:
pass
except BaseException as e:
raise UnknownErrorOccurred(e)
origin_text = text
origin_lang = await self.get_lang_name(request_resp['src'])
dest_lang_f = await self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "Google Translate", "translation": translation, "dest_lang": dest_lang_f, "orgin_text": origin_text, "origin_lang": origin_lang}
# Closing unwanted language detection aiohttp session
await self.d_session.close()
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# LibreTranslate
async def _detect_lang(self, text, full_name=False):
r_url = "https://libretranslate.com/detect"
ld_data = {"q": str(text)}
try:
async with self.d_session as tr_ses:
async with tr_ses.post(r_url, data=ld_data) as get_req:
request_resp = await get_req.json()
language_code = request_resp[0]["language"]
except:
# If we can't detect the language, assume it's just English (RIP moment)
language_code = "en"
if full_name is False:
return language_code
else:
return await self.get_lang_name(language_code)
async def libre_translate(self, text, dest_lang):
r_url = "https://libretranslate.com/translate"
try:
source_lang = await self._detect_lang(text=text, full_name=False)
l_data = {"q": str(text), "source": source_lang, "target": dest_lang}
async with self.t_session as tr_ses:
async with tr_ses.post(r_url, data=l_data) as get_req:
request_resp = await get_req.json()
translation = request_resp["translatedText"]
origin_lang = await self.get_lang_name(source_lang)
dest_lang_f = await self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "LibreTranslate", "translation": translation, "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# Translate.com
async def translate_com(self, text, dest_lang):
r_url = "https://www.translate.com/translator/ajax_translate"
try:
source_lang = await self._detect_lang(text=text, full_name=False)
tr_data = {"text_to_translate": str(text), "source_lang": source_lang, "translated_lang": dest_lang, "use_cache_only": "false"}
async with self.t_session as tr_ses:
async with tr_ses.post(url=r_url, data=tr_data) as get_req:
request_resp = await get_req.json(content_type='text/html')
translation = request_resp["translated_text"]
origin_lang = await self.get_lang_name(source_lang)
dest_lang_f = await self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "Translate.com", "translation": translation, "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# My Memory
async def my_memory(self, text, dest_lang):
r_url = "https://api.mymemory.translated.net/get"
try:
source_lang = await self._detect_lang(text=text, full_name=False)
m_params = {"q": text, "langpair": f"{source_lang}|{dest_lang}"}
async with self.t_session as tr_ses:
async with tr_ses.get(r_url, params=m_params) as get_req:
request_resp = await get_req.json()
translation = request_resp["matches"][0]["translation"]
origin_lang = await self.get_lang_name(source_lang)
dest_lang_f = await self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "MyMemory", "translation": translation, "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# Translate Dict
async def translate_dict(self, text, dest_lang):
r_url = f"https://t3.translatedict.com/1.php?p1=auto&p2={dest_lang}&p3={text}"
try:
async with self.t_session as tr_ses:
async with tr_ses.get(r_url) as get_req:
request_resp = await get_req.text()
origin_lang = await self._detect_lang(text=text, full_name=True)
dest_lang_f = await self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "Translate Dict", "translation": request_resp, "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# Get Language Names
async def get_lang_name(self, text):
if len(text) == 2:
return _get_full_lang_name(text)
else:
if len(text) <= 3:
return "Not a full language name"
else:
return _get_lang_code(text)
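# Illustrative usage (not part of the original module). Each aiohttp session here is
# consumed by an "async with" block, so create a fresh Async_PyTranslator per translation:
#   import asyncio
#   tr = Async_PyTranslator(provider="google")
#   result = asyncio.run(tr.translate("Hola, como estas?", dest_lang="en"))
#   print(result.get("translation"))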
|
# -*- coding: utf-8 -*-
"""API for working with saved queries for assets."""
import warnings
from typing import Generator, List, Optional, Union
from ...constants.api import MAX_PAGE_SIZE
from ...exceptions import NotFoundError, ResponseError, ApiWarning
# from ...features import Features
from ...parsers.tables import tablize_sqs
from ...tools import check_gui_page_size, listify
from .. import json_api
from ..api_endpoints import ApiEndpoints
from ..mixins import ChildMixins
# XXX need update saved query
class SavedQuery(ChildMixins):
"""API object for working with saved queries for the parent asset type.
Examples:
Create a ``client`` using :obj:`axonius_api_client.connect.Connect` and assume
``apiobj`` is either ``client.devices`` or ``client.users``
>>> apiobj = client.devices # or client.users
* Get a saved query by name: :meth:`get_by_name`
* Get a saved query by UUID: :meth:`get_by_uuid`
* Get a saved query by tags: :meth:`get_by_tags`
* Get all saved query tags: :meth:`get_tags`
* Get all saved queries: :meth:`get`
* Add a saved query: :meth:`add`
* Delete a saved query by name: :meth:`delete_by_name`
* Delete a saved query by UUID or SQ object: :meth:`delete`
See Also:
* Device assets :obj:`axonius_api_client.api.assets.devices.Devices`
* User assets :obj:`axonius_api_client.api.assets.users.Users`
"""
def get_by_name(self, value: str) -> dict:
"""Get a saved query by name.
Examples:
Get a saved query by name
>>> sq = apiobj.saved_query.get_by_name(name="test")
>>> sq['tags']
['Unmanaged Devices']
>>> sq['description'][:80]
'Devices that have been seen by at least one agent or at least one endpoint manag'
>>> sq['view']['fields']
[
'adapters',
'specific_data.data.name',
'specific_data.data.hostname',
'specific_data.data.last_seen',
'specific_data.data.network_interfaces.manufacturer',
'specific_data.data.network_interfaces.mac',
'specific_data.data.network_interfaces.ips',
'specific_data.data.os.type',
'labels'
]
>>> sq['view']['query']['filter'][:80]
'(specific_data.data.adapter_properties == "Agent") or (specific_data.data.adapte'
Args:
value: name of saved query
"""
data = self.get()
found = [x for x in data if x["name"] == value]
if found:
return found[0]
err = f"Saved Query with name of {value!r} not found"
raise NotFoundError(tablize_sqs(data=data, err=err))
def get_by_uuid(self, value: str) -> dict:
"""Get a saved query by uuid.
Examples:
Get a saved query by uuid
>>> sq = apiobj.saved_query.get_by_uuid(value="5f76721ce4557d5cba93f59e")
Args:
value: uuid of saved query
"""
data = self.get()
found = [x for x in data if x["uuid"] == value]
if found:
return found[0]
err = f"Saved Query with UUID of {value!r} not found"
raise NotFoundError(tablize_sqs(data=data, err=err))
def get_by_tags(self, value: Union[str, List[str]], **kwargs) -> List[dict]:
"""Get saved queries by tags.
Examples:
Get all saved queries with tagged with 'AD'
>>> sqs = apiobj.saved_query.get_by_tags('AD')
>>> len(sqs)
2
Get all saved queries with tagged with 'AD' or 'AWS'
>>> sqs = apiobj.saved_query.get_by_tags(['AD', 'AWS'])
>>> len(sqs)
5
Args:
value: list of tags
**kwargs: passed to :meth:`get`
Raises:
:exc:`NotFoundError`: if no saved queries found tagged with supplied tags
"""
value = listify(value)
rows = self.get(**kwargs)
matches = []
known = set()
for row in rows:
for tag in row.get("tags", []):
known.add(tag)
if tag in value and row not in matches:
matches.append(row)
if not matches:
valid = "\n " + "\n ".join(sorted(list(known)))
msg = f"No saved query found with tags {value!r}, valid tags:{valid}"
raise NotFoundError(msg)
return matches
def get_tags(self, **kwargs) -> List[str]:
"""Get all tags for saved queries.
Examples:
Get all known tags for all saved queries
>>> tags = apiobj.saved_query.get_tags()
>>> len(tags)
19
Args:
**kwargs: passed to :meth:`get`
"""
rows = self.get(**kwargs)
tags = [y for x in rows for y in x.get("tags", [])]
return sorted(list(set(tags)))
def get(self, generator: bool = False) -> Union[Generator[dict, None, None], List[dict]]:
"""Get all saved queries.
Examples:
Get all saved queries
>>> sqs = apiobj.saved_query.get()
>>> len(sqs)
39
Args:
generator: return an iterator
"""
gen = self.get_generator()
return gen if generator else list(gen)
def get_generator(self) -> Generator[dict, None, None]:
"""Get Saved Queries using a generator."""
offset = 0
while True:
rows = self._get(offset=offset)
offset += len(rows)
if not rows:
break
for row in rows:
yield row.to_dict()
def add(
self,
name: str,
query: Optional[str] = None,
tags: Optional[List[str]] = None,
description: Optional[str] = None,
expressions: Optional[List[str]] = None,
fields: Optional[Union[List[str], str]] = None,
fields_manual: Optional[Union[List[str], str]] = None,
fields_regex: Optional[Union[List[str], str]] = None,
fields_fuzzy: Optional[Union[List[str], str]] = None,
fields_default: bool = True,
fields_root: Optional[str] = None,
sort_field: Optional[str] = None,
sort_descending: bool = True,
column_filters: Optional[dict] = None,
gui_page_size: Optional[int] = None,
private: bool = False,
always_cached: bool = False,
**kwargs,
) -> dict:
"""Create a saved query.
Examples:
Create a saved query using a :obj:`axonius_api_client.api.wizards.wizard.Wizard`
>>> parsed = apiobj.wizard_text.parse(content="simple hostname contains blah")
>>> query = parsed["query"]
>>> expressions = parsed["expressions"]
>>> sq = apiobj.saved_query.add(
... name="test",
... query=query,
... expressions=expressions,
... description="meep meep",
... tags=["nyuck1", "nyuck2", "nyuck3"],
... )
Notes:
Saved Queries created without expressions will not be editable using the query wizard
in the GUI. Use :obj:`axonius_api_client.api.wizards.wizard.Wizard` to produce a query
and its accompanying expressions for the GUI query wizard.
Args:
name: name of saved query
description: description
tags: list of tags
expressions: expressions built by :obj:`axonius_api_client.api.wizards.wizard.Wizard`
query: query built by GUI or the CLI query wizard
fields: fields to return for each asset (will be validated)
fields_manual: fields to return for each asset (will NOT be validated)
fields_regex: regex of fields to return for each asset
fields_fuzzy: string to fuzzy match of fields to return for each asset
fields_default: include the default fields defined in the parent asset object
fields_root: include all fields of an adapter that are not complex sub-fields
sort_field: sort the returned assets on a given field
sort_descending: reverse the sort of the returned assets
column_filters: column filters keyed as field_name:value
gui_page_size: show N rows per page in GUI
private: make this saved query private to current user
"""
query_expr: Optional[str] = kwargs.get("query_expr", None) or query
gui_page_size = check_gui_page_size(size=gui_page_size)
fields = self.parent.fields.validate(
fields=fields,
fields_manual=fields_manual,
fields_regex=fields_regex,
fields_default=fields_default,
fields_root=fields_root,
fields_fuzzy=fields_fuzzy,
)
if sort_field:
sort_field = self.parent.fields.get_field_name(value=sort_field)
data_column_filters = {}
if column_filters:
for col_field, col_value in column_filters.items():
col_field = self.parent.fields.get_field_name(value=col_field)
data_column_filters[col_field] = col_value
dmeta = {} # TBD
dmeta["enforcementFilter"] = None # TBD
dmeta["uniqueAdapters"] = False # TBD
data_query = {}
data_query["filter"] = query or ""
if query_expr:
data_query["onlyExpressionsFilter"] = query_expr
data_query["expressions"] = expressions or []
data_query["search"] = None # TBD
data_query["meta"] = dmeta # TBD
data_sort = {}
data_sort["desc"] = sort_descending
data_sort["field"] = sort_field or ""
data_view = {}
data_view["query"] = data_query
data_view["sort"] = data_sort
data_view["fields"] = fields
data_view["pageSize"] = gui_page_size
# 4.5 SEMI_BREAKING_CHANGE: now a list of dict
# data_view["colFilters"] = listify(data_column_filters or {})
if data_column_filters:
msg = "Column filters structure has changed and is currently not supported by the API client."
warnings.warn(message=msg, category=ApiWarning)
# 4.5 SEMI_BREAKING_CHANGE: now a list of dict
# data_view["colExcludedAdapters"] = listify({}) # TBD
# data = {}
# data["name"] = name
# data["query_type"] = "saved"
# data["description"] = description
# data["view"] = data_view
# data["tags"] = tags or []
# data["private"] = private
added = self._add(
name=name,
description=description,
view=data_view,
private=private,
always_cached=always_cached,
tags=tags,
)
return self.get_by_uuid(value=added.id)
def delete_by_name(self, value: str, **kwargs) -> dict:
"""Delete a saved query by name.
Examples:
Delete the saved query by name
>>> deleted = apiobj.saved_query.delete_by_name(name="test")
Args:
value: name of saved query to delete
**kwargs: passed to :meth:`get_by_name`
"""
row = self.get_by_name(value=value, **kwargs)
self._delete(uuid=row["uuid"])
return row
def delete(self, rows: Union[str, List[str], List[dict]]) -> List[str]:
"""Delete saved queries.
Args:
rows: list of UUIDs or rows previously fetched saved queries to delete
"""
rows = listify(rows)
deleted = []
for row in rows:
uuid = row["uuid"] if isinstance(row, dict) else row
self._delete(uuid=uuid)
deleted.append(uuid)
return deleted
def _add(
self,
name: str,
view: dict,
description: Optional[str] = "",
always_cached: bool = False,
private: bool = False,
tags: Optional[List[str]] = None,
) -> str:
"""Direct API method to create a saved query.
Args:
data: saved query metadata
"""
api_endpoint = ApiEndpoints.saved_queries.create
request_obj = api_endpoint.load_request(
name=name,
view=view,
description=description,
always_cached=always_cached,
private=private,
tags=tags or [],
)
return api_endpoint.perform_request(
http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE
)
def _delete(self, uuid: str) -> json_api.generic.Metadata:
"""Direct API method to delete saved queries.
Args:
ids: list of uuid's to delete
"""
# NEW_IN: 05/31/21 cortex/develop
try:
api_endpoint = ApiEndpoints.saved_queries.delete
request_obj = api_endpoint.load_request()
return api_endpoint.perform_request(
http=self.auth.http,
request_obj=request_obj,
asset_type=self.parent.ASSET_TYPE,
uuid=uuid,
)
except ResponseError as exc:
if exc.is_incorrect_type:
api_endpoint = ApiEndpoints.saved_queries.delete_4_3
request_obj = api_endpoint.load_request()
return api_endpoint.perform_request(
http=self.auth.http,
request_obj=request_obj,
asset_type=self.parent.ASSET_TYPE,
uuid=uuid,
)
raise
def _get(
self, limit: int = MAX_PAGE_SIZE, offset: int = 0
) -> List[json_api.saved_queries.SavedQuery]:
"""Direct API method to get all users.
Args:
limit: limit to N rows per page
offset: start at row N
"""
api_endpoint = ApiEndpoints.saved_queries.get
request_obj = api_endpoint.load_request(page={"limit": limit, "offset": offset})
return api_endpoint.perform_request(
http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE
)
|
"""
Miscellaneous package utilities.
.. include:: ../include/links.rst
"""
from itertools import chain, combinations
from IPython import embed
import numpy
def all_subclasses(cls):
"""
Collect all the subclasses of the provided class.
The search follows the inheritance to the highest-level class. Intermediate
base classes are included in the returned set, but not the base class itself.
Thanks to:
https://stackoverflow.com/questions/3862310/how-to-find-all-the-subclasses-of-a-class-given-its-name
Args:
cls (object):
The base class
Returns:
:obj:`set`: The unique set of derived classes, including any
intermediate base classes in the inheritance thread.
"""
return set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in all_subclasses(c)])
def string_table(tbl, delimeter='print', has_header=True):
"""
Provided the array of data, format it with equally spaced columns
and add a header (first row) and contents delimeter.
Args:
tbl (`numpy.ndarray`_):
Array of string representations of the data to print.
delimeter (:obj:`str`, optional):
If the first row in the table contains the column headers (see
``has_header``), this sets the delimeter between first table row and
the column data. Use ``'print'`` for a simple line of hyphens,
anything else results in an ``rst`` style table formatting.
has_header (:obj:`bool`, optional):
The first row in ``tbl`` contains the column headers.
Returns:
:obj:`str`: Single long string with the data table.
"""
nrows, ncols = tbl.shape
col_width = [numpy.amax([len(dij) for dij in dj]) for dj in tbl.T]
_nrows = nrows
start = 1
if delimeter != 'print':
_nrows += 2
start += 1
if has_header:
_nrows += 1
start += 1
row_string = ['']*_nrows
for i in range(start,nrows+start-1):
row_string[i] = ' '.join([tbl[1+i-start,j].ljust(col_width[j]) for j in range(ncols)])
if delimeter == 'print':
# Heading row
row_string[0] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)])
# Delimiter
if has_header:
row_string[1] = '-'*len(row_string[0])
return '\n'.join(row_string)+'\n'
# For an rst table
row_string[0] = ' '.join([ '='*col_width[j] for j in range(ncols)])
row_string[1] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)])
if has_header:
row_string[2] = row_string[0]
row_string[-1] = row_string[0]
return '\n'.join(row_string)+'\n'
def powerset(iterable, reverse=False):
"""
Construct an iterable that steps through all combinations of the
provided iterable.
This is pulled from the recipes provided by the itertools
documentation.
Examples:
Get all unique combinations of the list [1,2,3]:
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
Args:
iterable (iterable):
An iterable object
reverse (:obj:`bool`, optional):
Reverse the order (only roughly) of the iterable by placing
the longer sequences first.
Returns:
`itertools.chain`: Iterable object that returns the sequence of
combinations.
"""
rng = range(len(iterable)+1)[::-1] if reverse else range(len(iterable)+1)
return chain.from_iterable(combinations(iterable, r) for r in rng)
def polygon_winding_number(polygon, point):
"""
Determine the winding number of a 2D polygon about a point.
The code does **not** check if the polygon is simple (no intersecting line
segments). Algorithm taken from Numerical Recipes Section 21.4.
Args:
polygon (`numpy.ndarray`_):
An Nx2 array containing the x,y coordinates of a polygon.
The points should be ordered either counter-clockwise or
clockwise.
point (`numpy.ndarray`_):
One or more points for the winding number calculation.
Must be either a 2-element array for a single (x,y) pair,
or an Nx2 array with N (x,y) points.
Returns:
:obj:`int`, `numpy.ndarray`_: The winding number of each point with
respect to the provided polygon. Points inside the polygon have winding
numbers of 1 or -1; see :func:`point_inside_polygon`.
Raises:
ValueError:
Raised if ``polygon`` is not 2D, if ``polygon`` does not have two
columns, or if the last axis of ``point`` does not have 2 and only 2
elements.
"""
# Check input shape is for 2D only
if len(polygon.shape) != 2:
raise ValueError('Polygon must be an Nx2 array.')
if polygon.shape[1] != 2:
raise ValueError('Polygon must be in two dimensions.')
_point = numpy.atleast_2d(point)
if _point.shape[1] != 2:
raise ValueError('Point must contain two elements.')
# Get the winding number
nvert = polygon.shape[0]
npnt = _point.shape[0]
dl = numpy.roll(polygon, 1, axis=0)[None,:,:] - _point[:,None,:]
dr = polygon[None,:,:] - _point[:,None,:]
dx = dl[...,0]*dr[...,1] - dl[...,1]*dr[...,0]
indx_l = dl[...,1] > 0
indx_r = dr[...,1] > 0
wind = numpy.zeros((npnt, nvert), dtype=int)
wind[indx_l & numpy.logical_not(indx_r) & (dx < 0)] = -1
wind[numpy.logical_not(indx_l) & indx_r & (dx > 0)] = 1
return numpy.sum(wind, axis=1)[0] if point.ndim == 1 else numpy.sum(wind, axis=1)
def point_inside_polygon(polygon, point):
"""
Determine if one or more points is inside the provided polygon.
Primarily a wrapper for :func:`polygon_winding_number`, that
returns True for each point that is inside the polygon.
Args:
polygon (`numpy.ndarray`_):
An Nx2 array containing the x,y coordinates of a polygon.
The points should be ordered either counter-clockwise or
clockwise.
point (`numpy.ndarray`_):
One or more points for the winding number calculation.
Must be either a 2-element array for a single (x,y) pair,
or an Nx2 array with N (x,y) points.
Returns:
:obj:`bool`, `numpy.ndarray`: Boolean indicating whether or not each
point is within the polygon.
"""
return numpy.absolute(polygon_winding_number(polygon, point)) == 1
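# Illustrative usage sketch (not part of the original module): classify two points against
# a unit square; the sign of the winding number depends on the vertex ordering.
if __name__ == '__main__':
    _square = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    _points = numpy.array([[0.5, 0.5], [1.5, 0.5]])
    print(polygon_winding_number(_square, _points))   # e.g. [1 0]
    print(point_inside_polygon(_square, _points))     # [ True False]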
|
import math
import tensorflow as tf
import os
import struct
import pdb
import numpy as np
from datasets import dataset_factory
from nets import nets_factory
import nets.resnet_v2 as resnet_v2
from preprocessing import preprocessing_factory
slim = tf.contrib.slim
def merge_predictions(predictions_fn):
'''
Merge predictions/logit scores for products that are the same.
'''
out_f = open(predictions_fn + '_merged', 'wb')
f = open(predictions_fn, 'r')
line = f.readline().strip().split()
curr_id = line[0]
curr_scores = np.power(np.array([float(x) for x in line[1:]]), 3)
num_elems = 1
line = f.readline().strip().split()
while line != []:
id = line[0]
# raise elements to the third power, and then take the cubic root
scores = np.power(np.array([float(x) for x in line[1:]]), 3)
if id == curr_id:
num_elems += 1
curr_scores += scores
else:
curr_scores = np.cbrt(curr_scores / float(num_elems))
for score in curr_scores:
out_f.write(struct.pack('>f', score))
curr_scores = scores
num_elems = 1
curr_id = id
line = f.readline().strip().split()
curr_scores = np.cbrt(curr_scores / float(num_elems))
for score in curr_scores:
out_f.write(struct.pack('>f', score))
out_f.close()
f.close()
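# Note on the expected file layout (inferred from the code above): each line of the input
# predictions file is "<product_id> <score_0> <score_1> ... <score_{num_classes-1}>", with
# lines for the same product id appearing consecutively. The merged output packs one
# big-endian 32-bit float per class for each unique product id.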
if __name__ == '__main__':
checkpoint_dir = '/home/shunan/Code/Data/cdiscount/training'
dataset_dir = '/home/shunan/Code/Data/cdiscount/tf_records'
num_classes = 5270
image_size = 180
batch_size = 100
set_name = 'validation'
data_sizes = {'train': 12195682, 'validation': 175611, 'test': 3095080}
out_fn = os.path.join(dataset_dir, '{}_predictions.txt'.format(set_name))
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
# loading the dataset
dataset = dataset_factory.get_dataset('cdiscount', set_name, dataset_dir)
# dataset provider to load data from the dataset.
provider = slim.dataset_data_provider.DatasetDataProvider(dataset, shuffle=False, common_queue_capacity=2*batch_size,
common_queue_min=batch_size)
[image, label, product_id] = provider.get(['image', 'label', 'product_id'])
# Pre-processing step.
image_preprocessing_fn = preprocessing_factory.get_preprocessing('simple', is_training=False)
image = image_preprocessing_fn(image, image_size, image_size)
images, labels, product_ids = tf.train.batch([image, label, product_id], batch_size=batch_size, num_threads=1,
capacity=5 * batch_size)
# Get the model
# network_fn = nets_factory.get_network_fn('resnet_v2_152', num_classes=num_classes, is_training=False)
with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=0.)):
logits, end_points = resnet_v2.resnet_v2_152(images, num_classes=num_classes, is_training=False)
#Obtain the trainable variables and a saver
variables_to_restore = slim.get_variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
output_f = open(out_fn, 'w')
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
sess.run(tf.global_variables_initializer())
saver.restore(sess, checkpoint_file)
num_iters = int(math.ceil(data_sizes[set_name] / float(batch_size)))
num_last_batch = batch_size - ((num_iters * batch_size) - data_sizes[set_name])
for i in range(num_iters):
output, ids = sess.run([logits, product_ids])
if i == num_iters - 1:
output = output[:num_last_batch, :]
ids = ids[:num_last_batch]
for j in range(output.shape[0]):
vec_str = [str(x) for x in output[j, :]]
output_f.write(str(ids[j]) + ' ' + ' '.join(vec_str) + '\n')
output_f.close()
|
# -*- coding: utf-8 -*-
from logging import getLogger
from time import time, strftime
from BTrees.IIBTree import IITreeSet
from Products.CMFCore.utils import getToolByName
from Products.Five.browser import BrowserView
from plone.uuid.interfaces import IUUID, IUUIDAware
from zope.interface import implementer
from zope.component import queryUtility, queryAdapter
from collective.solr.indexer import DefaultAdder
from collective.solr.flare import PloneFlare
from collective.solr.interfaces import ISolrConnectionManager
from collective.solr.interfaces import ISolrMaintenanceView
from collective.solr.interfaces import ISolrAddHandler
from collective.solr.interfaces import ICheckIndexable
from collective.solr.indexer import SolrIndexProcessor
from collective.solr.indexer import boost_values
from collective.solr.parser import parse_date_as_datetime
from collective.solr.parser import SolrResponse
from collective.solr.parser import unmarshallers
from collective.solr.utils import findObjects
from collective.solr.utils import prepareData
logger = getLogger("collective.solr.maintenance")
MAX_ROWS = 1000000000
try:
from time import process_time
except ImportError:
# Python < 3.8
from time import clock as process_time
def timer(func=time):
"""set up a generator returning the elapsed time since the last call"""
def gen(last=func()):
while True:
elapsed = func() - last
last = func()
yield "%.3fs" % elapsed
return gen()
def checkpointIterator(function, interval=100):
"""the iterator will call the given function for every nth invocation"""
counter = 0
while True:
counter += 1
if counter % interval == 0:
function()
yield None
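# Illustrative usage of the two generators above (not part of the original module):
#   lap = timer()                          # starts the wall-clock lap counter
#   cpi = checkpointIterator(commit, 100)  # commit() is whatever callable you pass in
#   for obj in objects:
#       index(obj)
#       next(cpi)                          # runs commit() on every 100th call
#   print(next(lap))                       # elapsed time since the last call, e.g. "12.345s"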
def notimeout(func):
"""decorator to prevent long-running solr tasks from timing out"""
def wrapper(*args, **kw):
"""wrapper with random docstring so ttw access still works"""
manager = queryUtility(ISolrConnectionManager)
manager.setTimeout(None, lock=True)
try:
return func(*args, **kw)
finally:
manager.setTimeout(None, lock=False)
return wrapper
@implementer(ISolrMaintenanceView)
class SolrMaintenanceView(BrowserView):
"""helper view for indexing all portal content in Solr"""
def mklog(self, use_std_log=False):
"""helper to prepend a time stamp to the output"""
write = self.request.RESPONSE.write
def log(msg, timestamp=True):
if timestamp:
msg = strftime("%Y/%m/%d-%H:%M:%S ") + msg
write(msg.encode("utf-8"))
if use_std_log:
logger.info(msg)
return log
def optimize(self):
"""optimize solr indexes"""
manager = queryUtility(ISolrConnectionManager)
conn = manager.getConnection()
conn.setTimeout(None)
conn.commit(optimize=True)
return "solr indexes optimized."
def clear(self):
"""clear all data from solr, i.e. delete all indexed objects"""
manager = queryUtility(ISolrConnectionManager)
uniqueKey = manager.getSchema().uniqueKey
conn = manager.getConnection()
conn.setTimeout(None)
conn.deleteByQuery("%s:[* TO *]" % uniqueKey)
conn.commit()
return "solr index cleared."
def reindex(
self,
batch=1000,
skip=0,
limit=0,
ignore_portal_types=None,
only_portal_types=None,
idxs=[],
ignore_exceptions=False,
):
"""find all contentish objects (meaning all objects derived from one
of the catalog mixin classes) and (re)indexes them"""
if ignore_portal_types and only_portal_types:
raise ValueError(
"It is not possible to combine "
"ignore_portal_types with only_portal_types"
)
atomic = idxs != []
manager = queryUtility(ISolrConnectionManager)
proc = SolrIndexProcessor(manager)
conn = manager.getConnection()
zodb_conn = self.context._p_jar
log = self.mklog()
log("reindexing solr catalog...\n")
if skip:
log("skipping indexing of %d object(s)...\n" % skip)
if limit:
log("limiting indexing to %d object(s)...\n" % limit)
real = timer() # real time
lap = timer() # real lap time (for intermediate commits)
cpu = timer(process_time) # cpu time
processed = 0
schema = manager.getSchema()
key = schema.uniqueKey
updates = {} # list to hold data to be updated
def flush():
return conn.commit(soft=True)
flush = notimeout(flush)
def checkPoint():
for my_boost_values, data in updates.values():
adder = data.pop("_solr_adder")
try:
adder(conn, boost_values=my_boost_values, **data)
except Exception as e:
logger.warning("Error %s @ %s", e, data["path_string"])
if not ignore_exceptions:
raise
updates.clear()
msg = (
"intermediate commit (%d items processed, "
"last batch in %s)...\n" % (processed, next(lap))
)
log(msg)
logger.info(msg)
flush()
zodb_conn.cacheGC()
cpi = checkpointIterator(checkPoint, batch)
count = 0
if atomic:
log("indexing only {0} \n".format(idxs))
for path, obj in findObjects(self.context):
if ICheckIndexable(obj)():
count += 1
if count <= skip:
continue
if ignore_portal_types:
if obj.portal_type in ignore_portal_types:
continue
if only_portal_types:
if obj.portal_type not in only_portal_types:
continue
attributes = None
if atomic:
attributes = idxs
# For atomic updates to work the uniqueKey must be present
# in *every* update operation.
if attributes and key not in attributes:
attributes.append(key)
data, missing = proc.getData(obj, attributes=attributes)
prepareData(data)
if not missing or atomic:
value = data.get(key, None)
if value is not None:
log("indexing %r\n" % obj)
pt = data.get("portal_type", "default")
adder = queryAdapter(obj, ISolrAddHandler, name=pt)
if adder is None:
adder = DefaultAdder(obj)
data["_solr_adder"] = adder
updates[value] = (boost_values(obj, data), data)
processed += 1
next(cpi)
else:
log("missing data, skipping indexing of %r.\n" % obj)
if limit and count >= (skip + limit):
break
checkPoint()
conn.commit()
log("solr index rebuilt.\n")
msg = "processed %d items in %s (%s cpu time)."
msg = msg % (processed, next(real), next(cpu))
log(msg)
logger.info(msg)
def sync(self, batch=1000, preImportDeleteQuery="*:*"):
"""Sync the Solr index with the portal catalog. Records contained
in the catalog but not in Solr will be indexed and records not
contained in the catalog will be removed.
"""
manager = queryUtility(ISolrConnectionManager)
proc = SolrIndexProcessor(manager)
conn = manager.getConnection()
key = queryUtility(ISolrConnectionManager).getSchema().uniqueKey
zodb_conn = self.context._p_jar
catalog = getToolByName(self.context, "portal_catalog")
getIndex = catalog._catalog.getIndex
modified_index = getIndex("modified")
uid_index = getIndex(key)
log = self.mklog()
real = timer() # real time
lap = timer() # real lap time (for intermediate commits)
cpu = timer(process_time) # cpu time
# get Solr status
response = conn.search(
q=preImportDeleteQuery, rows=MAX_ROWS, fl="%s modified" % key
)
# avoid creating DateTime instances
simple_unmarshallers = unmarshallers.copy()
simple_unmarshallers["date"] = parse_date_as_datetime
flares = SolrResponse(response, simple_unmarshallers)
response.close()
solr_results = {}
solr_uids = set()
def _utc_convert(value):
t_tup = value.utctimetuple()
return (
((t_tup[0] * 12 + t_tup[1]) * 31 + t_tup[2]) * 24 + t_tup[3]
) * 60 + t_tup[4]
for flare in flares:
uid = flare[key]
solr_uids.add(uid)
solr_results[uid] = _utc_convert(flare["modified"])
# get catalog status
cat_results = {}
cat_uids = set()
for uid, rid in uid_index._index.items():
cat_uids.add(uid)
cat_results[uid] = rid
# differences
index = cat_uids.difference(solr_uids)
solr_uids.difference_update(cat_uids)
unindex = solr_uids
processed = 0
flush = notimeout(lambda: conn.flush())
def checkPoint():
msg = (
"intermediate commit (%d items processed, "
"last batch in %s)...\n" % (processed, next(lap))
)
log(msg)
logger.info(msg)
flush()
zodb_conn.cacheGC()
cpi = checkpointIterator(checkPoint, batch)
# Look up objects
uid_rid_get = cat_results.get
rid_path_get = catalog._catalog.paths.get
catalog_traverse = catalog.unrestrictedTraverse
def lookup(
uid,
rid=None,
uid_rid_get=uid_rid_get,
rid_path_get=rid_path_get,
catalog_traverse=catalog_traverse,
):
if rid is None:
rid = uid_rid_get(uid)
if not rid:
return None
if not isinstance(rid, int):
rid = tuple(rid)[0]
path = rid_path_get(rid)
if not path:
return None
try:
obj = catalog_traverse(path)
except AttributeError:
return None
return obj
log('processing %d "unindex" operations next...\n' % len(unindex))
op = notimeout(lambda uid: conn.delete(id=uid))
for uid in unindex:
obj = lookup(uid)
if obj is None:
op(uid)
processed += 1
next(cpi)
else:
log("not unindexing existing object %r.\n" % uid)
log('processing %d "index" operations next...\n' % len(index))
op = notimeout(lambda obj: proc.index(obj))
for uid in index:
obj = lookup(uid)
if ICheckIndexable(obj)():
op(obj)
processed += 1
next(cpi)
else:
log("not indexing unindexable object %r.\n" % uid)
if obj is not None:
obj._p_deactivate()
log('processing "reindex" operations next...\n')
op = notimeout(lambda obj: proc.reindex(obj))
cat_mod_get = modified_index._unindex.get
solr_mod_get = solr_results.get
done = unindex.union(index)
for uid, rid in cat_results.items():
if uid in done:
continue
if isinstance(rid, IITreeSet):
rid = list(rid.keys())[0]
if cat_mod_get(rid) != solr_mod_get(uid):
obj = lookup(uid, rid=rid)
if ICheckIndexable(obj)():
op(obj)
processed += 1
next(cpi)
else:
log("not reindexing unindexable object %r.\n" % uid)
if obj is not None:
obj._p_deactivate()
conn.commit()
log("solr index synced.\n")
msg = "processed %d object(s) in %s (%s cpu time)."
msg = msg % (processed, next(real), next(cpu))
log(msg)
logger.info(msg)
def cleanup(self, batch=1000):
"""remove entries from solr that don't have a corresponding Zope
object or have a different UID than the real object"""
manager = queryUtility(ISolrConnectionManager)
proc = SolrIndexProcessor(manager)
conn = manager.getConnection()
log = self.mklog(use_std_log=True)
log("cleaning up solr index...\n")
key = manager.getSchema().uniqueKey
start = 0
resp = SolrResponse(conn.search(q="*:*", rows=batch, start=start))
res = resp.results()
log("%s items in solr catalog\n" % resp.response.numFound)
deleted = 0
reindexed = 0
while len(res) > 0:
for flare in res:
try:
ob = PloneFlare(flare).getObject()
except Exception as err:
log(
"Error getting object, removing: %s (%s)\n"
% (flare["path_string"], err)
)
conn.delete(flare[key])
deleted += 1
continue
if ob is None:
log("Object not found, removing: %s\n" % (flare["path_string"]))
conn.delete(flare[key])
deleted += 1
continue
if not IUUIDAware.providedBy(ob):
no_skipping_msg = (
"Object %s of type %s does not " + "support uuids, skipping.\n"
)
log(
no_skipping_msg % ("/".join(ob.getPhysicalPath()), ob.meta_type)
)
continue
uuid = IUUID(ob)
if uuid != flare[key]:
log(
"indexed under wrong UID, removing: %s\n" % flare["path_string"]
)
conn.delete(flare[key])
deleted += 1
realob_res = SolrResponse(
conn.search(q="%s:%s" % (key, uuid))
).results()
if len(realob_res) == 0:
log("no sane entry for last object, reindexing\n")
data, missing = proc.getData(ob)
prepareData(data)
if not missing:
boost = boost_values(ob, data)
conn.add(boost_values=boost, **data)
reindexed += 1
else:
log(" missing data, cannot index.\n")
log("handled batch of %d items, committing\n" % len(res))
conn.commit()
start += batch
resp = SolrResponse(conn.search(q="*:*", rows=batch, start=start))
res = resp.results()
finished_msg = (
"solr cleanup finished, %s item(s) removed, " + "%s item(s) reindexed\n"
)
msg = finished_msg % (deleted, reindexed)
log(msg)
logger.info(msg)
|
from __future__ import division
import sys, os
def run(args):
for path in args:
problem = None
if (not os.path.isfile(path) or os.path.islink(path)):
problem = "not a regular file"
else:
try:
file_content = open(path, "rb").read()
except Exception:
problem = "no read access"
else:
if (not os.access(path, os.W_OK)):
problem = "no write access"
if (problem is not None):
print "%s: %s -> no action" % (path, problem)
else:
n_cr = file_content.count("\r")
n_lf = file_content.count("\n")
n_crlf = file_content.count("\r\n")
action = "unknown -> no action"
unix_content = None
if (n_crlf > 0 and n_crlf == n_cr):
action = "dos -> unix"
unix_content = file_content.replace("\r\n", "\n")
if (ord(unix_content[-1]) == 26):
unix_content = unix_content[:-1]
elif (n_cr > 0 and n_lf == 0):
action = "mac -> unix"
unix_content = file_content.replace("\r", "\n")
elif (n_lf > 0 and n_cr == 0):
action = "unix -> no action"
print "%s: %s" % (path, action)
if (unix_content is not None):
if (unix_content[-1] != "\n"):
unix_content += "\n"
try:
open(path, "wb").write(unix_content)
except Exception:
print >> sys.stdout, "FATAL ERROR: Cannot write file:", path
path_copy = path + "_copy"
print >> sys.stdout, "Saving copy of old content as file:", path_copy
open(path_copy, "wb").write(file_content)
sys.exit(1)
if (__name__ == "__main__"):
run(sys.argv[1:])
|
"""
One of the central problems in statistics is to make estimations — and quantify
how good these estimations are — of the distribution of an entire population
given only a small (random) sample. A classic example is to estimate the average
height of all the people in a country when measuring the height of a randomly
selected sample of people. These kinds of problems are particularly interesting
when the true population distribution, by which we usually mean the mean of the
whole population, cannot feasibly be measured. In this case, we must rely on our
knowledge of statistics and a (usually much smaller) randomly selected sample to
estimate the true population mean and standard deviation, and also quantify how
good our estimations are. It is the latter that is the source of confusion,
misunderstanding, and misrepresentation of statistics in the wider world.
This module illustrates how to estimate the population mean and give a
confidence interval for these estimates.
"""
import math
import pandas as pd
from scipy import stats
sample_data = pd.Series([
172.3, 171.3, 164.7, 162.9, 172.5, 176.3, 174.8, 171.9,
176.8, 167.8, 164.5, 179.7, 157.8, 170.6, 189.9, 185. ,
172.7, 165.5, 174.5, 171.5])
sample_mean = sample_data.mean()
sample_std = sample_data.std()
print(f"Mean: {sample_mean}, st. dev: {sample_std}")
# Mean: 172.15, st. dev: 7.473778724383846
N = sample_data.count()
std_err = sample_std/math.sqrt(N)
cv_95, cv_99 = stats.t.ppf([0.975, 0.995], df=N-1)
pm_95 = cv_95 * std_err
pm_99 = cv_99 * std_err
conf_interval_95 = [sample_mean - pm_95, sample_mean + pm_95]
conf_interval_99 = [sample_mean - pm_99, sample_mean + pm_99]
print(f"95% confidence: {conf_interval_95}")
print(f"99% confidence: {conf_interval_99}")
# 95% confidence: [168.65216388659374, 175.64783611340627]
# 99% confidence: [167.36884119608774, 176.93115880391227]
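# Cross-check (a brief sketch): scipy can produce the same interval directly from the
# sample mean and the standard error computed above, which is handy for verification.
low_95, high_95 = stats.t.interval(0.95, df=N - 1, loc=sample_mean, scale=std_err)
print(f"95% confidence (scipy): [{low_95}, {high_95}]")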
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10

def flatten(input_arr, output_arr=None):
    if output_arr is None:
        output_arr = []
    for t in input_arr:
        # Strings and bytes are iterable but should be treated as atoms,
        # otherwise the recursion would never terminate on their characters.
        if isinstance(t, Iterable) and not isinstance(t, (str, bytes)):
            flatten(t, output_arr)
        else:
            output_arr.append(t)
    return output_arr

def flatten_iter(iterable):
    for t in iterable:
        if isinstance(t, Iterable) and not isinstance(t, (str, bytes)):
            yield from flatten_iter(t)
        else:
            yield t
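# Quick usage sketch for the two helpers above (strings stay intact as atoms):
if __name__ == "__main__":
    print(flatten([1, [2, [3, 4]], "ab"]))        # -> [1, 2, 3, 4, 'ab']
    print(list(flatten_iter([1, [2, (3, 4)]])))   # -> [1, 2, 3, 4]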
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop."""
import tensorflow.compat.v2 as tf
import copy
import itertools
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from keras import combinations
from keras import testing_utils
from keras.optimizer_v2 import learning_rate_schedule
from keras.optimizer_v2 import rmsprop
_DATA_TYPES = [
tf.half, tf.float32, tf.float64, tf.complex64,
tf.complex128
]
_TEST_PARAM_VALUES = [
# learning_rate, rho, momentum, epsilon, centered
[0.05, 0.9, 0.0, 1e-3, True],
[0.05, 0.9, 0.0, 1e-3, False],
[0.1, 0.9, 0.0, 1e-3, True],
[0.01, 0.9, 0.0, 1e-5, True],
[0.01, 0.9, 0.9, 1e-5, True],
]
_TESTPARAMS = [
[data_type] + values
for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
]
class RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase):
def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum,
epsilon, centered):
rms_t = rms * rho + (1 - rho) * g * g
if centered:
mg_t = mg * rho + (1 - rho) * g
denom_t = rms_t - mg_t * mg_t
else:
mg_t = mg
denom_t = rms_t
if momentum > 0.:
mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))
var_t = var - mom_t
else:
mom_t = mom
var_t = var - lr * g / (np.sqrt(denom_t) + epsilon)
return var_t, mg_t, rms_t, mom_t
def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
lr, rho, momentum, epsilon, centered):
mg_t = copy.deepcopy(mg)
rms_t = copy.deepcopy(rms)
mom_t = copy.deepcopy(mom)
var_t = copy.deepcopy(var)
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue
if centered:
mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue
denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex]
else:
denom_t = rms_t[gindex]
if momentum > 0.:
mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t +
epsilon)
var_t[gindex] = var[gindex] - mom_t[gindex]
else:
mom_t[gindex] = mom[gindex]
var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon)
return var_t, mg_t, rms_t, mom_t
def testDense(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np, dtype=dtype)
var1 = tf.Variable(var1_np, dtype=dtype)
grads0 = tf.constant(grads0_np, dtype=dtype)
grads1 = tf.constant(grads1_np, dtype=dtype)
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(tf.compat.v1.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
mg1 = opt.get_slot(var1, "mg")
else:
mg0 = None
mg1 = None
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho,
momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho,
momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDenseWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
var0_np = np.array([1.0, 2.0])
grads0_np = np.array([0.1, 0.2])
var1_np = np.array([3.0, 4.0])
grads1_np = np.array([0.01, 0.2])
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 0.01
rho = 0.9
momentum = 0.0
epsilon = 1e-7
centered = False
decay = 0.5
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(tf.compat.v1.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0])
mg1_np = np.array([0.0, 0.0])
rms0_np = np.array([0.0, 0.0])
rms1_np = np.array([0.0, 0.0])
mom0_np = np.array([0.0, 0.0])
mom1_np = np.array([0.0, 0.0])
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Run 2 steps of RMSprop
for t in range(2):
self.evaluate(update)
lr = learning_rate / (1 + decay * t)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
epsilon, centered)
# Validate updated params
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDenseWithLearningRateInverseTimeDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
var0_np = np.array([1.0, 2.0])
grads0_np = np.array([0.1, 0.2])
var1_np = np.array([3.0, 4.0])
grads1_np = np.array([0.01, 0.2])
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
learning_rate = 0.01
rho = 0.9
momentum = 0.0
epsilon = 1e-7
centered = False
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
opt = rmsprop.RMSprop(
learning_rate=lr_schedule,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(tf.compat.v1.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0])
mg1_np = np.array([0.0, 0.0])
rms0_np = np.array([0.0, 0.0])
rms1_np = np.array([0.0, 0.0])
mom0_np = np.array([0.0, 0.0])
mom1_np = np.array([0.0, 0.0])
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Run 2 steps of RMSprop
for t in range(2):
self.evaluate(update)
lr = learning_rate / (1 + decay * t)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
epsilon, centered)
# Validate updated params
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testMinimizeSparseResourceVariable(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
x = tf.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
sgd_op = rmsprop.RMSprop(
learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0,
centered=False).minimize(
loss, var_list=[var0])
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[0., 1.]],
self.evaluate(var0),
atol=0.01)
def testMinimizeSparseResourceVariableCentered(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
with tf.Graph().as_default():
for dtype in _DATA_TYPES:
var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
x = tf.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
# loss = lambda: pred * pred # pylint: disable=cell-var-from-loop
sgd_op = rmsprop.RMSprop(
learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0,
centered=True).minimize(
loss, var_list=[var0])
self.evaluate(tf.compat.v1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]],
self.evaluate(var0),
atol=0.01)
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = tf.IndexedSlices(
tf.constant(grads0_np),
tf.constant(grads0_np_indices), tf.constant([1]))
grads1_np_indices = np.array([1], dtype=np.int32)
grads1 = tf.IndexedSlices(
tf.constant(grads1_np),
tf.constant(grads1_np_indices), tf.constant([1]))
opt = rmsprop.RMSprop(
learning_rate=learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(tf.compat.v1.global_variables_initializer())
if centered:
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
else:
mg0 = None
mg1 = None
rms0 = opt.get_slot(var0, "rms")
self.assertIsNotNone(rms0)
rms1 = opt.get_slot(var1, "rms")
self.assertIsNotNone(rms1)
if momentum > 0.:
mom0 = opt.get_slot(var0, "momentum")
mom1 = opt.get_slot(var1, "momentum")
else:
mom0 = None
mom1 = None
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSprop
for _ in range(1, 4):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,
learning_rate, rho, momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,
learning_rate, rho, momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
if momentum > 0.:
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@combinations.generate(combinations.combine(mode=["eager"]))
def testCallableParams(self):
for dtype in _DATA_TYPES:
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
rho = lambda: 0.9
momentum = lambda: 0.0
epsilon = 1.0
opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Step 1: the rms accumulators were 1. So we should see a normal
# update: v -= grad * learning_rate
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))
]), self.evaluate(var1))
# Step 2: the root mean square accumulators contain the previous update.
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0))
]), self.evaluate(var1))
def testConstructRMSpropWithLR(self):
opt = rmsprop.RMSprop(lr=1.0)
opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0)
opt_3 = rmsprop.RMSprop(learning_rate=0.1)
self.assertIsInstance(opt.lr, tf.Variable)
self.assertIsInstance(opt_2.lr, tf.Variable)
self.assertIsInstance(opt_3.lr, tf.Variable)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
@combinations.generate(combinations.combine(mode=["eager"]))
def testSlotsUniqueEager(self):
v1 = tf.Variable(1.)
v2 = tf.Variable(1.)
opt = rmsprop.RMSprop(1., momentum=0., centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and one unique slot variable for v1 and v2.
self.assertLen(set({id(v) for v in opt.variables()}), 3)
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and two unique slot variables for v1 and v2.
self.assertLen(set({id(v) for v in opt.variables()}), 5)
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and three unique slot variables for v1 and v2
self.assertLen(set({id(v) for v in opt.variables()}), 7)
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
@combinations.generate(combinations.combine(mode=["eager"]))
def testMomentumProperValue(self):
with self.assertRaisesRegex(ValueError,
r"`momentum` must be between \[0, 1\]. "
r"Received: momentum=2.5 \(of type <class "
r"\'float\'>\)."):
rmsprop.RMSprop(1., momentum=2.5, centered=False)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
class SlotColocationTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([True, False])
@test_util.run_gpu_only
def testRunMinimizeOnGPUForCPUVariables(self, use_resource):
with tf.device("/device:CPU:0"):
if use_resource:
var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)
else:
var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)
def loss():
return 5 * var0 + 3 * var1
opt = rmsprop.RMSprop(
learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0)
# Fetch params to validate initial values
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step through optimizer on GPU.
# Slot variables are created the first time optimizer is used on some
# variable. This tests that slot variables will be colocated with the base
# variable.
with tf.device("/device:GPU:0"):
# Note that for eager execution, minimize expects a function instead of a
# Tensor.
opt_op = opt.minimize(loss, [var0, var1])
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(opt_op)
    # Validate updated params; all variables should have decreased.
self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)),
msg="updated variables: %s" % self.evaluate(var0))
self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)),
msg="updated variables: %s" % self.evaluate(var1))
if __name__ == "__main__":
tf.test.main()
|
"""
ArcBall.py -- Math utilities, vector, matrix types and ArcBall quaternion rotation class
>>> unit_test_ArcBall_module ()
unit testing ArcBall
Quat for first drag
[ 0.08438914 -0.08534209 -0.06240178 0.99080837]
First transform
[[ 0.97764552 -0.1380603 0.15858325 0. ]
[ 0.10925253 0.97796899 0.17787792 0. ]
[-0.17964739 -0.15657592 0.97119039 0. ]
[ 0. 0. 0. 1. ]]
LastRot at end of first drag
[[ 0.97764552 -0.1380603 0.15858325]
[ 0.10925253 0.97796899 0.17787792]
[-0.17964739 -0.15657592 0.97119039]]
Quat for second drag
[ 0.00710336 0.31832787 0.02679029 0.94757545]
Second transform
[[ 0.88022292 -0.08322023 -0.46720669 0. ]
[ 0.14910145 0.98314685 0.10578787 0. ]
[ 0.45052907 -0.16277808 0.8777966 0. ]
[ 0. 0. 0. 1.00000001]]
"""
try:
import numpy as Numeric
def sumDot( a,b ):
return Numeric.dot (a, b)
except ImportError:
try:
import Numeric
def sumDot( a,b ):
return sum (Numeric.dot (a, b) )
except ImportError:
print ("This demo requires the numpy or Numeric extension, sorry")
import sys
sys.exit()
import copy
from math import sqrt
# //assuming IEEE-754 (GLfloat), which has a precision of about 7 significant decimal digits
Epsilon = 1.0e-5
class ArcBallT:
def __init__ (self, NewWidth, NewHeight):
self.m_StVec = Vector3fT ()
self.m_EnVec = Vector3fT ()
self.m_AdjustWidth = 1.0
self.m_AdjustHeight = 1.0
self.setBounds (NewWidth, NewHeight)
def __str__ (self):
str_rep = ""
str_rep += "StVec = " + str (self.m_StVec)
str_rep += "\nEnVec = " + str (self.m_EnVec)
str_rep += "\n scale coords %f %f" % (self.m_AdjustWidth, self.m_AdjustHeight)
return str_rep
def setBounds (self, NewWidth, NewHeight):
# //Set new bounds
assert (NewWidth > 1.0 and NewHeight > 1.0), "Invalid width or height for bounds."
# //Set adjustment factor for width/height
self.m_AdjustWidth = 1.0 / ((NewWidth - 1.0) * 0.5)
self.m_AdjustHeight = 1.0 / ((NewHeight - 1.0) * 0.5)
def _mapToSphere (self, NewPt):
# Given a new window coordinate, will modify NewVec in place
X = 0
Y = 1
Z = 2
NewVec = Vector3fT ()
		# //Copy parameter into temp point
TempPt = copy.copy (NewPt)
#print ('NewPt', NewPt, TempPt)
# //Adjust point coords and scale down to range of [-1 ... 1]
TempPt [X] = (NewPt [X] * self.m_AdjustWidth) - 1.0
TempPt [Y] = 1.0 - (NewPt [Y] * self.m_AdjustHeight)
# //Compute the square of the length of the vector to the point from the center
length = sumDot( TempPt, TempPt)
# //If the point is mapped outside of the sphere... (length > radius squared)
if (length > 1.0):
# //Compute a normalizing factor (radius / sqrt(length))
norm = 1.0 / sqrt (length);
# //Return the "normalized" vector, a point on the sphere
NewVec [X] = TempPt [X] * norm;
NewVec [Y] = TempPt [Y] * norm;
NewVec [Z] = 0.0;
else: # //Else it's on the inside
# //Return a vector to a point mapped inside the sphere sqrt(radius squared - length)
NewVec [X] = TempPt [X]
NewVec [Y] = TempPt [Y]
NewVec [Z] = sqrt (1.0 - length)
return NewVec
def click (self, NewPt):
		# //Mouse down (Point2fT)
self.m_StVec = self._mapToSphere (NewPt)
return
def drag (self, NewPt):
# //Mouse drag, calculate rotation (Point2fT Quat4fT)
""" drag (Point2fT mouse_coord) -> new_quaternion_rotation_vec
"""
X = 0
Y = 1
Z = 2
W = 3
self.m_EnVec = self._mapToSphere (NewPt)
# //Compute the vector perpendicular to the begin and end vectors
# Perp = Vector3fT ()
Perp = Vector3fCross(self.m_StVec, self.m_EnVec);
NewRot = Quat4fT ()
# //Compute the length of the perpendicular vector
if (Vector3fLength(Perp) > Epsilon): # //if its non-zero
# //We're ok, so return the perpendicular vector as the transform after all
NewRot[X] = Perp[X];
NewRot[Y] = Perp[Y];
NewRot[Z] = Perp[Z];
# //In the quaternion values, w is cosine (theta / 2), where theta is rotation angle
NewRot[W] = Vector3fDot(self.m_StVec, self.m_EnVec);
else: # //if its zero
# //The begin and end vectors coincide, so return a quaternion of zero matrix (no rotation)
			NewRot[X] = NewRot[Y] = NewRot[Z] = NewRot[W] = 0.0
return NewRot
# ##################### Math utility ##########################################
def Matrix4fT ():
return Numeric.identity (4, 'f')
def Matrix3fT ():
return Numeric.identity (3, 'f')
def Quat4fT ():
return Numeric.zeros (4, 'f')
def Vector3fT ():
return Numeric.zeros (3, 'f')
def Point2fT (x = 0.0, y = 0.0):
pt = Numeric.zeros (2, 'f')
pt [0] = x
pt [1] = y
return pt
def Vector3fDot(u, v):
# Dot product of two 3f vectors
dotprod = Numeric.dot (u,v)
return dotprod
def Vector3fCross(u, v):
# Cross product of two 3f vectors
X = 0
Y = 1
Z = 2
cross = Numeric.zeros (3, 'f')
cross [X] = (u[Y] * v[Z]) - (u[Z] * v[Y])
cross [Y] = (u[Z] * v[X]) - (u[X] * v[Z])
cross [Z] = (u[X] * v[Y]) - (u[Y] * v[X])
return cross
def Vector3fLength (u):
mag_squared = sumDot(u,u)
mag = sqrt (mag_squared)
return mag
def Matrix3fSetIdentity ():
return Numeric.identity (3, 'f')
def Matrix3fMulMatrix3f (matrix_a, matrix_b):
return sumDot( matrix_a, matrix_b )
def Matrix4fSVD (NewObj):
X = 0
Y = 1
Z = 2
s = sqrt (
( (NewObj [X][X] * NewObj [X][X]) + (NewObj [X][Y] * NewObj [X][Y]) + (NewObj [X][Z] * NewObj [X][Z]) +
(NewObj [Y][X] * NewObj [Y][X]) + (NewObj [Y][Y] * NewObj [Y][Y]) + (NewObj [Y][Z] * NewObj [Y][Z]) +
(NewObj [Z][X] * NewObj [Z][X]) + (NewObj [Z][Y] * NewObj [Z][Y]) + (NewObj [Z][Z] * NewObj [Z][Z]) ) / 3.0 )
return s
def Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix):
# Modifies NewObj in-place by replacing its upper 3x3 portion from the
# passed in 3x3 matrix.
# NewObj = Matrix4fT ()
NewObj [0:3,0:3] = three_by_three_matrix
return NewObj
# /**
# * Sets the rotational component (upper 3x3) of this matrix to the matrix
# * values in the T precision Matrix3d argument; the other elements of
# * this matrix are unchanged; a singular value decomposition is performed
# * on this object's upper 3x3 matrix to factor out the scale, then this
# * object's upper 3x3 matrix components are replaced by the passed rotation
# * components, and then the scale is reapplied to the rotational
# * components.
# * @param three_by_three_matrix T precision 3x3 matrix
# */
def Matrix4fSetRotationFromMatrix3f (NewObj, three_by_three_matrix):
scale = Matrix4fSVD (NewObj)
NewObj = Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix);
scaled_NewObj = NewObj * scale # Matrix4fMulRotationScale(NewObj, scale);
return scaled_NewObj
def Matrix3fSetRotationFromQuat4f (q1):
# Converts the H quaternion q1 into a new equivalent 3x3 rotation matrix.
X = 0
Y = 1
Z = 2
W = 3
NewObj = Matrix3fT ()
n = sumDot(q1, q1)
s = 0.0
if (n > 0.0):
s = 2.0 / n
xs = q1 [X] * s; ys = q1 [Y] * s; zs = q1 [Z] * s
wx = q1 [W] * xs; wy = q1 [W] * ys; wz = q1 [W] * zs
xx = q1 [X] * xs; xy = q1 [X] * ys; xz = q1 [X] * zs
yy = q1 [Y] * ys; yz = q1 [Y] * zs; zz = q1 [Z] * zs
# This math all comes about by way of algebra, complex math, and trig identities.
# See Lengyel pages 88-92
NewObj [X][X] = 1.0 - (yy + zz); NewObj [Y][X] = xy - wz; NewObj [Z][X] = xz + wy;
NewObj [X][Y] = xy + wz; NewObj [Y][Y] = 1.0 - (xx + zz); NewObj [Z][Y] = yz - wx;
NewObj [X][Z] = xz - wy; NewObj [Y][Z] = yz + wx; NewObj [Z][Z] = 1.0 - (xx + yy)
return NewObj
def unit_test_ArcBall_module ():
	# Unit testing of the ArcBall class and the real math behind it.
# Simulates a click and drag followed by another click and drag.
print ("unit testing ArcBall")
Transform = Matrix4fT ()
LastRot = Matrix3fT ()
ThisRot = Matrix3fT ()
ArcBall = ArcBallT (640, 480)
# print "The ArcBall with NO click"
# print ArcBall
# First click
LastRot = copy.copy (ThisRot)
mouse_pt = Point2fT (500,250)
ArcBall.click (mouse_pt)
# print "The ArcBall with first click"
# print ArcBall
# First drag
mouse_pt = Point2fT (475, 275)
ThisQuat = ArcBall.drag (mouse_pt)
# print "The ArcBall after first drag"
# print ArcBall
# print
# print
print ("Quat for first drag")
print (ThisQuat)
ThisRot = Matrix3fSetRotationFromQuat4f (ThisQuat)
# Linear Algebra matrix multiplication A = old, B = New : C = A * B
ThisRot = Matrix3fMulMatrix3f (LastRot, ThisRot)
Transform = Matrix4fSetRotationFromMatrix3f (Transform, ThisRot)
print ("First transform")
print (Transform)
# Done with first drag
# second click
LastRot = copy.copy (ThisRot)
print ("LastRot at end of first drag")
print (LastRot)
mouse_pt = Point2fT (350,260)
ArcBall.click (mouse_pt)
# second drag
mouse_pt = Point2fT (450, 260)
ThisQuat = ArcBall.drag (mouse_pt)
# print "The ArcBall"
# print ArcBall
print ("Quat for second drag")
print (ThisQuat)
ThisRot = Matrix3fSetRotationFromQuat4f (ThisQuat)
ThisRot = Matrix3fMulMatrix3f (LastRot, ThisRot)
# print ThisRot
Transform = Matrix4fSetRotationFromMatrix3f (Transform, ThisRot)
print ("Second transform")
print (Transform)
# Done with second drag
LastRot = copy.copy (ThisRot)
def _test ():
# This will run doctest's unit testing capability.
# see http://www.python.org/doc/current/lib/module-doctest.html
#
# doctest introspects the ArcBall module for all docstrings
# that look like interactive python sessions and invokes
# the same commands then and there as unit tests to compare
# the output generated. Very nice for unit testing and
# documentation.
import doctest, ArcBall
return doctest.testmod (ArcBall)
if __name__ == "__main__":
# Invoke our function that runs python's doctest unit testing tool.
_test ()
# unit_test ()
|
import os
import boto3
import fsspec
import pytest
from moto import mock_s3
from datasets.filesystems import (
COMPRESSION_FILESYSTEMS,
HfFileSystem,
S3FileSystem,
extract_path_from_uri,
is_remote_filesystem,
)
from .utils import require_lz4, require_zstandard
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "fake_access_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "fake_secret_key"
    os.environ["AWS_SECURITY_TOKEN"] = "fake_security_token"
os.environ["AWS_SESSION_TOKEN"] = "fake_session_token"
@pytest.fixture(scope="function")
def s3(aws_credentials):
with mock_s3():
yield boto3.client("s3", region_name="us-east-1")
def test_extract_path_from_uri(s3):
mock_bucket = "moto-mock-s3-bucket"
# We need to create the bucket since this is all in Moto's 'virtual' AWS account
s3.create_bucket(Bucket=mock_bucket)
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem():
fs = S3FileSystem(key="fake_access_key", secret="fake_secret")
is_remote = is_remote_filesystem(fs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@require_zstandard
@require_lz4
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = str(input_paths[compression_fs_class.protocol])
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.ls("/") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data.txt"]
with open(text_file) as f:
assert hffs.open("data.txt", "r").read() == f.read()
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix it in our codegen framework. We won't be able to accept
pull requests for this class.
"""
class MessengerDestinationPageWelcomeMessage(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isMessengerDestinationPageWelcomeMessage = True
super(MessengerDestinationPageWelcomeMessage, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
id = 'id'
page_welcome_message_body = 'page_welcome_message_body'
page_welcome_message_type = 'page_welcome_message_type'
template_name = 'template_name'
time_created = 'time_created'
time_last_used = 'time_last_used'
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=MessengerDestinationPageWelcomeMessage,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'id': 'string',
'page_welcome_message_body': 'string',
'page_welcome_message_type': 'string',
'template_name': 'string',
'time_created': 'datetime',
'time_last_used': 'datetime',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
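# Example usage (a commented sketch; it assumes FacebookAdsApi has already been
# initialized elsewhere and '<WELCOME_MESSAGE_ID>' is a placeholder node id):
#
#   message = MessengerDestinationPageWelcomeMessage(fbid='<WELCOME_MESSAGE_ID>')
#   message.api_get(fields=[
#       MessengerDestinationPageWelcomeMessage.Field.page_welcome_message_body,
#   ])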
|
from base_app.serializers import CustomUserSerializer
from rest_framework import serializers
from task_app.models import TaskFile
class TaskFileCreateSerializer(serializers.ModelSerializer):
'''Serializer for creating task files'''
author = CustomUserSerializer(read_only=True)
class Meta:
model = TaskFile
fields = '__all__'
read_only_fields = ['author', 'task']
class TaskFileDetailsSerializer(serializers.ModelSerializer):
    '''Serializer for a specified task file.
    This serializer provides detailed information about a task file.'''
file = serializers.FileField(read_only=True, allow_empty_file=True)
author = CustomUserSerializer(read_only=True)
class Meta:
model = TaskFile
exclude = ['task']
read_only_fields = ['file', 'author']
class TaskFileUpdateSerializer(serializers.ModelSerializer):
'''Serializer for updating a specified task file.
    With this serializer, a task file can be updated only by its author.
'''
file = serializers.FileField(allow_empty_file=True)
author = CustomUserSerializer(read_only=True)
class Meta:
model = TaskFile
fields = '__all__'
read_only_fields = ['task', 'author']
|
# encoding: utf-8
import datetime
import re
import requests
from ckan.common import config
from ckan.common import asbool
from six import text_type, string_types
from ckan.common import _, json
import ckan.lib.maintain as maintain
log = __import__('logging').getLogger(__name__)
class License(object):
"""Domain object for a license."""
def __init__(self, data):
# convert old keys if necessary
if 'is_okd_compliant' in data:
data['od_conformance'] = 'approved' \
if asbool(data['is_okd_compliant']) else ''
del data['is_okd_compliant']
if 'is_osi_compliant' in data:
data['osd_conformance'] = 'approved' \
if asbool(data['is_osi_compliant']) else ''
del data['is_osi_compliant']
self._data = data
for (key, value) in self._data.items():
if key == 'date_created':
# Parse ISO formatted datetime.
                value = datetime.datetime(*map(int, re.split(r'[^\d]', value)))
self._data[key] = value
elif isinstance(value, str):
# Convert str to unicode (keeps Pylons and SQLAlchemy happy).
value = value.decode('utf8')
self._data[key] = value
def __getattr__(self, name):
if name == 'is_okd_compliant':
log.warn('license.is_okd_compliant is deprecated - use '
'od_conformance instead.')
return self._data['od_conformance'] == 'approved'
if name == 'is_osi_compliant':
log.warn('license.is_osi_compliant is deprecated - use '
'osd_conformance instead.')
return self._data['osd_conformance'] == 'approved'
return self._data[name]
@maintain.deprecated("License.__getitem__() is deprecated and will be "
"removed in a future version of CKAN. Instead, "
"please use attribute access.")
def __getitem__(self, key):
'''NB This method is deprecated and will be removed in a future version
of CKAN. Instead, please use attribute access.
'''
return self.__getattr__(key)
def isopen(self):
if not hasattr(self, '_isopen'):
self._isopen = self.od_conformance == 'approved' or \
self.osd_conformance == 'approved'
return self._isopen
@maintain.deprecated("License.as_dict() is deprecated and will be "
"removed in a future version of CKAN. Instead, "
"please use attribute access.")
def as_dict(self):
'''NB This method is deprecated and will be removed in a future version
of CKAN. Instead, please use attribute access.
'''
data = self._data.copy()
if 'date_created' in data:
value = data['date_created']
value = value.isoformat()
data['date_created'] = value
# deprecated keys
if 'od_conformance' in data:
data['is_okd_compliant'] = data['od_conformance'] == 'approved'
if 'osd_conformance' in data:
data['is_osi_compliant'] = data['osd_conformance'] == 'approved'
return data
class LicenseRegister(object):
"""Dictionary-like interface to a group of licenses."""
def __init__(self):
group_url = config.get('licenses_group_url', None)
if group_url:
self.load_licenses(group_url)
else:
default_license_list = [
LicenseNotSpecified(),
LicenseOpenDataCommonsPDDL(),
LicenseOpenDataCommonsOpenDatabase(),
LicenseOpenDataAttribution(),
LicenseCreativeCommonsZero(),
LicenseCreativeCommonsAttribution(),
LicenseCreativeCommonsAttributionShareAlike(),
LicenseGNUFreeDocument(),
LicenseOtherOpen(),
LicenseOtherPublicDomain(),
LicenseOtherAttribution(),
LicenseOpenGovernment(),
LicenseCreativeCommonsNonCommercial(),
LicenseOtherNonCommercial(),
LicenseOtherClosed(),
]
self._create_license_list(default_license_list)
def load_licenses(self, license_url):
try:
if license_url.startswith('file://'):
with open(license_url.replace('file://', ''), 'r') as f:
license_data = json.load(f)
else:
response = requests.get(license_url)
license_data = response.json()
except requests.RequestException as e:
msg = "Couldn't get the licenses file {}: {}".format(license_url, e)
raise Exception(msg)
except ValueError as e:
msg = "Couldn't parse the licenses file {}: {}".format(license_url, e)
raise Exception(msg)
for license in license_data:
if isinstance(license, string_types):
license = license_data[license]
if license.get('title'):
license['title'] = _(license['title'])
self._create_license_list(license_data, license_url)
def _create_license_list(self, license_data, license_url=''):
if isinstance(license_data, dict):
self.licenses = [License(entity) for entity in license_data.values()]
elif isinstance(license_data, list):
self.licenses = [License(entity) for entity in license_data]
else:
msg = "Licenses at %s must be dictionary or list" % license_url
raise ValueError(msg)
def __getitem__(self, key, default=Exception):
for license in self.licenses:
if key == license.id:
return license
if default != Exception:
return default
else:
raise KeyError("License not found: %s" % key)
def get(self, key, default=None):
return self.__getitem__(key, default=default)
def keys(self):
return [license.id for license in self.licenses]
def values(self):
return self.licenses
def items(self):
return [(license.id, license) for license in self.licenses]
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.licenses)
class DefaultLicense(dict):
''' The license was a dict but this did not allow translation of the
title. This is a slightly changed dict that allows us to have the title
as a property and so translated. '''
domain_content = False
domain_data = False
domain_software = False
family = ''
is_generic = False
od_conformance = 'not reviewed'
osd_conformance = 'not reviewed'
maintainer = ''
status = 'active'
url = ''
title = ''
id = ''
keys = ['domain_content',
'id',
'domain_data',
'domain_software',
'family',
'is_generic',
'od_conformance',
'osd_conformance',
'maintainer',
'status',
'url',
'title']
def __getitem__(self, key):
''' behave like a dict but get from attributes '''
if key in self.keys:
value = getattr(self, key)
if isinstance(value, str):
return text_type(value)
else:
return value
else:
raise KeyError()
def copy(self):
''' create a dict of the license used by the licenses api '''
out = {}
for key in self.keys:
out[key] = text_type(getattr(self, key))
return out
class LicenseNotSpecified(DefaultLicense):
id = "notspecified"
is_generic = True
@property
def title(self):
return _("License not specified")
class LicenseOpenDataCommonsPDDL(DefaultLicense):
domain_data = True
id = "odc-pddl"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/odc-pddl"
@property
def title(self):
return _("Open Data Commons Public Domain Dedication and License (PDDL)")
class LicenseOpenDataCommonsOpenDatabase(DefaultLicense):
domain_data = True
id = "odc-odbl"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/odc-odbl"
@property
def title(self):
return _("Open Data Commons Open Database License (ODbL)")
class LicenseOpenDataAttribution(DefaultLicense):
domain_data = True
id = "odc-by"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/odc-by"
@property
def title(self):
return _("Open Data Commons Attribution License")
class LicenseCreativeCommonsZero(DefaultLicense):
domain_content = True
domain_data = True
id = "cc-zero"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/cc-zero"
@property
def title(self):
return _("Creative Commons CCZero")
class LicenseCreativeCommonsAttribution(DefaultLicense):
id = "cc-by"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/cc-by"
@property
def title(self):
return _("Creative Commons Attribution")
class LicenseCreativeCommonsAttributionShareAlike(DefaultLicense):
domain_content = True
id = "cc-by-sa"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/cc-by-sa"
@property
def title(self):
return _("Creative Commons Attribution Share-Alike")
class LicenseGNUFreeDocument(DefaultLicense):
domain_content = True
id = "gfdl"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/gfdl"
@property
def title(self):
return _("GNU Free Documentation License")
class LicenseOtherOpen(DefaultLicense):
domain_content = True
id = "other-open"
is_generic = True
od_conformance = 'approved'
@property
def title(self):
return _("Other (Open)")
class LicenseOtherPublicDomain(DefaultLicense):
domain_content = True
id = "other-pd"
is_generic = True
od_conformance = 'approved'
@property
def title(self):
return _("Other (Public Domain)")
class LicenseOtherAttribution(DefaultLicense):
domain_content = True
id = "other-at"
is_generic = True
od_conformance = 'approved'
@property
def title(self):
return _("Other (Attribution)")
class LicenseOpenGovernment(DefaultLicense):
domain_content = True
id = "uk-ogl"
od_conformance = 'approved'
# CS: bad_spelling ignore
url = "http://reference.data.gov.uk/id/open-government-licence"
@property
def title(self):
# CS: bad_spelling ignore
return _("UK Open Government Licence (OGL)")
class LicenseCreativeCommonsNonCommercial(DefaultLicense):
id = "cc-nc"
url = "http://creativecommons.org/licenses/by-nc/2.0/"
@property
def title(self):
return _("Creative Commons Non-Commercial (Any)")
class LicenseOtherNonCommercial(DefaultLicense):
id = "other-nc"
is_generic = True
@property
def title(self):
return _("Other (Non-Commercial)")
class LicenseOtherClosed(DefaultLicense):
id = "other-closed"
is_generic = True
@property
def title(self):
return _("Other (Not Open)")
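# Example usage (a commented sketch, not executed at import time; it assumes a CKAN
# environment where no 'licenses_group_url' is configured, so the default license
# list above is used):
#
#   register = LicenseRegister()
#   cc_by = register['cc-by']
#   print(cc_by.title, cc_by.isopen())   # -> "Creative Commons Attribution" True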
|
from sklearn.datasets import fetch_20newsgroups
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
# Define the category map
category_map = {'talk.politics.misc': 'Politics', 'rec.autos': 'Autos',
'rec.sport.hockey': 'Hockey', 'sci.electronics': 'Electronics',
'sci.med': 'Medicine'}
# Get the training dataset
training_data = fetch_20newsgroups(subset='train',
categories=category_map.keys(), shuffle=True, random_state=5)
# Build a count vectorizer and extract term counts
count_vectorizer = CountVectorizer()
train_tc = count_vectorizer.fit_transform(training_data.data)
print("\nDimensions of training data:", train_tc.shape)
# Create the tf-idf transformer
tfidf = TfidfTransformer()
train_tfidf = tfidf.fit_transform(train_tc)
# Define test data
input_data = [
'You need to be careful with cars when you are driving on slippery roads',
'A lot of devices can be operated wirelessly',
'Players need to be careful when they are close to goal posts',
'Political debates help us understand the perspectives of both sides'
]
# Train a Multinomial Naive Bayes classifier
classifier = MultinomialNB().fit(train_tfidf, training_data.target)
# Transform input data using count vectorizer
input_tc = count_vectorizer.transform(input_data)
# Transform vectorized data using tfidf transformer
input_tfidf = tfidf.transform(input_tc)
# Predict the output categories
predictions = classifier.predict(input_tfidf)
# Print the outputs
for sent, category in zip(input_data, predictions):
print('\nInput:', sent, '\nPredicted category:', \
category_map[training_data.target_names[category]])
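# The three steps above (count vectorizer -> tf-idf -> Multinomial Naive Bayes) can
# also be wrapped in a single scikit-learn pipeline. This is an equivalent sketch of
# the same flow, so it should yield the same predictions as the code above.
from sklearn.pipeline import make_pipeline
pipeline = make_pipeline(CountVectorizer(), TfidfTransformer(), MultinomialNB())
pipeline.fit(training_data.data, training_data.target)
for sent, category in zip(input_data, pipeline.predict(input_data)):
    print('\nInput:', sent, '\nPredicted category (pipeline):',
          category_map[training_data.target_names[category]])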
|
from loguru import logger
from flask import request
from flasgger import swag_from
from flask_restful import Resource
from jwt.exceptions import ExpiredSignatureError
from ada_friend_app.modulo.cripto import Sha256
from ada_friend_app.modulo.jwt_auth import Token
from ada_friend_app.api.resposta_api import Resposta
from ada_friend_app.servico.mod_database import Database
class Login(Resource):
@swag_from('../../docs/api/login_post.yml')
def post(self):
json = request.json
if json.get('email', False) and json.get('senha', False):
senha = Sha256(json['senha']).hash
usuario = Database().get_document('usuarios', {'_id': json['email'], 'senha': senha})
if usuario:
usuario = usuario[0]
                logger.debug(f"{json['email']} - CONNECTED")
try:
token = Token.gerar(usuario['senha'], usuario['_id'])
return Resposta.token_validado(token)
except ExpiredSignatureError:
                    return Resposta.nao_aceito('Token expired')
except Exception as e:
return Resposta.error(str(e))
else:
                logger.debug(f"{json['email']} - ACCESS ERROR")
                return Resposta.nao_aceito('Invalid username or password!')
else:
            return Resposta.error('Invalid JSON!')
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .mixed_precision import *
from . import mixed_precision
__all__ = mixed_precision.__all__
|
import argparse
from rasa.cli.arguments.default_arguments import (
add_nlu_data_param,
add_out_param,
add_data_param,
add_domain_param,
)
def set_convert_arguments(parser: argparse.ArgumentParser):
add_data_param(parser, required=True, default=None, data_type="Rasa NLU ")
add_out_param(
parser,
required=True,
default=None,
help_text="File where to save training data in Rasa format.",
)
parser.add_argument("-l", "--language", default="en", help="Language of data.")
parser.add_argument(
"-f",
"--format",
required=True,
choices=["json", "md"],
help="Output format the training data should be converted into.",
)
def set_split_arguments(parser: argparse.ArgumentParser):
add_nlu_data_param(parser, help_text="File or folder containing your NLU data.")
parser.add_argument(
"--training-fraction",
type=float,
default=0.8,
help="Percentage of the data which should be in the training data.",
)
add_out_param(
parser,
default="train_test_split",
help_text="Directory where the split files should be stored.",
)
def set_validator_arguments(parser: argparse.ArgumentParser):
add_domain_param(parser)
add_data_param(parser)
|
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class Simulation:
def __init__(self, init_investment, stock_returns, strategy, predicted_movements=None):
self.init_investment = init_investment
self.predicted_movements = predicted_movements
self.stock_returns = stock_returns
self.strategy = strategy
self.action_history = []
self.account_history = [init_investment]
self.__actual_investment = 0
self.step = 0
self.return_on_investment = 0
self.profit_on_investment = 0
def start(self):
for self.step in range(len(self.stock_returns)):
if self.predicted_movements is not None:
action = self.strategy.decide(self.predicted_movements[self.step])
else:
action = self.strategy.decide(self.step)
self.__make_transaction(action)
def __make_transaction(self, action):
self.action_history.append(action)
if action == 'buy':
self.__buy()
elif action == 'hold':
self.__hold()
elif action == 'sell':
self.__sell()
elif action == 'wait':
self.__wait()
else:
sys.exit('Action not implemented, exiting program!')
def get_investment_performance(self):
self.return_on_investment = (self.account_history[-1] - self.init_investment) / self.init_investment
self.profit_on_investment = self.account_history[-1] - self.init_investment
return {'return': self.return_on_investment,
'profit': self.profit_on_investment}
def plot_trading_history(self, stock_prices, date):
        date = date.iloc[-(len(stock_prices) - 1):]
stock_prices = np.insert(stock_prices, 0, stock_prices[0])
fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(40, 20))
        ax1.plot(stock_prices, color='black', label='Stock closing price')
actions = pd.DataFrame(self.action_history)
buy_idx = actions[actions[0] == 'buy'].index.to_list()
sell_idx = actions[actions[0] == 'sell'].index.to_list()
stock_prices = np.array(stock_prices)
        ax1.scatter(buy_idx, stock_prices[buy_idx], color='green', s=40, label='Buy')
        ax1.scatter(sell_idx, stock_prices[sell_idx], color='red', s=40, label='Sell')
        ax1.legend()
        ax2.plot(self.account_history[:-1], label='Capital')
        plt.xlabel('Time step')
        ax1.set_ylabel('Stock price')
        ax2.set_ylabel('Capital')
ax2.legend()
plt.show()
def __calculate_daily_profit(self):
self.__actual_investment += self.__actual_investment * self.stock_returns[self.step]
def __buy(self):
self.__actual_investment = self.account_history[self.step]
self.__calculate_daily_profit()
self.account_history.append(self.__actual_investment)
def __hold(self):
self.__calculate_daily_profit()
self.account_history.append(self.__actual_investment)
def __sell(self):
self.account_history.append(self.__actual_investment)
self.__actual_investment = 0
def __wait(self):
self.account_history.append(self.account_history[self.step-1])
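if __name__ == "__main__":
    # Minimal usage sketch. The strategy object below is a stand-in written for this
    # example only (any object with a decide(step) method works); the real strategy
    # classes used with Simulation are defined elsewhere and are not shown here.
    class BuyAndHold:
        def decide(self, step):
            return 'buy' if step == 0 else 'hold'

    sim = Simulation(init_investment=1000, stock_returns=[0.01, -0.02, 0.03],
                     strategy=BuyAndHold())
    sim.start()
    print(sim.get_investment_performance())  # e.g. {'return': ..., 'profit': ...}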
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import os
from six.moves import configparser
from prompt_toolkit import prompt # pylint: disable=import-error
from azure.cli.core._help import PRIVACY_STATEMENT
SELECT_SYMBOL = {
'outside': '#',
'query': '??',
'example': '::',
'exit_code': '$',
'scope': '%%',
'unscope': '..'
}
GESTURE_INFO = {
SELECT_SYMBOL['outside'] + "[cmd]": "use commands outside the application",
# pylint: disable=line-too-long
"[cmd] + [param] +" + "\"" + SELECT_SYMBOL['query'] + "[query]" + "\"": "Inject jmespath query from previous command",
"\"" + SELECT_SYMBOL['query'] + "[query]" + "\"": "Jmespath query of the previous command",
"[cmd] " + SELECT_SYMBOL['example'] + " [num]": "do a step by step tutorial of example",
SELECT_SYMBOL['exit_code']: "get the exit code of the previous command",
SELECT_SYMBOL['scope'] + '[cmd]': "set a scope, and scopes can be chained with spaces",
SELECT_SYMBOL['scope'] + ' ' + SELECT_SYMBOL['unscope']: "go back a scope",
}
CONFIG_FILE_NAME = 'shell-config'
GESTURE_LENGTH = max(len(key) for key in GESTURE_INFO) + 1
def help_text(values):
""" reformats the help text """
result = ""
for key in values:
result += key + ' '.join('' for x in range(GESTURE_LENGTH - len(key))) +\
': ' + values[key] + '\n'
return result
SHELL_HELP = help_text(GESTURE_INFO)
class Configuration(object):
""" configuration for program """
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False,
'y': True, 'Y': True, 'n': False, 'N': False}
""" Configuration information """
def __init__(self, cli_config, style=None):
self.config = configparser.ConfigParser({
'firsttime': 'yes',
'style': style if style else 'default'
})
self.cli_config = cli_config
self.config.add_section('Help Files')
self.config.add_section('Layout')
self.config.set('Help Files', 'command', 'help_dump.json')
self.config.set('Help Files', 'history', 'history.txt')
self.config.set('Help Files', 'frequency', 'frequency.json')
self.config.set('Layout', 'command_description', 'yes')
self.config.set('Layout', 'param_description', 'yes')
self.config.set('Layout', 'examples', 'yes')
self.config_dir = os.getenv('AZURE_CONFIG_DIR') or os.path.expanduser(os.path.join('~', '.azure-shell'))
if not os.path.exists(self.config_dir):
os.makedirs(self.config_dir)
if not os.path.exists(os.path.join(self.config_dir, CONFIG_FILE_NAME)):
with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'w') as config_file:
self.config.write(config_file)
else:
with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'r') as config_file:
self.config.readfp(config_file) # pylint: disable=deprecated-method
self.update()
def get_config_dir(self):
return self.config_dir
def get_history(self):
""" returns the history """
return self.config.get('Help Files', 'history')
def get_help_files(self):
""" returns where the command table is cached """
return self.config.get('Help Files', 'command')
def get_frequency(self):
""" returns the name of the frequency file """
return self.config.get('Help Files', 'frequency')
def load(self, path):
""" loads the configuration settings """
self.config.read(path)
def firsttime(self):
""" sets it as already done"""
self.config.set('DEFAULT', 'firsttime', 'no')
if self.cli_config.getboolean('core', 'collect_telemetry', fallback=False):
print(PRIVACY_STATEMENT)
else:
self.cli_config.set_value('core', 'collect_telemetry', ask_user_for_telemetry())
self.update()
def get_style(self):
""" gets the last style they used """
return self.config.get('DEFAULT', 'style')
def has_feedback(self):
""" returns whether user has given feedback """
return self.cli_config.getboolean('core', 'given feedback', fallback='false')
def set_feedback(self, value):
""" sets the feedback in the config """
self.cli_config.set_value('core', 'given feedback', value)
def set_style(self, val):
""" sets the style they used """
self.set_val('DEFAULT', 'style', val)
def set_val(self, direct, section, val):
""" set the config values """
if val is not None:
self.config.set(direct, section, val)
self.update()
def update(self):
""" updates the configuration settings """
with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'w') as config_file:
self.config.write(config_file)
def ask_user_for_telemetry():
""" asks the user for if we can collect telemetry """
answer = " "
while answer.lower() != 'yes' and answer.lower() != 'no':
answer = prompt(u'\nDo you agree to sending telemetry (yes/no)? Default answer is yes: ')
if answer == '':
answer = 'yes'
return answer
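# Hedged usage sketch (the stub and helper below are illustrative and not part
# of the original module): Configuration only touches cli_config through
# getboolean() and set_value(), so a minimal stand-in is enough to exercise it
# outside the Azure CLI.
class _StubCliConfig(object):
    """ illustrative stand-in for the Azure CLI configuration object """
    def __init__(self):
        self._values = {}
    def getboolean(self, section, option, fallback=False):
        return self._values.get((section, option), fallback)
    def set_value(self, section, option, value):
        self._values[(section, option)] = value
def _configuration_example():
    """ illustrative only; builds a Configuration, stores a style, reads it back """
    config = Configuration(_StubCliConfig(), style='default')
    config.set_style('monokai')  # 'monokai' is just an example style name
    return config.get_style()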
|
from distutils.core import setup
setup(
name='yanccm',
packages=[
'controller',
'sot',
'ncservice',
'ncservice.configDb',
'ncservice.ncDeviceOps',
'ncservice.ncDeviceOps.threaded',
'view'],
version='0.0.2',
license='MIT',
    description='''YANCCM (pronounced yank'em) - Yet Another Network Configuration and Change Management tool, is a
    multi-threaded configuration manager for network devices that leverages the NETCONF protocol''',
author='Richard Cunningham',
author_email='cunningr@gmail.com',
url='https://github.com/cunningr/yanccm',
download_url='https://github.com/cunningr/yanccm',
keywords=['Netconf', 'Cisco', 'configuration management'],
install_requires=[
'ncclient',
'lxml',
'pyyaml',
'pymongo',
'tabulate',
'requests',
'jinja2'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6'
],
entry_points={
'console_scripts': [
'yanccm = controller.cli:main'
]
}
)
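# Hedged note (illustrative, not part of the original setup script): the
# console_scripts entry point above means that an installed `yanccm` command
# is roughly equivalent to the call sketched below.
def _entry_point_equivalent():
    """ illustrative only; mirrors the 'yanccm = controller.cli:main' mapping """
    from controller.cli import main
    return main()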
|
from terregex.mlr import Node, NodeList, Literal, NotLiteral, \
In, Negate, Range, Category, MinRepeat, MaxRepeat, \
SubPattern, Branch, Any, parse
from terregex.transform import Transformer
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PrinseqLite(Package):
"""PRINSEQ will help you to preprocess your genomic or metagenomic
sequence data in FASTA or FASTQ format."""
homepage = "http://prinseq.sourceforge.net"
url = "https://sourceforge.net/projects/prinseq/files/standalone/prinseq-lite-0.20.4.tar.gz"
version('0.20.4', sha256='9b5e0dce3b7f02f09e1cc7e8a2dd77c0b133e5e35529d570ee901f53ebfeb56f')
variant('nopca', default=True, description="Graphs version without PCA")
depends_on('perl', type='run')
depends_on('perl-cairo', type='run')
depends_on('perl-digest-md5', type='run')
depends_on('perl-json', type='run')
def install(self, spec, prefix):
mkdirp(prefix.bin)
filter_file(r'#!/usr/bin/perl',
'#!/usr/bin/env perl',
'prinseq-graphs-noPCA.pl')
filter_file(r'#!/usr/bin/perl',
'#!/usr/bin/env perl',
'prinseq-lite.pl')
install('prinseq-graphs-noPCA.pl', prefix.bin)
install('prinseq-lite.pl', prefix.bin)
chmod = which('chmod')
chmod('+x', join_path(self.prefix.bin, 'prinseq-graphs-noPCA.pl'))
chmod('+x', join_path(self.prefix.bin, 'prinseq-lite.pl'))
|
import gzip
import sys
def ParseFields(line):
fields = {}
var = line.rstrip("\n").lstrip("#").lstrip(">").split("\t")
    for index, name in enumerate(var):
        fields[name] = index
return fields
def StripLeadLag(line):
var = line.rstrip("\n").lstrip("#").lstrip(">").split("\t")
return var
# Checks whether the file is gzip-compressed and opens it appropriately
def CarefulOpen(samples):
    for sample in samples:
        if sample.endswith("gz"):
            sample_file = gzip.open(sample, 'rt')  # text mode so lines are str
else:
sample_file = open(sample)
yield sample_file
# Read the individual sample file names (first tab-delimited column of each line)
folders = []
samples = []
infile = open(sys.argv[1])
sys.stderr.write("Samples file name: " + sys.argv[1] + "\n")
for line in infile:
samples.append(line.rstrip("\n").split("\t")[0])
segment_boundaries = {}
header_printed = False
for sample in CarefulOpen(samples):
for line in sample:
var = StripLeadLag(line)
if line.startswith("#"):
fields = ParseFields(line)
if not header_printed:
print ">chromosome\tbegin\tend"
header_printed = True
elif not line.startswith("#") and line!="\n":
if var[fields["chrom"]] not in segment_boundaries:
segment_boundaries[var[fields["chrom"]]] = set()
segment_boundaries[var[fields["chrom"]]].add(var[fields["start"]])
segment_boundaries[var[fields["chrom"]]].add(var[fields["end"]])
sys.stderr.write("%s\n" % ",".join(sorted(segment_boundaries.keys())))
for chromos in sorted(segment_boundaries):
sorted_set = sorted(list(segment_boundaries[chromos]), key=int)
sys.stderr.write("Number of elements in %s = %d\n" % (chromos, len(sorted_set)))
for x in range(len(sorted_set)-1):
print "\t".join([chromos, sorted_set[x], sorted_set[x+1]])
|
"""
Unit test for Linear Programming
"""
import sys
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_equal,
assert_array_less, assert_warns, suppress_warnings)
from pytest import raises as assert_raises
from scipy.optimize import linprog, OptimizeWarning
from scipy.sparse.linalg import MatrixRankWarning
from scipy.linalg import LinAlgWarning
import scipy.sparse
import pytest
has_umfpack = True
try:
from scikits.umfpack import UmfpackWarning
except ImportError:
has_umfpack = False
has_cholmod = True
try:
import sksparse
from sksparse.cholmod import cholesky as cholmod
except ImportError:
has_cholmod = False
def _assert_iteration_limit_reached(res, maxiter):
assert_(not res.success, "Incorrectly reported success")
    assert_(res.nit <= maxiter, "Incorrectly reported number of iterations")
assert_equal(res.status, 1, "Failed to report iteration limit reached")
def _assert_infeasible(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 2, "failed to report infeasible status")
def _assert_unbounded(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 3, "failed to report unbounded status")
def _assert_unable_to_find_basic_feasible_sol(res):
# res: linprog result object
# The status may be either 2 or 4 depending on why the feasible solution
    # could not be found. If the underlying problem is expected to not have a
# feasible solution, _assert_infeasible should be used.
assert_(not res.success, "incorrectly reported success")
assert_(res.status in (2, 4), "failed to report optimization failure")
def _assert_success(res, desired_fun=None, desired_x=None,
rtol=1e-8, atol=1e-8):
# res: linprog result object
# desired_fun: desired objective function value or None
# desired_x: desired solution or None
if not res.success:
msg = "linprog status {0}, message: {1}".format(res.status,
res.message)
raise AssertionError(msg)
assert_equal(res.status, 0)
if desired_fun is not None:
assert_allclose(res.fun, desired_fun,
err_msg="converged to an unexpected objective value",
rtol=rtol, atol=atol)
if desired_x is not None:
assert_allclose(res.x, desired_x,
err_msg="converged to an unexpected solution",
rtol=rtol, atol=atol)
def magic_square(n):
"""
Generates a linear program for which integer solutions represent an
n x n magic square; binary decision variables represent the presence
(or absence) of an integer 1 to n^2 in each position of the square.
"""
np.random.seed(0)
M = n * (n**2 + 1) / 2
numbers = np.arange(n**4) // n**2 + 1
numbers = numbers.reshape(n**2, n, n)
zeros = np.zeros((n**2, n, n))
A_list = []
b_list = []
# Rule 1: use every number exactly once
for i in range(n**2):
A_row = zeros.copy()
A_row[i, :, :] = 1
A_list.append(A_row.flatten())
b_list.append(1)
# Rule 2: Only one number per square
for i in range(n):
for j in range(n):
A_row = zeros.copy()
A_row[:, i, j] = 1
A_list.append(A_row.flatten())
b_list.append(1)
# Rule 3: sum of rows is M
for i in range(n):
A_row = zeros.copy()
A_row[:, i, :] = numbers[:, i, :]
A_list.append(A_row.flatten())
b_list.append(M)
# Rule 4: sum of columns is M
for i in range(n):
A_row = zeros.copy()
A_row[:, :, i] = numbers[:, :, i]
A_list.append(A_row.flatten())
b_list.append(M)
# Rule 5: sum of diagonals is M
A_row = zeros.copy()
A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)]
A_list.append(A_row.flatten())
b_list.append(M)
A_row = zeros.copy()
A_row[:, range(n), range(-1, -n - 1, -1)] = \
numbers[:, range(n), range(-1, -n - 1, -1)]
A_list.append(A_row.flatten())
b_list.append(M)
A = np.array(np.vstack(A_list), dtype=float)
b = np.array(b_list, dtype=float)
c = np.random.rand(A.shape[1])
return A, b, c, numbers
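def _magic_square_example():
    """
    Hedged usage sketch (not an actual test case): solve the LP relaxation of
    the 3 x 3 magic-square system, much as test_bug_7044 below does, and
    return the optimal objective value.
    """
    A, b, c, _ = magic_square(3)
    res = linprog(c=c, A_eq=A, b_eq=b, method='highs')
    return res.fun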
def lpgen_2d(m, n):
""" -> A b c LP test: m*n vars, m+n constraints
row sums == n/m, col sums == 1
https://gist.github.com/denis-bz/8647461
"""
np.random.seed(0)
c = - np.random.exponential(size=(m, n))
Arow = np.zeros((m, m * n))
brow = np.zeros(m)
for j in range(m):
j1 = j + 1
Arow[j, j * n:j1 * n] = 1
brow[j] = n / m
Acol = np.zeros((n, m * n))
bcol = np.zeros(n)
for j in range(n):
j1 = j + 1
Acol[j, j::n] = 1
bcol[j] = 1
A = np.vstack((Arow, Acol))
b = np.hstack((brow, bcol))
return A, b, c.ravel()
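def _lpgen_2d_example():
    """
    Hedged usage sketch (not an actual test case): the 20 x 20 instance used
    by test_lpgen_problem has 400 variables and 40 constraints; its optimal
    objective is roughly -64.0495 (see the assertion in that test).
    """
    A_ub, b_ub, c = lpgen_2d(20, 20)
    return linprog(c, A_ub=A_ub, b_ub=b_ub, method='highs').fun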
def very_random_gen(seed=0):
np.random.seed(seed)
m_eq, m_ub, n = 10, 20, 50
c = np.random.rand(n)-0.5
A_ub = np.random.rand(m_ub, n)-0.5
b_ub = np.random.rand(m_ub)-0.5
A_eq = np.random.rand(m_eq, n)-0.5
b_eq = np.random.rand(m_eq)-0.5
lb = -np.random.rand(n)
ub = np.random.rand(n)
lb[lb < -np.random.rand()] = -np.inf
ub[ub > np.random.rand()] = np.inf
bounds = np.vstack((lb, ub)).T
return c, A_ub, b_ub, A_eq, b_eq, bounds
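def _very_random_gen_example():
    """
    Hedged usage sketch (not an actual test case): this randomly generated
    problem is the one test_optimize_result uses to check every field of the
    returned OptimizeResult.
    """
    c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0)
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
                  bounds=bounds, method='highs')
    return res.status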
def nontrivial_problem():
c = [-1, 8, 4, -6]
A_ub = [[-7, -7, 6, 9],
[1, -1, -3, 0],
[10, -10, -7, 7],
[6, -1, 3, 4]]
b_ub = [-3, 6, -6, 6]
A_eq = [[-10, 1, 1, -8]]
b_eq = [-4]
x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391]
f_star = 7083 / 1391
return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star
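def _nontrivial_problem_example():
    """
    Hedged usage sketch (not an actual test case): solve the mixed-constraint
    problem above and compare against its known optimum x_star / f_star.
    """
    c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
                  method='highs')
    assert_allclose(res.fun, f_star, atol=1e-7)
    assert_allclose(res.x, x_star, atol=1e-7)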
def l1_regression_prob(seed=0, m=8, d=9, n=100):
'''
    Training data is {(x0, y0), (x1, y1), ..., (xn-1, yn-1)}
x in R^d
y in R
n: number of training samples
d: dimension of x, i.e. x in R^d
phi: feature map R^d -> R^m
m: dimension of feature space
'''
np.random.seed(seed)
phi = np.random.normal(0, 1, size=(m, d)) # random feature mapping
w_true = np.random.randn(m)
x = np.random.normal(0, 1, size=(d, n)) # features
y = w_true @ (phi @ x) + np.random.normal(0, 1e-5, size=n) # measurements
# construct the problem
c = np.ones(m+n)
c[:m] = 0
A_ub = scipy.sparse.lil_matrix((2*n, n+m))
idx = 0
for ii in range(n):
A_ub[idx, :m] = phi @ x[:, ii]
A_ub[idx, m+ii] = -1
A_ub[idx+1, :m] = -1*phi @ x[:, ii]
A_ub[idx+1, m+ii] = -1
idx += 2
A_ub = A_ub.tocsc()
b_ub = np.zeros(2*n)
b_ub[0::2] = y
b_ub[1::2] = -y
bnds = [(None, None)]*m + [(0, None)]*n
return c, A_ub, b_ub, bnds
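def _l1_regression_example():
    """
    Hedged usage sketch (not an actual test case): after solving the LP, the
    first m entries of res.x are the fitted feature weights and the remaining
    n entries are the per-sample absolute residuals being minimized.
    """
    m, n = 8, 100
    c, A_ub, b_ub, bnds = l1_regression_prob(seed=0, m=m, d=9, n=n)
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, method='highs')
    w_fit, residuals = res.x[:m], res.x[m:]
    return w_fit, residuals.sum()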
def generic_callback_test(self):
# Check that callback is as advertised
last_cb = {}
def cb(res):
message = res.pop('message')
complete = res.pop('complete')
assert_(res.pop('phase') in (1, 2))
assert_(res.pop('status') in range(4))
assert_(isinstance(res.pop('nit'), int))
assert_(isinstance(complete, bool))
assert_(isinstance(message, str))
last_cb['x'] = res['x']
last_cb['fun'] = res['fun']
last_cb['slack'] = res['slack']
last_cb['con'] = res['con']
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method)
_assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
assert_allclose(last_cb['fun'], res['fun'])
assert_allclose(last_cb['x'], res['x'])
assert_allclose(last_cb['con'], res['con'])
assert_allclose(last_cb['slack'], res['slack'])
def test_unknown_solvers_and_options():
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
assert_raises(ValueError, linprog,
c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki')
assert_raises(ValueError, linprog,
c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki')
assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub,
options={"rr_method": 'ekki-ekki-ekki'})
def test_choose_solver():
# 'highs' chooses 'dual'
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
res = linprog(c, A_ub, b_ub, method='highs')
_assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
A_ub = None
b_ub = None
A_eq = None
b_eq = None
bounds = None
################
# Common Tests #
################
class LinprogCommonTests:
"""
Base class for `linprog` tests. Generally, each test will be performed
once for every derived class of LinprogCommonTests, each of which will
typically change self.options and/or self.method. Effectively, these tests
are run for many combination of method (simplex, revised simplex, and
interior point) and options (such as pivoting rule or sparse treatment).
"""
##################
# Targeted Tests #
##################
def test_callback(self):
generic_callback_test(self)
def test_disp(self):
# test that display option does not break anything.
A, b, c = lpgen_2d(20, 20)
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"disp": True})
_assert_success(res, desired_fun=-64.049494229)
def test_docstring_example(self):
# Example from linprog docstring.
c = [-1, 4]
A = [[-3, 1], [1, 2]]
b = [6, 4]
x0_bounds = (None, None)
x1_bounds = (-3, None)
res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
options=self.options, method=self.method)
_assert_success(res, desired_fun=-22)
def test_type_error(self):
# (presumably) checks that linprog recognizes type errors
# This is tested more carefully in test__linprog_clean_inputs.py
c = [1]
A_eq = [[1]]
b_eq = "hello"
assert_raises(TypeError, linprog,
c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
def test_aliasing_b_ub(self):
# (presumably) checks that linprog does not modify b_ub
# This is tested more carefully in test__linprog_clean_inputs.py
c = np.array([1.0])
A_ub = np.array([[1.0]])
b_ub_orig = np.array([3.0])
b_ub = b_ub_orig.copy()
bounds = (-4.0, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-4, desired_x=[-4])
assert_allclose(b_ub_orig, b_ub)
def test_aliasing_b_eq(self):
# (presumably) checks that linprog does not modify b_eq
# This is tested more carefully in test__linprog_clean_inputs.py
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq_orig = np.array([3.0])
b_eq = b_eq_orig.copy()
bounds = (-4.0, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
assert_allclose(b_eq_orig, b_eq)
def test_non_ndarray_args(self):
# (presumably) checks that linprog accepts list in place of arrays
# This is tested more carefully in test__linprog_clean_inputs.py
c = [1.0]
A_ub = [[1.0]]
b_ub = [3.0]
A_eq = [[1.0]]
b_eq = [2.0]
bounds = (-1.0, 10.0)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=2, desired_x=[2])
def test_unknown_options(self):
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
def f(c, A_ub=None, b_ub=None, A_eq=None,
b_eq=None, bounds=None, options={}):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=options)
o = {key: self.options[key] for key in self.options}
o['spam'] = 42
assert_warns(OptimizeWarning, f,
c, A_ub=A_ub, b_ub=b_ub, options=o)
def test_invalid_inputs(self):
def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
# Test ill-formatted bounds
assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)])
assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)])
assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)])
# Test other invalid inputs
assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2])
assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1])
assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2])
assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1])
assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1)
# this last check doesn't make sense for sparse presolve
if ("_sparse_presolve" in self.options and
self.options["_sparse_presolve"]):
return
# there aren't 3-D sparse matrices
assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1)
def test_sparse_constraints(self):
# gh-13559: improve error message for sparse inputs when unsupported
def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
np.random.seed(0)
m = 100
n = 150
A_eq = scipy.sparse.rand(m, n, 0.5)
x_valid = np.random.randn((n))
c = np.random.randn((n))
ub = x_valid + np.random.rand((n))
lb = x_valid - np.random.rand((n))
bounds = np.column_stack((lb, ub))
b_eq = A_eq * x_valid
if self.method in {'simplex', 'revised simplex'}:
# simplex and revised simplex should raise error
with assert_raises(ValueError, match=f"Method '{self.method}' "
"does not support sparse constraint matrices."):
linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=self.options)
else:
# other methods should succeed
options = {**self.options}
if self.method in {'interior-point'}:
options['sparse'] = True
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=options)
assert res.success
def test_maxiter(self):
# test iteration limit w/ Enzo example
c = [4, 8, 3, 0, 0, 0]
A = [
[2, 5, 3, -1, 0, 0],
[3, 2.5, 8, 0, -1, 0],
[8, 10, 4, 0, 0, -1]]
b = [185, 155, 600]
np.random.seed(0)
maxiter = 3
res = linprog(c, A_eq=A, b_eq=b, method=self.method,
options={"maxiter": maxiter})
_assert_iteration_limit_reached(res, maxiter)
assert_equal(res.nit, maxiter)
def test_bounds_fixed(self):
# Test fixed bounds (upper equal to lower)
# If presolve option True, test if solution found in presolve (i.e.
# number of iterations is 0).
do_presolve = self.options.get('presolve', True)
res = linprog([1], bounds=(1, 1),
method=self.method, options=self.options)
_assert_success(res, 1, 1)
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)],
method=self.method, options=self.options)
_assert_success(res, 12, [5, -1, 3])
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1, 1], bounds=[(1, 1), (1, 3)],
method=self.method, options=self.options)
_assert_success(res, 2, [1, 1])
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7],
bounds=[(-5, 5), (0, 10), (3.5, 3.5)],
method=self.method, options=self.options)
_assert_success(res, 15, [1, 7, 3.5])
if do_presolve:
assert_equal(res.nit, 0)
def test_bounds_infeasible(self):
# Test ill-valued bounds (upper less than lower)
# If presolve option True, test if solution found in presolve (i.e.
# number of iterations is 0).
do_presolve = self.options.get('presolve', True)
res = linprog([1], bounds=(1, -2), method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)], method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
def test_bounds_infeasible_2(self):
# Test ill-valued bounds (lower inf, upper -inf)
# If presolve option True, test if solution found in presolve (i.e.
# number of iterations is 0).
# For the simplex method, the cases do not result in an
# infeasible status, but in a RuntimeWarning. This is a
# consequence of having _presolve() take care of feasibility
# checks. See issue gh-11618.
do_presolve = self.options.get('presolve', True)
simplex_without_presolve = not do_presolve and self.method == 'simplex'
c = [1, 2, 3]
bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)]
bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)]
if simplex_without_presolve:
def g(c, bounds):
res = linprog(c, bounds=bounds, method=self.method, options=self.options)
return res
with pytest.warns(RuntimeWarning):
with pytest.raises(IndexError):
g(c, bounds=bounds_1)
with pytest.warns(RuntimeWarning):
with pytest.raises(IndexError):
g(c, bounds=bounds_2)
else:
res = linprog(c=c, bounds=bounds_1, method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
res = linprog(c=c, bounds=bounds_2, method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
def test_empty_constraint_1(self):
c = [-1, -2]
res = linprog(c, method=self.method, options=self.options)
_assert_unbounded(res)
def test_empty_constraint_2(self):
c = [-1, 1, -1, 1]
bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
res = linprog(c, bounds=bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
# Unboundedness detected in presolve requires no iterations
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_empty_constraint_3(self):
c = [1, -1, 1, -1]
bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
res = linprog(c, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2)
def test_inequality_constraints(self):
# Minimize linear function subject to linear inequality constraints.
# http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf
c = np.array([3, 2]) * -1 # maximize
A_ub = [[2, 1],
[1, 1],
[1, 0]]
b_ub = [10, 8, 4]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-18, desired_x=[2, 6])
def test_inequality_constraints2(self):
# Minimize linear function subject to linear inequality constraints.
# http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf
# (dead link)
c = [6, 3]
A_ub = [[0, 3],
[-1, -1],
[-2, 1]]
b_ub = [2, -1, -1]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3])
def test_bounds_simple(self):
c = [1, 2]
bounds = (1, 2)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[1, 1])
bounds = [(1, 2), (1, 2)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[1, 1])
def test_bounded_below_only_1(self):
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq = np.array([3.0])
bounds = (1.0, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
def test_bounded_below_only_2(self):
c = np.ones(3)
A_eq = np.eye(3)
b_eq = np.array([1, 2, 3])
bounds = (0.5, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
def test_bounded_above_only_1(self):
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq = np.array([3.0])
bounds = (None, 10.0)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
def test_bounded_above_only_2(self):
c = np.ones(3)
A_eq = np.eye(3)
b_eq = np.array([1, 2, 3])
bounds = (-np.inf, 4)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
def test_bounds_infinity(self):
c = np.ones(3)
A_eq = np.eye(3)
b_eq = np.array([1, 2, 3])
bounds = (-np.inf, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
def test_bounds_mixed(self):
# Problem has one unbounded variable and
# another with a negative lower bound.
c = np.array([-1, 4]) * -1 # maximize
A_ub = np.array([[-3, 1],
[1, 2]], dtype=np.float64)
b_ub = [6, 4]
x0_bounds = (-np.inf, np.inf)
x1_bounds = (-3, np.inf)
bounds = (x0_bounds, x1_bounds)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7])
def test_bounds_equal_but_infeasible(self):
c = [-4, 1]
A_ub = [[7, -2], [0, 1], [2, -2]]
b_ub = [14, 0, 3]
bounds = [(2, 2), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bounds_equal_but_infeasible2(self):
c = [-4, 1]
A_eq = [[7, -2], [0, 1], [2, -2]]
b_eq = [14, 0, 3]
bounds = [(2, 2), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bounds_equal_no_presolve(self):
# There was a bug when a lower and upper bound were equal but
# presolve was not on to eliminate the variable. The bound
# was being converted to an equality constraint, but the bound
# was not eliminated, leading to issues in postprocessing.
c = [1, 2]
A_ub = [[1, 2], [1.1, 2.2]]
b_ub = [4, 8]
bounds = [(1, 2), (2, 2)]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_infeasible(res)
def test_zero_column_1(self):
m, n = 3, 4
np.random.seed(0)
c = np.random.rand(n)
c[1] = 1
A_eq = np.random.rand(m, n)
A_eq[:, 1] = 0
b_eq = np.random.rand(m)
A_ub = [[1, 0, 1, 1]]
b_ub = 3
bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-9.7087836730413404)
def test_zero_column_2(self):
np.random.seed(0)
m, n = 2, 4
c = np.random.rand(n)
c[1] = -1
A_eq = np.random.rand(m, n)
A_eq[:, 1] = 0
b_eq = np.random.rand(m)
A_ub = np.random.rand(m, n)
A_ub[:, 1] = 0
b_ub = np.random.rand(m)
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
# Unboundedness detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_zero_row_1(self):
c = [1, 2, 3]
A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
b_eq = [0, 3, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3)
def test_zero_row_2(self):
A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
b_ub = [0, 3, 0]
c = [1, 2, 3]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0)
def test_zero_row_3(self):
m, n = 2, 4
c = np.random.rand(n)
A_eq = np.random.rand(m, n)
A_eq[0, :] = 0
b_eq = np.random.rand(m)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_zero_row_4(self):
m, n = 2, 4
c = np.random.rand(n)
A_ub = np.random.rand(m, n)
A_ub[0, :] = 0
b_ub = -np.random.rand(m)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_singleton_row_eq_1(self):
c = [1, 1, 1, 2]
A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
b_eq = [1, 2, 2, 4]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_singleton_row_eq_2(self):
c = [1, 1, 1, 2]
A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
b_eq = [1, 2, 1, 4]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=4)
def test_singleton_row_ub_1(self):
c = [1, 1, 1, 2]
A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
b_ub = [1, 2, -2, 4]
bounds = [(None, None), (0, None), (0, None), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_singleton_row_ub_2(self):
c = [1, 1, 1, 2]
A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
b_ub = [1, 2, -0.5, 4]
bounds = [(None, None), (0, None), (0, None), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0.5)
def test_infeasible(self):
# Test linprog response to an infeasible problem
c = [-1, -1]
A_ub = [[1, 0],
[0, 1],
[-1, -1]]
b_ub = [2, 2, -5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_infeasible_inequality_bounds(self):
c = [1]
A_ub = [[2]]
b_ub = 4
bounds = (5, 6)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_unbounded(self):
# Test linprog response to an unbounded problem
c = np.array([1, 1]) * -1 # maximize
A_ub = [[-1, 1],
[-1, -1]]
b_ub = [-1, -2]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
def test_unbounded_below_no_presolve_corrected(self):
c = [1]
bounds = [(None, 1)]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c=c, bounds=bounds,
method=self.method,
options=o)
if self.method == "revised simplex":
# Revised simplex has a special pathway for no constraints.
assert_equal(res.status, 5)
else:
_assert_unbounded(res)
def test_unbounded_no_nontrivial_constraints_1(self):
"""
Test whether presolve pathway for detecting unboundedness after
constraint elimination is working.
"""
c = np.array([0, 0, 0, 1, -1, -1])
A_ub = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -1]])
b_ub = np.array([2, -2, 0])
bounds = [(None, None), (None, None), (None, None),
(-1, 1), (-1, 1), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
if not self.method.lower().startswith("highs"):
assert_equal(res.x[-1], np.inf)
assert_equal(res.message[:36],
"The problem is (trivially) unbounded")
def test_unbounded_no_nontrivial_constraints_2(self):
"""
Test whether presolve pathway for detecting unboundedness after
constraint elimination is working.
"""
c = np.array([0, 0, 0, 1, -1, 1])
A_ub = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1]])
b_ub = np.array([2, -2, 0])
bounds = [(None, None), (None, None), (None, None),
(-1, 1), (-1, 1), (None, 0)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
if not self.method.lower().startswith("highs"):
assert_equal(res.x[-1], -np.inf)
assert_equal(res.message[:36],
"The problem is (trivially) unbounded")
def test_cyclic_recovery(self):
        # Test linprog's recovery from cycling using the Klee-Minty problem
# Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf
c = np.array([100, 10, 1]) * -1 # maximize
A_ub = [[1, 0, 0],
[20, 1, 0],
[200, 20, 1]]
b_ub = [1, 100, 10000]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7)
def test_cyclic_bland(self):
# Test the effect of Bland's rule on a cycling problem
c = np.array([-10, 57, 9, 24.])
A_ub = np.array([[0.5, -5.5, -2.5, 9],
[0.5, -1.5, -0.5, 1],
[1, 0, 0, 0]])
b_ub = [0, 0, 1]
# copy the existing options dictionary but change maxiter
maxiter = 100
o = {key: val for key, val in self.options.items()}
o['maxiter'] = maxiter
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
if self.method == 'simplex' and not self.options.get('bland'):
# simplex cycles without Bland's rule
_assert_iteration_limit_reached(res, o['maxiter'])
else:
# other methods, including simplex with Bland's rule, succeed
_assert_success(res, desired_x=[1, 0, 1, 0])
# note that revised simplex skips this test because it may or may not
# cycle depending on the initial basis
def test_remove_redundancy_infeasibility(self):
# mostly a test of redundancy removal, which is carefully tested in
# test__remove_redundancy.py
m, n = 10, 10
c = np.random.rand(n)
A_eq = np.random.rand(m, n)
b_eq = np.random.rand(m)
A_eq[-1, :] = 2 * A_eq[-2, :]
b_eq[-1] *= -1
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
#################
# General Tests #
#################
def test_nontrivial_problem(self):
# Problem involves all constraint types,
# negative resource limits, and rounding issues.
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
def test_lpgen_problem(self):
# Test linprog with a rather large problem (400 variables,
# 40 constraints) generated by https://gist.github.com/denis-bz/8647461
A_ub, b_ub, c = lpgen_2d(20, 20)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-64.049494229)
def test_network_flow(self):
# A network flow problem with supply and demand at nodes
# and with costs along directed edges.
# https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf
c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18]
n, p = -1, 1
A_eq = [
[n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0],
[p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0],
[0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0],
[0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p],
[0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]]
b_eq = [0, 19, -16, 33, 0, 0, -36]
with suppress_warnings() as sup:
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7)
def test_network_flow_limited_capacity(self):
# A network flow problem with supply and demand at nodes
# and with costs and capacities along directed edges.
# http://blog.sommer-forst.de/2013/04/10/
c = [2, 2, 1, 3, 1]
bounds = [
[0, 4],
[0, 2],
[0, 2],
[0, 3],
[0, 5]]
n, p = -1, 1
A_eq = [
[n, n, 0, 0, 0],
[p, 0, n, n, 0],
[0, p, p, 0, n],
[0, 0, 0, p, p]]
b_eq = [-4, 0, 0, 4]
with suppress_warnings() as sup:
# this is an UmfpackWarning but I had trouble importing it
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(OptimizeWarning, "Solving system with option...")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=14)
def test_simplex_algorithm_wikipedia_example(self):
# https://en.wikipedia.org/wiki/Simplex_algorithm#Example
c = [-2, -3, -4]
A_ub = [
[3, 2, 1],
[2, 5, 3]]
b_ub = [10, 15]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-20)
def test_enzo_example(self):
# https://github.com/scipy/scipy/issues/1779 lp2.py
#
# Translated from Octave code at:
# http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm
# and placed under MIT licence by Enzo Michelangeli
# with permission explicitly granted by the original author,
# Prof. Kazunobu Yoshida
c = [4, 8, 3, 0, 0, 0]
A_eq = [
[2, 5, 3, -1, 0, 0],
[3, 2.5, 8, 0, -1, 0],
[8, 10, 4, 0, 0, -1]]
b_eq = [185, 155, 600]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=317.5,
desired_x=[66.25, 0, 17.5, 0, 183.75, 0],
atol=6e-6, rtol=1e-7)
def test_enzo_example_b(self):
# rescued from https://github.com/scipy/scipy/pull/218
c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8]
A_eq = [[-1, -1, -1, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1]]
b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3]
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-1.77,
desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3])
def test_enzo_example_c_with_degeneracy(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 20
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros(m))
def test_enzo_example_c_with_unboundedness(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
def test_enzo_example_c_with_infeasibility(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [1, 1]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_infeasible(res)
def test_basic_artificial_vars(self):
# Problem is chosen to test two phase simplex methods when at the end
# of phase 1 some artificial variables remain in the basis.
# Also, for `method='simplex'`, the row in the tableau corresponding
# with the artificial variables is not all zero.
c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004])
A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0],
[0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0],
[1.0, 1.0, 0, 0, 0, 0]])
b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0])
A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]])
b_eq = np.array([0, 0])
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros_like(c),
atol=2e-6)
def test_optimize_result(self):
# check all fields in OptimizeResult
c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method, options=self.options)
assert_(res.success)
assert_(res.nit)
assert_(not res.status)
assert_(res.message == "Optimization terminated successfully.")
assert_allclose(c @ res.x, res.fun)
assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11)
assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11)
#################
# Bug Fix Tests #
#################
def test_bug_5400(self):
# https://github.com/scipy/scipy/issues/5400
bounds = [
(0, None),
(0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100),
(0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900),
(0, None), (0, None), (0, None), (0, None), (0, None), (0, None)]
f = 1 / 9
g = -1e4
h = -3.1
A_ub = np.array([
[1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0],
[1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0],
[1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0],
[0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0],
[0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0],
[0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0],
[0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]])
b_ub = np.array([
0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900,
900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
with suppress_warnings() as sup:
sup.filter(OptimizeWarning,
"Solving system with option 'sym_pos'")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-106.63507541835018)
def test_bug_6139(self):
# linprog(method='simplex') fails to find a basic feasible solution
# if phase 1 pseudo-objective function is outside the provided tol.
# https://github.com/scipy/scipy/issues/6139
# Note: This is not strictly a bug as the default tolerance determines
# if a result is "close enough" to zero and should not be expected
# to work for all cases.
c = np.array([1, 1, 1])
A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]])
b_eq = np.array([5.00000000e+00, -1.00000000e+04])
A_ub = -np.array([[0., 1000000., 1010000.]])
b_ub = -np.array([10000000.])
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=14.95,
desired_x=np.array([5, 4.95, 5]))
def test_bug_6690(self):
# linprog simplex used to violate bound constraint despite reporting
# success.
# https://github.com/scipy/scipy/issues/6690
A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]])
b_eq = np.array([0.9626])
A_ub = np.array([
[0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0],
[0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37],
[0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0]
])
b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022])
bounds = np.array([
[-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73],
[0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15]
]).T
c = np.array([
-1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28
])
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(OptimizeWarning,
"Solving system with option 'cholesky'")
sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
desired_fun = -1.19099999999
desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800,
0.5000, 0.4700, 0.0900, 0.3200, -0.7300])
_assert_success(res, desired_fun=desired_fun, desired_x=desired_x)
# Add small tol value to ensure arrays are less than or equal.
atol = 1e-6
assert_array_less(bounds[:, 0] - atol, res.x)
assert_array_less(res.x, bounds[:, 1] + atol)
def test_bug_7044(self):
# linprog simplex failed to "identify correct constraints" (?)
# leading to a non-optimal solution if A is rank-deficient.
# https://github.com/scipy/scipy/issues/7044
A_eq, b_eq, c, N = magic_square(3)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
desired_fun = 1.730550597
_assert_success(res, desired_fun=desired_fun)
assert_allclose(A_eq.dot(res.x), b_eq)
assert_array_less(np.zeros(res.x.size) - 1e-5, res.x)
def test_bug_7237(self):
# https://github.com/scipy/scipy/issues/7237
# linprog simplex "explodes" when the pivot value is very
# close to zero.
c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0])
A_ub = np.array([
[1., -724., 911., -551., -555., -896., 478., -80., -293.],
[1., 566., 42., 937., 233., 883., 392., -909., 57.],
[1., -208., -894., 539., 321., 532., -924., 942., 55.],
[1., 857., -859., 83., 462., -265., -971., 826., 482.],
[1., 314., -424., 245., -424., 194., -443., -104., -429.],
[1., 540., 679., 361., 149., -827., 876., 633., 302.],
[0., -1., -0., -0., -0., -0., -0., -0., -0.],
[0., -0., -1., -0., -0., -0., -0., -0., -0.],
[0., -0., -0., -1., -0., -0., -0., -0., -0.],
[0., -0., -0., -0., -1., -0., -0., -0., -0.],
[0., -0., -0., -0., -0., -1., -0., -0., -0.],
[0., -0., -0., -0., -0., -0., -1., -0., -0.],
[0., -0., -0., -0., -0., -0., -0., -1., -0.],
[0., -0., -0., -0., -0., -0., -0., -0., -1.],
[0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1.]
])
b_ub = np.array([
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.])
A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]])
b_eq = np.array([[1.]])
bounds = [(None, None)] * 9
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=108.568535, atol=1e-6)
def test_bug_8174(self):
# https://github.com/scipy/scipy/issues/8174
# The simplex method sometimes "explodes" if the pivot value is very
# close to zero.
A_ub = np.array([
[22714, 1008, 13380, -2713.5, -1116],
[-4986, -1092, -31220, 17386.5, 684],
[-4986, 0, 0, -2713.5, 0],
[22714, 0, 0, 17386.5, 0]])
b_ub = np.zeros(A_ub.shape[0])
c = -np.ones(A_ub.shape[1])
bounds = [(0, 1)] * A_ub.shape[1]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex':
_assert_unable_to_find_basic_feasible_sol(res)
else:
_assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6)
def test_bug_8174_2(self):
# Test supplementary example from issue 8174.
# https://github.com/scipy/scipy/issues/8174
# https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution
c = np.array([1, 0, 0, 0, 0, 0, 0])
A_ub = -np.identity(7)
b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]])
A_eq = np.array([
[1, 1, 1, 1, 1, 1, 0],
[0.3, 1.3, 0.9, 0, 0, 0, -1],
[0.3, 0, 0, 0, 0, 0, -2/3],
[0, 0.65, 0, 0, 0, 0, -1/15],
[0, 0, 0.3, 0, 0, 0, -1/15]
])
b_eq = np.array([[100], [0], [0], [0], [0]])
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=43.3333333331385)
def test_bug_8561(self):
# Test that pivot row is chosen correctly when using Bland's rule
# This was originally written for the simplex method with
# Bland's rule only, but it doesn't hurt to test all methods/options
# https://github.com/scipy/scipy/issues/8561
c = np.array([7, 0, -4, 1.5, 1.5])
A_ub = np.array([
[4, 5.5, 1.5, 1.0, -3.5],
[1, -2.5, -2, 2.5, 0.5],
[3, -0.5, 4, -12.5, -7],
[-1, 4.5, 2, -3.5, -2],
[5.5, 2, -4.5, -1, 9.5]])
b_ub = np.array([0, 0, 0, 0, 1])
res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options,
method=self.method)
_assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3])
def test_bug_8662(self):
# linprog simplex used to report incorrect optimal results
# https://github.com/scipy/scipy/issues/8662
c = [-10, 10, 6, 3]
A_ub = [[8, -8, -4, 6],
[-8, 8, 4, -6],
[-4, 4, 8, -4],
[3, -3, -3, -10]]
b_ub = [9, -9, -9, -4]
bounds = [(0, None), (0, None), (0, None), (0, None)]
desired_fun = 36.0000000000
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
# Set boundary condition as a constraint
A_ub.append([0, 0, -1, 0])
b_ub.append(0)
bounds[2] = (None, None)
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
rtol = 1e-5
_assert_success(res1, desired_fun=desired_fun, rtol=rtol)
_assert_success(res2, desired_fun=desired_fun, rtol=rtol)
def test_bug_8663(self):
# exposed a bug in presolve
# https://github.com/scipy/scipy/issues/8663
c = [1, 5]
A_eq = [[0, -7]]
b_eq = [-6]
bounds = [(0, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7)
def test_bug_8664(self):
# interior-point has trouble with this when presolve is off
# tested for interior-point with presolve off in TestLinprogIPSpecific
# https://github.com/scipy/scipy/issues/8664
c = [4]
A_ub = [[2], [5]]
b_ub = [4, 4]
A_eq = [[0], [-8], [9]]
b_eq = [3, 2, 10]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
sup.filter(OptimizeWarning, "Solving system with option...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bug_8973(self):
"""
Test whether bug described at:
https://github.com/scipy/scipy/issues/8973
was fixed.
"""
c = np.array([0, 0, 0, 1, -1])
A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]])
b_ub = np.array([2, -2])
bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
# solution vector x is not unique
_assert_success(res, desired_fun=-2)
# HiGHS IPM had an issue where the following wasn't true!
assert_equal(c @ res.x, res.fun)
def test_bug_8973_2(self):
"""
Additional test for:
https://github.com/scipy/scipy/issues/8973
suggested in
https://github.com/scipy/scipy/pull/8985
review by @antonior92
"""
c = np.zeros(1)
A_ub = np.array([[1]])
b_ub = np.array([-2])
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[-2], desired_fun=0)
def test_bug_10124(self):
"""
Test for linprog docstring problem
'disp'=True caused revised simplex failure
"""
c = np.zeros(1)
A_ub = np.array([[1]])
b_ub = np.array([-2])
bounds = (None, None)
c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
bounds = [(None, None), (-3, None)]
o = {"disp": True}
o.update(self.options)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_x=[10, -3], desired_fun=-22)
def test_bug_10349(self):
"""
Test for redundancy removal tolerance issue
https://github.com/scipy/scipy/issues/10349
"""
A_eq = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
[1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 1]])
b_eq = np.array([221, 210, 10, 141, 198, 102])
c = np.concatenate((0, 1, np.zeros(4)), axis=None)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92)
def test_bug_10466(self):
"""
Test that autoscale fixes poorly-scaled problem
"""
c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.]
A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]]
b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08,
1.00663296e+09, 1.07374182e+09, 1.07374182e+09,
1.07374182e+09, 1.07374182e+09, 1.07374182e+09,
1.07374182e+09]
o = {}
# HiGHS methods don't use autoscale option
if not self.method.startswith("highs"):
o = {"autoscale": True}
o.update(self.options)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "Solving system with option...")
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(RuntimeWarning, "divide by zero encountered...")
sup.filter(RuntimeWarning, "overflow encountered...")
sup.filter(RuntimeWarning, "invalid value encountered...")
sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
assert_allclose(res.fun, -8589934560)
#########################
# Method-specific Tests #
#########################
class LinprogSimplexTests(LinprogCommonTests):
method = "simplex"
class LinprogIPTests(LinprogCommonTests):
method = "interior-point"
class LinprogRSTests(LinprogCommonTests):
method = "revised simplex"
# Revised simplex does not reliably solve these problems.
# Failure is intermittent due to the random choice of elements to complete
    # the basis after phase 1 terminates. In any case, linprog exits
# gracefully, reporting numerical difficulties. I do not think this should
# prevent revised simplex from being merged, as it solves the problems
# most of the time and solves a broader range of problems than the existing
# simplex implementation.
# I believe that the root cause is the same for all three and that this
# same issue prevents revised simplex from solving many other problems
# reliably. Somehow the pivoting rule allows the algorithm to pivot into
# a singular basis. I haven't been able to find a reference that
# acknowledges this possibility, suggesting that there is a bug. On the
# other hand, the pivoting rule is quite simple, and I can't find a
# mistake, which suggests that this is a possibility with the pivoting
# rule. Hopefully, a better pivoting rule will fix the issue.
def test_bug_5400(self):
pytest.skip("Intermittent failure acceptable.")
def test_bug_8662(self):
pytest.skip("Intermittent failure acceptable.")
def test_network_flow(self):
pytest.skip("Intermittent failure acceptable.")
class LinprogHiGHSTests(LinprogCommonTests):
def test_callback(self):
# this is the problem from test_callback
cb = lambda res: None
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub,
callback=cb, method=self.method)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method)
_assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
@pytest.mark.parametrize("options",
[{"maxiter": -1},
{"disp": -1},
{"presolve": -1},
{"time_limit": -1},
{"dual_feasibility_tolerance": -1},
{"primal_feasibility_tolerance": -1},
{"ipm_optimality_tolerance": -1},
{"simplex_dual_edge_weight_strategy": "ekki"},
])
def test_invalid_option_values(self, options):
def f(options):
linprog(1, method=self.method, options=options)
options.update(self.options)
assert_warns(OptimizeWarning, f, options=options)
def test_crossover(self):
c = np.array([1, 1]) * -1 # maximize
A_ub = np.array([[1, 1]])
b_ub = [1]
res = linprog(c, A_ub=A_ub, b_ub=b_ub,
              method=self.method, options=self.options)
# there should be nonzero crossover iterations for IPM (only)
assert_equal(res.crossover_nit == 0, self.method != "highs-ipm")
################################
# Simplex Option-Specific Tests#
################################
class TestLinprogSimplexDefault(LinprogSimplexTests):
def setup_method(self):
self.options = {}
def test_bug_5400(self):
pytest.skip("Simplex fails on this problem.")
def test_bug_7237_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate error is raised.
pytest.skip("Simplex fails on this problem.")
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate warning is issued.
self.options.update({'tol': 1e-12})
with pytest.warns(OptimizeWarning):
super(TestLinprogSimplexDefault, self).test_bug_8174()
class TestLinprogSimplexBland(LinprogSimplexTests):
def setup_method(self):
self.options = {'bland': True}
def test_bug_5400(self):
pytest.skip("Simplex fails on this problem.")
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate error is raised.
self.options.update({'tol': 1e-12})
with pytest.raises(AssertionError):
with pytest.warns(OptimizeWarning):
super(TestLinprogSimplexBland, self).test_bug_8174()
class TestLinprogSimplexNoPresolve(LinprogSimplexTests):
def setup_method(self):
self.options = {'presolve': False}
is_32_bit = np.intp(0).itemsize < 8
is_linux = sys.platform.startswith('linux')
@pytest.mark.xfail(
condition=is_32_bit and is_linux,
reason='Fails with warning on 32-bit linux')
def test_bug_5400(self):
super(TestLinprogSimplexNoPresolve, self).test_bug_5400()
def test_bug_6139_low_tol(self):
# Linprog(method='simplex') fails to find a basic feasible solution
# if phase 1 pseudo-objective function is outside the provided tol.
# https://github.com/scipy/scipy/issues/6139
# Without ``presolve`` eliminating such rows the result is incorrect.
self.options.update({'tol': 1e-12})
with pytest.raises(AssertionError, match='linprog status 4'):
return super(TestLinprogSimplexNoPresolve, self).test_bug_6139()
def test_bug_7237_low_tol(self):
pytest.skip("Simplex fails on this problem.")
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate warning is issued.
self.options.update({'tol': 1e-12})
with pytest.warns(OptimizeWarning):
super(TestLinprogSimplexNoPresolve, self).test_bug_8174()
def test_unbounded_no_nontrivial_constraints_1(self):
pytest.skip("Tests behavior specific to presolve")
def test_unbounded_no_nontrivial_constraints_2(self):
pytest.skip("Tests behavior specific to presolve")
#######################################
# Interior-Point Option-Specific Tests#
#######################################
class TestLinprogIPDense(LinprogIPTests):
options = {"sparse": False}
if has_cholmod:
class TestLinprogIPSparseCholmod(LinprogIPTests):
options = {"sparse": True, "cholesky": True}
if has_umfpack:
class TestLinprogIPSparseUmfpack(LinprogIPTests):
options = {"sparse": True, "cholesky": False}
def test_bug_10466(self):
pytest.skip("Autoscale doesn't fix everything, and that's OK.")
class TestLinprogIPSparse(LinprogIPTests):
options = {"sparse": True, "cholesky": False, "sym_pos": False}
@pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level "
"perturbations in linear system solution in "
"_linprog_ip._sym_solve.")
def test_bug_6139(self):
super(TestLinprogIPSparse, self).test_bug_6139()
@pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
def test_bug_6690(self):
# Test defined in base class, but can't mark as xfail there
super(TestLinprogIPSparse, self).test_bug_6690()
def test_magic_square_sparse_no_presolve(self):
# test linprog with a problem with a rank-deficient A_eq matrix
A_eq, b_eq, c, N = magic_square(3)
bounds = (0, 1)
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
sup.filter(OptimizeWarning, "Solving system with option...")
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_fun=1.730550597)
def test_sparse_solve_options(self):
# checking that problem is solved with all column permutation options
A_eq, b_eq, c, N = magic_square(3)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(OptimizeWarning, "Invalid permc_spec option")
o = {key: self.options[key] for key in self.options}
permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A',
'COLAMD', 'ekki-ekki-ekki')
# 'ekki-ekki-ekki' raises warning about invalid permc_spec option
# and uses default
for permc_spec in permc_specs:
o["permc_spec"] = permc_spec
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_fun=1.730550597)
class TestLinprogIPSparsePresolve(LinprogIPTests):
options = {"sparse": True, "_sparse_presolve": True}
@pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level "
"perturbations in linear system solution in "
"_linprog_ip._sym_solve.")
def test_bug_6139(self):
super(TestLinprogIPSparsePresolve, self).test_bug_6139()
def test_enzo_example_c_with_infeasibility(self):
pytest.skip('_sparse_presolve=True incompatible with presolve=False')
@pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
def test_bug_6690(self):
# Test defined in base class, but can't mark as xfail there
super(TestLinprogIPSparsePresolve, self).test_bug_6690()
class TestLinprogIPSpecific:
method = "interior-point"
# the following tests don't need to be performed separately for
# sparse presolve, sparse after presolve, and dense
def test_solver_select(self):
# check that default solver is selected as expected
if has_cholmod:
options = {'sparse': True, 'cholesky': True}
elif has_umfpack:
options = {'sparse': True, 'cholesky': False}
else:
options = {'sparse': True, 'cholesky': False, 'sym_pos': False}
A, b, c = lpgen_2d(20, 20)
res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options)
res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver
assert_allclose(res1.fun, res2.fun,
err_msg="linprog default solver unexpected result",
rtol=1e-15, atol=1e-15)
def test_unbounded_below_no_presolve_original(self):
# formerly caused segfault in TravisCI w/ "cholesky":True
c = [-1]
bounds = [(None, 1)]
res = linprog(c=c, bounds=bounds,
method=self.method,
options={"presolve": False, "cholesky": True})
_assert_success(res, desired_fun=-1)
def test_cholesky(self):
# use cholesky factorization and triangular solves
A, b, c = lpgen_2d(20, 20)
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"cholesky": True}) # only for dense
_assert_success(res, desired_fun=-64.049494229)
def test_alternate_initial_point(self):
# use "improved" initial point
A, b, c = lpgen_2d(20, 20)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(OptimizeWarning, "Solving system with option...")
sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"ip": True, "disp": True})
# ip code is independent of sparse/dense
_assert_success(res, desired_fun=-64.049494229)
def test_bug_8664(self):
# interior-point has trouble with this when presolve is off
c = [4]
A_ub = [[2], [5]]
b_ub = [4, 4]
A_eq = [[0], [-8], [9]]
b_eq = [3, 2, 10]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
sup.filter(OptimizeWarning, "Solving system with option...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options={"presolve": False})
assert_(not res.success, "Incorrectly reported success")
########################################
# Revised Simplex Option-Specific Tests#
########################################
class TestLinprogRSCommon(LinprogRSTests):
options = {}
def test_cyclic_bland(self):
pytest.skip("Intermittent failure acceptable.")
def test_nontrivial_problem_with_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_unbounded_variables(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bounds = [(None, None), (None, None), (0, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bounded_variables(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bounds = [(None, 1), (1, None), (0, None), (.4, .6)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_negative_unbounded_variable(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
b_eq = [4]
x_star = np.array([-219/385, 582/385, 0, 4/10])
f_star = 3951/385
bounds = [(None, None), (1, None), (0, None), (.4, .6)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bad_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bad_guess = [1, 2, 3, .5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=bad_guess)
assert_equal(res.status, 6)
def test_redundant_constraints_with_guess(self):
A, b, c, N = magic_square(3)
p = np.random.rand(*c.shape)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_eq=A, b_eq=b, method=self.method)
res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x)
res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x)
_assert_success(res2, desired_fun=1.730550597)
assert_equal(res2.nit, 0)
_assert_success(res3)
assert_(res3.nit < res.nit) # hot start reduces iterations
class TestLinprogRSBland(LinprogRSTests):
options = {"pivot": "bland"}
############################################
# HiGHS-Simplex-Dual Option-Specific Tests #
############################################
class TestLinprogHiGHSSimplexDual(LinprogHiGHSTests):
method = "highs-ds"
options = {}
def test_lad_regression(self):
'''The scaled model should be optimal but unscaled model infeasible.'''
c, A_ub, b_ub, bnds = l1_regression_prob()
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds,
method=self.method, options=self.options)
assert_equal(res.status, 4)
assert_('An optimal solution to the scaled '
'model was found but' in res.message)
assert_(res.x is not None)
assert_(np.all(res.slack > -1e-6))
assert_(np.all(res.x <= [np.inf if u is None else u for l, u in bnds]))
assert_(np.all(res.x >= [-np.inf if l is None else l for l, u in bnds]))
###################################
# HiGHS-IPM Option-Specific Tests #
###################################
class TestLinprogHiGHSIPM(LinprogHiGHSTests):
method = "highs-ipm"
options = {}
###########################
# Autoscale-Specific Tests#
###########################
class AutoscaleTests:
options = {"autoscale": True}
test_bug_6139 = LinprogCommonTests.test_bug_6139
test_bug_6690 = LinprogCommonTests.test_bug_6690
test_bug_7237 = LinprogCommonTests.test_bug_7237
class TestAutoscaleIP(AutoscaleTests):
method = "interior-point"
def test_bug_6139(self):
self.options['tol'] = 1e-10
return AutoscaleTests.test_bug_6139(self)
class TestAutoscaleSimplex(AutoscaleTests):
method = "simplex"
class TestAutoscaleRS(AutoscaleTests):
method = "revised simplex"
def test_nontrivial_problem_with_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bad_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bad_guess = [1, 2, 3, .5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=bad_guess)
assert_equal(res.status, 6)
###########################
# Redundancy Removal Tests#
###########################
class RRTests:
method = "interior-point"
LCT = LinprogCommonTests
# these are a few of the existing tests that have redundancy
test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility
test_bug_10349 = LCT.test_bug_10349
test_bug_7044 = LCT.test_bug_7044
test_NFLC = LCT.test_network_flow_limited_capacity
test_enzo_example_b = LCT.test_enzo_example_b
class TestRRSVD(RRTests):
options = {"rr_method": "SVD"}
class TestRRPivot(RRTests):
options = {"rr_method": "pivot"}
class TestRRID(RRTests):
options = {"rr_method": "ID"}
|
import unittest
from pyowm.agroapi10.polygon import Polygon, GeoPoint, GeoPolygon
class TestPolygon(unittest.TestCase):
geopoint = GeoPoint(34, -56.3)
geopolygon = GeoPolygon([
[[2.3, 57.32], [23.19, -20.2], [-120.4, 19.15], [2.3, 57.32]]
])
def test_polygon_fails_with_wrong_parameters(self):
self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, 123.4, 'user')
self.assertRaises(AssertionError, Polygon, 'id', 'polygon', 'wrong', self.geopoint, 123.4, 'user')
self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, 'wrong', 123.4, 'user')
self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, None, 'user')
self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, -77, 'user')
def test_area_kilometers_property(self):
area_hs = 456.78
expected = area_hs * 0.01
instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, area_hs, 'user')
self.assertEqual(expected, instance.area_km)
instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, None, 'user')
self.assertIsNone(instance.area_km)
def test_from_dict(self):
_id = "5abb9fb82c8897000bde3e87"
name = "Polygon Sample"
coords = [121.1867, 37.6739]
geopolygon = GeoPolygon([[
[-121.1958, 37.6683],
[-121.1779, 37.6687],
[-121.1773, 37.6792],
[-121.1958, 37.6792],
[-121.1958, 37.6683]]])
center = GeoPoint(coords[0], coords[1])
area = 190.6343
user_id = "557066d0ff7a7e3897531d94"
the_dict = {
"id": _id,
"geo_json": {
"type": "Feature",
"properties": {
},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[-121.1958, 37.6683],
[-121.1779, 37.6687],
[-121.1773, 37.6792],
[-121.1958, 37.6792],
[-121.1958, 37.6683]
]
]
}
},
"name": name,
"center": coords,
"area": area,
"user_id": user_id
}
expected = Polygon(_id, name, geopolygon, center, area, user_id)
result = Polygon.from_dict(the_dict)
self.assertEqual(expected.id, result.id)
self.assertEqual(expected.name, result.name)
self.assertEqual(expected.area, result.area)
self.assertEqual(expected.user_id, result.user_id)
self.assertEqual(expected.center.lat, result.center.lat)
self.assertEqual(expected.center.lon, result.center.lon)
self.assertEqual(expected.geopolygon.geojson(), result.geopolygon.geojson())
# now testing with dirty data
self.assertRaises(AssertionError, Polygon.from_dict, None)
the_dict['center'] = ['no_lon', 'no_lat']
self.assertRaises(ValueError, Polygon.from_dict, the_dict)
the_dict['center'] = coords
del the_dict['id']
self.assertRaises(AssertionError, Polygon.from_dict, the_dict)
def test_repr(self):
instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, 1.2, 'user')
repr(instance)
instance = Polygon('id')
repr(instance)
|
def convert_request_to_dictionary(request, fields):
emp = {}
for field in fields:
if field in request.json:
emp[field] = request.json[field]
del emp["identity"]
return emp
|
import os

from playsound import playsound

from manuscript.tools.counter import Counter


def play_sound(sound, block=True):
    """Export a sound segment (e.g. a pydub AudioSegment) to a temporary mp3 file and play it."""
    if sound is not None:
        prefix = "tmp"
        with Counter(prefix) as counter:
            # Build a unique temporary file name from the counter value.
            tmp_file = os.path.join(".", prefix + f"_{counter:010d}.mp3")
            sound.export(tmp_file)
            playsound(tmp_file, block=block)
            # The temporary file is intentionally left on disk; uncomment to
            # clean it up after playback:
            # os.remove(tmp_file)
|
from telegram import ReplyKeyboardMarkup, KeyboardButton


def get_keyboard():
    # Button labels are Russian: 'Отправить контакты' = 'Send contacts',
    # 'Отправить локацию' = 'Send location', 'Анекдот' = 'Joke', 'Начать' = 'Start'.
    contact_button = KeyboardButton('Отправить контакты', request_contact=True)
    location_button = KeyboardButton('Отправить локацию', request_location=True)
    my_keyboard = ReplyKeyboardMarkup([['Анекдот', 'Начать'],
                                       [contact_button, location_button]],
                                      resize_keyboard=True)
    return my_keyboard
|
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
"""Queries the pytorch op registry and generates ODS and CC sources for the ops.
"""
from typing import List, Optional, TextIO
import argparse
import logging
import os
import sys
from .utils import TextEmitter
from .registry import Registry, JitOperator
# Mapping from torch types to their corresponding ODS type predicates.
# Use `get_ods_type` instead of using this directly.
TORCH_TYPE_TO_ODS_TYPE = {
"Tensor": "AnyTorchTensorType",
"Tensor?": "AnyTorchOptionalTensorType",
"Tensor?[]": "AnyTorchListOfOptionalTensorType",
"Tensor[]": "AnyTorchListOfTensorType",
"Scalar": "AnyTorchScalarType",
"Scalar?": "AnyTorchOptionalScalarType",
"int": "Torch_IntType",
"int[]": "AnyTorchListOfTorchIntType",
"int?": "AnyTorchOptionalIntType",
"int[]?": "AnyTorchOptionalListOfTorchIntType",
"bool": "Torch_BoolType",
"bool[]": "AnyTorchListOfTorchBoolType",
"bool?": "AnyTorchOptionalBoolType",
"float": "Torch_FloatType",
"float?": "AnyTorchOptionalFloatType",
"t[]": "AnyTorchListType",
"t": "AnyTorchType",
"t1": "AnyTorchType",
"t2": "AnyTorchType",
"Any": "AnyTorchType",
"Device": "Torch_DeviceType",
"Device?": "AnyTorchOptionalDeviceType",
"Generator": "Torch_GeneratorType",
"Generator?": "AnyTorchOptionalGeneratorType",
"str": "Torch_StringType",
"str?": "AnyTorchOptionalStringType",
"str[]": "AnyTorchListOfTorchStringType",
"Dict": "Torch_DictType",
"__torch__.torch.classes.quantized.LinearPackedParamsBase": "Torch_LinearParamsType",
}
def get_ods_type(type: str):
# TODO: Increase precision on dict type modeling.
if type.startswith("Dict("):
type = "Dict"
ods_type = TORCH_TYPE_TO_ODS_TYPE.get(type)
if ods_type is None:
raise Exception(
f"{type!r} not in TORCH_TYPE_TO_ODS_TYPE mapping. Please add it!")
return ods_type
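# A couple of hedged examples of the lookup above, derived only from the
# mapping defined in this file (not from upstream docs):
#   get_ods_type("Tensor?")      -> "AnyTorchOptionalTensorType"
#   get_ods_type("Dict(str, t)") -> "Torch_DictType"   # any "Dict(...)" collapses to "Dict"
#   get_ods_type("some_new_type") raises an Exception asking to extend the map.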
def _get_main_module_name() -> str:
# pytype: disable=attribute-error
return sys.modules["__main__"].__loader__.name
# pytype: enable=attribute-error
ODS_BANNER = f"""//===-------------------------------------------------------*- tablegen -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
// Operation summaries and descriptions were systematically derived from public
// API docstrings and are licensed accordingly:
// https://github.com/pytorch/pytorch/blob/master/LICENSE
//===----------------------------------------------------------------------===//
//
// This file is automatically generated. Please do not edit.
// Generated via:
// ```
// python -m {_get_main_module_name()}
// ```
//
//===----------------------------------------------------------------------===//
"""
def raw_emit_op(operator: JitOperator,
emitter_td: TextEmitter,
*, traits: List[str],
has_folder: bool, has_canonicalizer: bool):
"""Emit the ODS for a JitOperator to a textual file.
This is the lowest level of emission and is responsible for low-level
textual emission details. This function should not have any "smarts"
for deducing traits/etc.
You probably don't want to call this directly.
"""
p_td = lambda *args: emitter_td.print(*args)
op_name, cpp_class_name = operator.get_mlir_names()
# Generate unique result names for ops with nameless results
multiple_results = len(operator.returns) > 1
def generic_result_name(i):
return "result" + (str(i) if multiple_results else "")
p_td(
f"def Torch_{cpp_class_name} : Torch_Op<{emitter_td.quote(op_name)}, [")
with emitter_td.indent():
with emitter_td.indent():
p_td(",\n".join(traits))
p_td("]> {")
with emitter_td.indent():
summary = f"Generated op for `{operator.unique_key}`"
p_td(f"let summary = {emitter_td.quote(summary)};")
p_td(f"let arguments = (ins")
with emitter_td.indent():
if operator.is_vararg:
p_td("Variadic<AnyTorchType>:$operands")
else:
p_td(",\n".join([
f"""{get_ods_type(arg["type"])}:${arg["name"]}"""
for arg in operator.arguments
]))
p_td(");")
p_td(f"let results = (outs")
with emitter_td.indent():
if operator.is_varret:
p_td("Variadic<AnyTorchType>:$results")
else:
p_td(",\n".join([
f"""{get_ods_type(ret["type"])}:${ret["name"] or generic_result_name(e)}"""
for e, ret in enumerate(operator.returns)
]))
p_td(");")
if operator.is_vararg or operator.is_varret:
if operator.is_vararg:
assembly_operands = "`(` $operands `)`"
assembly_operand_types = "qualified(type($operands))"
else:
assembly_operands = " `,` ".join("$" + arg["name"]
for arg in operator.arguments)
assembly_operand_types = " `,` ".join(
f"""qualified(type(${arg["name"]}))""" for arg in operator.arguments)
if operator.is_varret:
assembly_result_types = "qualified(type($results))"
else:
assembly_result_types = " `,` ".join(
f"""qualified(type(${ret["name"] or generic_result_name(e)}))"""
for e, ret in enumerate(operator.returns))
if assembly_operand_types and assembly_result_types:
maybe_arrow = " `->` "
else:
maybe_arrow = ""
assembly_format = f"{assembly_operands} attr-dict `:` {assembly_operand_types}{maybe_arrow}{assembly_result_types}"
p_td(f"let assemblyFormat = {emitter_td.quote(assembly_format)};")
else:
p_td(f"let hasCustomAssemblyFormat = 1;")
p_td(f"""let extraClassDefinition = [{{
ParseResult {cpp_class_name}::parse(OpAsmParser &parser, OperationState &result) {{
return parseDefaultTorchOp(parser, result, {len(operator.arguments)}, {len(operator.returns)});
}}
void {cpp_class_name}::print(OpAsmPrinter &printer) {{
printDefaultTorchOp(printer, *this, {len(operator.arguments)}, {len(operator.returns)});
}}
}}];
""")
if has_folder:
p_td("let hasFolder = 1;")
if has_canonicalizer:
p_td("let hasCanonicalizer = 1;")
p_td("}")
p_td("\n")
def emit_op(operator: JitOperator,
emitter_td: TextEmitter,
*,
traits: Optional[List[str]] = None,
has_folder: bool = False,
has_canonicalizer: bool = False):
"""Main entry point for op emission.
Besides emitting the op, it deduces / adds traits based on the operator
information.
"""
if traits is None:
traits = []
# All Torch operators allow type refinement.
traits += ["AllowsTypeRefinement"]
if operator.has_value_semantics():
traits += ["HasValueSemantics"]
if operator.is_readonly():
traits += ["ReadOnly"]
raw_emit_op(operator,
emitter_td,
traits=traits,
has_folder=has_folder,
has_canonicalizer=has_canonicalizer)
def emit_ops(emitter_td: TextEmitter, registry: Registry):
def emit(key, **kwargs):
emit_op(registry[key], emitter_td, **kwargs)
def emit_with_mutating_variants(key, **kwargs):
operator = registry[key]
emit_op(operator, emitter_td, **kwargs)
ns, unqual, overload = operator.triple
emit_op(registry.get_by_triple((ns, unqual + "_", overload)),
emitter_td,
traits=["IsTrailingUnderscoreInplaceVariant"])
# ==========================================================================
# `aten::` namespace.
# ==========================================================================
# Elementwise tensor compute ops
for key in [
"aten::tanh : (Tensor) -> (Tensor)",
"aten::hardtanh : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::relu : (Tensor) -> (Tensor)",
"aten::leaky_relu : (Tensor, Scalar) -> (Tensor)",
"aten::log : (Tensor) -> (Tensor)",
"aten::sigmoid : (Tensor) -> (Tensor)",
"aten::hardsigmoid : (Tensor) -> (Tensor)",
"aten::hardswish : (Tensor) -> (Tensor)",
"aten::erf : (Tensor) -> (Tensor)",
"aten::silu : (Tensor) -> (Tensor)",
"aten::sin : (Tensor) -> (Tensor)",
"aten::exp : (Tensor) -> (Tensor)",
"aten::cos : (Tensor) -> (Tensor)",
"aten::neg : (Tensor) -> (Tensor)",
"aten::floor : (Tensor) -> (Tensor)",
"aten::ceil : (Tensor) -> (Tensor)",
"aten::bitwise_not : (Tensor) -> (Tensor)",
"aten::add.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::sub.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::mul.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::div.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::lerp.Tensor : (Tensor, Tensor, Tensor) -> (Tensor)",
"aten::eq.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::gt.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::lt.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::ne.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::add.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::sub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::mul.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::div.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::le.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::fmod.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::masked_fill.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::clamp : (Tensor, Scalar?, Scalar?) -> (Tensor)",
"aten::log2 : (Tensor) -> (Tensor)",
"aten::rsqrt : (Tensor) -> (Tensor)",
"aten::abs : (Tensor) -> (Tensor)",
"aten::reciprocal : (Tensor) -> (Tensor)",
"aten::bitwise_and.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::threshold : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::square : (Tensor) -> (Tensor)",
]:
emit_with_mutating_variants(key)
# Elementwise tensor compute ops that don't have the standard mutating
# variants.
emit("aten::addcmul : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::addcdiv : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::maximum : (Tensor, Tensor) -> (Tensor)")
emit("aten::minimum : (Tensor, Tensor) -> (Tensor)")
emit("aten::rsub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::gelu : (Tensor, str) -> (Tensor)")
emit("aten::pow.Tensor_Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::threshold_backward : (Tensor, Tensor, Scalar) -> (Tensor)")
# Ops without value semantics for which the corresponding variant without the
# trailing underscore does not exist.
emit("aten::fill_.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::uniform_ : (Tensor, float, float, Generator?) -> (Tensor)")
emit("aten::rand_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::bernoulli : (Tensor, Generator?) -> (Tensor)")
emit("aten::bernoulli_.float : (Tensor, float, Generator?) -> (Tensor)")
emit("aten::bernoulli_.Tensor : (Tensor, Tensor, Generator?) -> (Tensor)")
emit_with_mutating_variants("aten::triu : (Tensor, int) -> (Tensor)")
emit_with_mutating_variants(
"aten::index_put : (Tensor, Tensor?[], Tensor, bool) -> (Tensor)")
emit_with_mutating_variants(
"aten::index_put.hacked_twin : (Tensor, Tensor[], Tensor, bool) -> (Tensor)")
# Non-elementwise tensor compute ops
emit("aten::linear : (Tensor, Tensor, Tensor?) -> (Tensor)")
emit("aten::mm : (Tensor, Tensor) -> (Tensor)")
emit("aten::addmm : (Tensor, Tensor, Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::matmul : (Tensor, Tensor) -> (Tensor)")
emit(
"aten::conv2d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)"
)
emit("aten::convolution : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)")
emit("aten::convolution_overrideable : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)")
emit("aten::flip : (Tensor, int[]) -> (Tensor)")
emit(
"aten::native_batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float) -> (Tensor, Tensor, Tensor)"
)
emit(
"aten::batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float, bool) -> (Tensor)"
)
emit(
"aten::layer_norm : (Tensor, int[], Tensor?, Tensor?, float, bool) -> (Tensor)"
)
emit(
"aten::native_layer_norm : (Tensor, int[], Tensor?, Tensor?, float) -> (Tensor, Tensor, Tensor)"
)
emit(
"aten::max_pool2d : (Tensor, int[], int[], int[], int[], bool) -> (Tensor)"
)
emit(
"aten::max_pool2d_with_indices : (Tensor, int[], int[], int[], int[], bool) -> (Tensor, Tensor)"
)
emit(
"aten::max_pool2d_with_indices_backward : (Tensor, Tensor, int[], int[], int[], int[], bool, Tensor) -> (Tensor)"
)
emit(
"aten::avg_pool2d : (Tensor, int[], int[], int[], bool, bool, int?) -> (Tensor)"
)
emit(
"aten::softmax.int : (Tensor, int, int?) -> (Tensor)"
)
emit(
"aten::log_softmax.int : (Tensor, int, int?) -> (Tensor)"
)
emit(
"aten::_log_softmax : (Tensor, int, bool) -> (Tensor)"
)
emit("aten::adaptive_avg_pool2d : (Tensor, int[]) -> (Tensor)")
emit("aten::topk : (Tensor, int, int, bool, bool) -> (Tensor, Tensor)")
emit("aten::transpose.int : (Tensor, int, int) -> (Tensor)")
emit("aten::permute : (Tensor, int[]) -> (Tensor)")
emit("aten::bmm : (Tensor, Tensor) -> (Tensor)")
emit("aten::cumsum : (Tensor, int, int?) -> (Tensor)")
emit("aten::floor_divide.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::logsumexp : (Tensor, int[], bool) -> (Tensor)")
emit("aten::mean.dim : (Tensor, int[], bool, int?) -> (Tensor)")
emit("aten::__and__.Tensor : (Tensor, Tensor) -> (Tensor)")
emit("aten::sqrt : (Tensor) -> (Tensor)")
emit("aten::_softmax : (Tensor, int, bool) -> (Tensor)")
emit("aten::mean : (Tensor, int?) -> (Tensor)")
emit("aten::std : (Tensor, bool) -> (Tensor)")
emit("aten::var : (Tensor, bool) -> (Tensor)")
emit("aten::nll_loss_forward : (Tensor, Tensor, Tensor?, int, int) -> (Tensor, Tensor)")
emit("aten::nll_loss_backward : (Tensor, Tensor, Tensor, Tensor?, int, int, Tensor) -> (Tensor)")
emit("aten::bincount : (Tensor, Tensor?, int) -> (Tensor)")
# Misc tensor ops.
emit("aten::constant_pad_nd : (Tensor, int[], Scalar) -> (Tensor)")
emit("aten::pad : (Tensor, int[], str, float?) -> (Tensor)")
emit("aten::squeeze.dim : (Tensor, int) -> (Tensor)", has_folder=True)
emit("aten::unsqueeze : (Tensor, int) -> (Tensor)")
emit("aten::squeeze : (Tensor) -> (Tensor)", has_folder=True)
emit("aten::flatten.using_ints : (Tensor, int, int) -> (Tensor)")
emit("aten::dim : (Tensor) -> (int)", has_folder=True)
emit("aten::size : (Tensor) -> (int[])", has_canonicalizer=True)
emit("aten::Bool.Tensor : (Tensor) -> (bool)")
emit("aten::ones : (int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::new_ones : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zeros : (int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zero_ : (Tensor) -> (Tensor)")
emit("aten::new_zeros : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::tensor : (t[], int?, Device?, bool) -> (Tensor)")
emit("aten::tensor.bool : (bool, int?, Device?, bool) -> (Tensor)")
emit("aten::tensor.int : (int, int?, Device?, bool) -> (Tensor)")
emit("aten::_shape_as_tensor : (Tensor) -> (Tensor)")
emit("aten::all : (Tensor) -> (Tensor)")
emit("aten::any : (Tensor) -> (Tensor)")
emit("aten::any.dim : (Tensor, int, bool) -> (Tensor)")
emit("aten::arange : (Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::arange.start : (Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::arange.start_step : (Scalar, Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::argmax : (Tensor, int?, bool) -> (Tensor)")
emit("aten::bucketize.Tensor : (Tensor, Tensor, bool, bool) -> (Tensor)")
emit("aten::clone : (Tensor, int?) -> (Tensor)")
emit("aten::contiguous : (Tensor, int) -> (Tensor)")
emit("aten::copy_ : (Tensor, Tensor, bool) -> (Tensor)")
emit("aten::_to_copy : (Tensor, int?, int?, Device?, bool?, bool, int?) -> (Tensor)")
emit("aten::detach : (Tensor) -> (Tensor)")
emit("aten::embedding : (Tensor, Tensor, int, bool, bool) -> (Tensor)")
emit("aten::empty_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::new_empty : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zeros_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::ones_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::empty.memory_format : (int[], int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::expand : (Tensor, int[], bool) -> (Tensor)")
emit("aten::expand_as : (Tensor, Tensor) -> (Tensor)")
emit("aten::broadcast_to : (Tensor, int[]) -> (Tensor)")
emit("aten::index.Tensor : (Tensor, Tensor?[]) -> (Tensor)")
emit("aten::index_select : (Tensor, int, Tensor) -> (Tensor)")
emit("aten::_index_put_impl_ : (Tensor, Tensor?[], Tensor, bool, bool) -> (Tensor)")
emit("aten::item : (Tensor) -> (Scalar)")
emit("aten::masked_select : (Tensor, Tensor) -> (Tensor)")
emit("aten::numel : (Tensor) -> (int)")
emit("aten::repeat : (Tensor, int[]) -> (Tensor)")
emit("aten::reshape : (Tensor, int[]) -> (Tensor)")
emit("aten::_reshape_alias : (Tensor, int[], int[]) -> (Tensor)")
emit("aten::resize_ : (Tensor, int[], int?) -> (Tensor)")
emit("aten::select.int : (Tensor, int, int) -> (Tensor)")
emit("aten::size.int : (Tensor, int) -> (int)", has_folder=True)
emit("aten::stack : (Tensor[], int) -> (Tensor)")
emit("aten::sum : (Tensor, int?) -> (Tensor)")
emit("aten::sum.dim_IntList : (Tensor, int[], bool, int?) -> (Tensor)")
emit("aten::max : (Tensor) -> (Tensor)")
emit("aten::max.dim : (Tensor, int, bool) -> (Tensor, Tensor)")
emit("aten::to.dtype : (Tensor, int, bool, bool, int?) -> (Tensor)", has_folder=True)
emit("aten::to.dtype_layout : (Tensor, int?, int?, Device?, bool?, bool, bool, int?) -> (Tensor)", has_folder=True)
emit("aten::to.other : (Tensor, Tensor, bool, bool, int?) -> (Tensor)")
emit("aten::to.prim_Device : (Tensor, Device?, int?, bool, bool) -> (Tensor)")
emit("aten::type_as : (Tensor, Tensor) -> (Tensor)")
emit("aten::view : (Tensor, int[]) -> (Tensor)", has_folder=True)
emit("aten::_unsafe_view : (Tensor, int[]) -> (Tensor)")
emit("aten::where.self : (Tensor, Tensor, Tensor) -> (Tensor)")
emit("aten::where.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::where.ScalarOther : (Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::where.ScalarSelf : (Tensor, Scalar, Tensor) -> (Tensor)")
emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)")
emit("aten::len.Tensor : (Tensor) -> (int)")
emit("aten::cpu : (Tensor) -> (Tensor)")
emit("aten::gather : (Tensor, int, Tensor, bool) -> (Tensor)")
emit("aten::IntImplicit : (Tensor) -> (int)")
emit("aten::tensor.float : (float, int?, Device?, bool) -> (Tensor)")
emit("aten::Int.Tensor : (Tensor) -> (int)", has_folder=True)
emit("aten::Float.Tensor : (Tensor) -> (float)", has_folder=True)
emit_with_mutating_variants("aten::dropout : (Tensor, float, bool) -> (Tensor)")
emit("aten::t : (Tensor) -> (Tensor)")
emit("aten::full : (int[], Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::full_like : (Tensor, Scalar, int?, int?, Device?, bool?, int?) -> (Tensor)")
# Dict ops.
emit("aten::__contains__.str : (Dict(str, t), str) -> (bool)", has_folder=True)
emit("aten::__getitem__.Dict_str : (Dict(str, t), str) -> (t)", has_folder=True)
emit("aten::_set_item.str : (Dict(str, t), str, t) -> ()")
emit("aten::keys.str : (Dict(str, t)) -> (str[])")
emit("aten::get.default_str : (Dict(str, t), str, t) -> (t)")
emit("aten::Delete.Dict_str : (Dict(str, t), str) -> ()")
# List ops.
emit("aten::cat : (Tensor[], int) -> (Tensor)")
emit("aten::append.t : (t[], t) -> (t[])")
emit("aten::add.t : (t[], t[]) -> (t[])")
emit("aten::eq.int_list : (int[], int[]) -> (bool)", has_folder=True)
emit("aten::list.t : (t[]) -> (t[])")
emit("aten::slice.t : (t[], int?, int?, int) -> (t[])")
emit("aten::insert.t : (t[], int, t) -> ()")
emit("aten::ne.int_list : (int[], int[]) -> (bool)")
# Str ops.
emit("aten::add.str : (str, str) -> (str)")
emit("aten::eq.str : (str, str) -> (bool)", has_folder=True)
emit("aten::str : (t) -> (str)")
emit("aten::format : (...) -> (str)")
emit("aten::join : (str, str[]) -> (str)")
# Type conversion ops.
emit("aten::Float.Scalar : (Scalar) -> (float)", has_folder=True)
emit("aten::Float.str : (str) -> (float)")
emit("aten::Int.float : (float) -> (int)")
# Primitive ops
emit("aten::__range_length : (int, int, int) -> (int)", has_folder=True)
emit("aten::__derive_index : (int, int, int) -> (int)", has_folder=True)
emit("aten::gt.int : (int, int) -> (bool)", has_folder=True)
emit("aten::ge.int : (int, int) -> (bool)", has_folder=True)
emit("aten::lt.int : (int, int) -> (bool)", has_folder=True)
emit("aten::le.int : (int, int) -> (bool)", has_folder=True)
emit("aten::ne.int : (int, int) -> (bool)", has_folder=True)
emit("aten::eq.int : (int, int) -> (bool)", has_folder=True)
emit("aten::floordiv.int : (int, int) -> (int)", has_folder=True)
emit("aten::remainder.int : (int, int) -> (int)", has_folder=True)
emit("aten::add.int : (int, int) -> (int)", has_folder=True)
emit("aten::sub.int : (int, int) -> (int)", has_folder=True)
emit("aten::mul.int : (int, int) -> (int)", has_folder=True)
emit("aten::neg.int : (int) -> (int)", has_folder=True)
emit("aten::log.int : (int) -> (float)")
emit("aten::add.float_int : (float, int) -> (float)")
emit("aten::sub.float : (float, float) -> (float)")
emit("aten::mul.float : (float, float) -> (float)")
emit("aten::div.float : (float, float) -> (float)", has_folder=True)
emit("aten::neg.float : (float) -> (float)")
emit("aten::eq.float : (float, float) -> (bool)", has_folder=True)
emit("aten::gt.float : (float, float) -> (bool)", has_folder=True)
emit("aten::ge.float : (float, float) -> (bool)", has_folder=True)
emit("aten::lt.float : (float, float) -> (bool)", has_folder=True)
emit("aten::lt.float_int : (float, int) -> (bool)")
emit("aten::ge.float_int : (float, int) -> (bool)")
emit("aten::ne.float_int : (float, int) -> (bool)")
emit("aten::gt.float_int : (float, int) -> (bool)")
emit("aten::__and__.bool : (bool, bool) -> (bool)")
emit("aten::ne.bool : (bool, bool) -> (bool)", has_folder=True)
emit("aten::__is__ : (t1, t2) -> (bool)", has_folder=True)
emit("aten::__isnot__ : (t1, t2) -> (bool)", has_folder=True)
emit("aten::__not__ : (bool) -> (bool)", has_folder=True)
emit("aten::len.t : (t[]) -> (int)",
has_folder=True,
has_canonicalizer=True)
emit("aten::__getitem__.t : (t[], int) -> (t)", has_canonicalizer=True)
emit("aten::_set_item.t : (t[], int, t) -> (t[])")
emit("aten::div : (Scalar, Scalar) -> (float)")
emit("aten::add : (Scalar, Scalar) -> (Scalar)")
emit("aten::eq.device : (Device, Device) -> (bool)")
emit("aten::ceil.float : (float) -> (int)", has_folder=True)
# backprop ops
emit("aten::_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)")
emit("aten::tanh_backward : (Tensor, Tensor) -> (Tensor)")
emit("aten::gelu_backward : (Tensor, Tensor, str) -> (Tensor)")
emit("aten::_log_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)")
# ==========================================================================
# `prim::` namespace.
# ==========================================================================
emit("prim::layout : (Tensor) -> (int)")
emit("prim::TupleIndex : (Any, int) -> (Any)", has_canonicalizer=True)
emit("prim::device : (Tensor) -> (Device)")
emit("prim::dtype : (Tensor) -> (int)", has_folder=True)
emit("prim::TupleUnpack : (Any) -> (...)", has_canonicalizer=True)
emit("prim::NumToTensor.Scalar : (Scalar) -> (Tensor)")
emit("prim::min.self_int : (int[]) -> (int)", has_folder=True)
emit("prim::min.int : (int, int) -> (int)")
emit("prim::max.self_int : (int[]) -> (int)")
emit("prim::max.int : (int, int) -> (int)", has_folder=True)
emit("prim::RaiseException : (str, str?) -> ()")
emit("prim::Uninitialized : () -> (Any)",
has_canonicalizer=True, traits=["NoSideEffect"])
emit("prim::unchecked_cast : (t) -> (t)", has_folder=True,
traits=["DeclareOpInterfaceMethods<CastOpInterface>"])
emit("prim::Print : (...) -> ()")
emit("prim::tolist : (...) -> (...)")
emit("prim::abs.Scalar : (Scalar) -> (Scalar)")
# ==========================================================================
# `quantized::` namespace.
# ==========================================================================
emit(
"quantized::linear : (Tensor, __torch__.torch.classes.quantized.LinearPackedParamsBase, float, int) -> (Tensor)",
traits=["HasValueSemantics"])
def dump_registered_ops(outfile: TextIO, registry: Registry):
for _, v in sorted(registry.by_unique_key.items()):
outfile.write(repr(v))
def main(args: argparse.Namespace):
registry = Registry.load()
if args.debug_registry_dump:
with open(args.debug_registry_dump, "w") as debug_registry_dump:
dump_registered_ops(debug_registry_dump, registry)
td_path = os.path.join(args.torch_ir_include_dir, "GeneratedTorchOps.td")
with open(td_path, "w") as f_td:
emitter_td = TextEmitter(f_td)
emitter_td.print(ODS_BANNER)
emit_ops(emitter_td, registry)
def _create_argparse() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(prog="generate_ods")
parser.add_argument(
"--torch_ir_include_dir",
required=True,
help="Directory in include/ containing the Torch dialect")
parser.add_argument(
"--debug_registry_dump",
help="File to dump the the PyTorch JIT operator registry into")
return parser
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
parser = _create_argparse()
args = parser.parse_args()
main(args)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'micropython-ulab'
copyright = '2019, Zoltán Vörös'
author = 'Zoltán Vörös'
# The full version, including alpha/beta/rc tags
release = '0.26'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
master_doc = 'index'
author = u'Zoltán Vörös'
copyright = author
language = 'en'
latex_documents = [
(master_doc, 'ulab-manual.tex', 'Micropython ulab documentation',
'Zoltán Vörös', 'manual'),
]
|
import spacy
from spacy.lang.en import English
from spacy.util import minibatch, compounding
from spacy.util import decaying
class ExperimentParam:
def __init__(self, TRAIN_DATA: list, max_batch_sizes: dict, model_type='ner',
dropout_start: float = 0.6, dropout_end: float = 0.2, interval: float = 1e-4):
self.TRAIN_DATA = TRAIN_DATA
self.max_batch_sizes = max_batch_sizes
self.model_type = model_type
self.dropout_start = dropout_start
self.dropout_end = dropout_end
self.interval = interval
def get_batches(self):
"""
max_batch_sizes =
Initialize with batch size 1, and compound to a maximum determined by your data size and problem type.
{"tagger": 32, "parser": 16, "ner": 16, "textcat": 64}
"""
max_batch_size = self.max_batch_sizes[self.model_type]
if len(self.TRAIN_DATA) < 1000:
max_batch_size /= 2
if len(self.TRAIN_DATA) < 500:
max_batch_size /= 2
batch_size = compounding(1, max_batch_size, 1.001)
batches = minibatch(self.TRAIN_DATA, size=batch_size)
return batches
@property
def determine_dropout(self):
"""
For small datasets, it’s useful to set a high dropout rate at first, and decay it down towards a more reasonable value. This helps avoid the network immediately overfitting, while still encouraging it to learn some of the more interesting things in your data.
"""
dropout = decaying(self.dropout_start, self.dropout_end, self.interval)
return dropout
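# Hypothetical usage sketch -- `nlp` and the (text, annotations) training
# example format are assumptions about the caller, not part of this module:
#
#   TRAIN_DATA = [("Uber blew through $1 million", {"entities": [(0, 4, "ORG")]})]
#   params = ExperimentParam(TRAIN_DATA,
#                            max_batch_sizes={"tagger": 32, "parser": 16,
#                                             "ner": 16, "textcat": 64},
#                            model_type="ner")
#   dropout = params.determine_dropout
#   for batch in params.get_batches():
#       texts, annotations = zip(*batch)
#       # nlp.update(texts, annotations, drop=next(dropout))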
|
from example_system import serializer
from example_system.bike import Bike
from example_system.human import Human
def run_example() -> None:
krzysztof = Human(name="Krzysztof", age=37)
giant_bike = Bike(brand="Giant", model="Contend AR")
krzysztof_json = serializer.serialize(krzysztof)
print(krzysztof_json)
bike_json = serializer.serialize(giant_bike)
print(bike_json)
krzysztof_deserialized = serializer.deserialize(krzysztof_json)
print(krzysztof)
print(krzysztof_deserialized)
bike_deserialized = serializer.deserialize(bike_json)
print(giant_bike)
print(bike_deserialized)
if __name__ == "__main__":
run_example()
|
'''
Created by auto_sdk on 2020.11.25
'''
from dingtalk.api.base import RestApi
class OapiSmartdeviceBatcheventPostRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.device_event_vos = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.smartdevice.batchevent.post'
|
import pandas as pd
import io
from joblib import load
import logging
logging.getLogger().setLevel(logging.INFO)
def generate_data():
new_data = pd.DataFrame({
'Pclass':[3,2,1],
'Sex': ['male', 'female', 'male'],
'Age':[4, 22, 28]
})
return new_data
def load_model():
    """Load the trained model, trying both possible relative output paths."""
    try:
        return load('../output/titanic_model_rf.pkl')
    except FileNotFoundError:
        try:
            return load('../../output/titanic_model_rf.pkl')
        except FileNotFoundError:
            logging.error('Model not loaded')
            return None
def predict_new(X, probs=True):
model = load_model()
p = model.get_preprocessing()
X = p.clean_data(X)
X = p.categ_encoding(X)
columns = model.get_columns()
for col in columns:
if col not in X.columns:
X[col] = 0
if probs:
return model.predict_proba(X)[:,1]
else:
return model.predict(X)
if __name__ == "__main__":
df = generate_data()
preds = predict_new(df, probs=True)
logging.info("Predictions:")
print(preds)
|
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Generic, Type, TypeVar, Union
from .devices import I2CDevice
from .parsers import RegisterParser
from .typing import RegisterState
BlockType = TypeVar("BlockType")
class RegisterBlock(Generic[BlockType], ABC):
"""
Abstract base class for collections of registers that represent distinct
features of an I2C device. A RegisterBlock translates between high-level
data structures and the low-level representation of that data as expressed
by RegisterParsers. For example, for the DS series RTCs, there are sub-
classes of RegisterBlock for the clock, the alarms, and their configuration
states. The Clock subclass encapsulates RegisterParsers for the BCD-ish
encoding of the Hour, Minute, Second, etc. stored in the device registers.
RegisterBlock is a Generic type. When subclassing, add the appropriate type
for the value represented by the subclass to its signature:
class TimekeepingRegisterBlock(RegisterBlock[datetime]): ...
A RegisterBlock subclass should define one or more attributes that are
RegisterParsers. Subclasses must also define two methods:
1) `_value` to read the data from its attributes and produce a value of
the designated type
2) `_prepare_update` to set its attributes to a given value
For example, suppose some device stored a positive decimal number like
12.34 with the integer part in register 0x00 and the fractional part in
register 0x01, each represented as 2 digit standard BCD. You want to read
or write this value as a 2-tuple of ints. A RegisterBlock for accessing
this number could be:
class DecimalRegisterBlock(RegisterBlock[Tuple[int, int]]):
integer_part = BCDRegisterParser(0x00)
fractional_part = BCDRegisterParser(0x01)
def _value(self) -> Tuple[int, int]:
return self.integer_part, self.fractional_part
def _prepare_update(self, value: Tuple[int, int]) -> None:
self.integer_part, self.fractional_part = value
"""
@property
def register_state(self) -> "RegisterState":
"""
Accesses register state from the most recent read of the parent device.
"""
return self._register_state
@register_state.setter
def register_state(self, state: "RegisterState") -> None:
"""
Setting register_state also keeps a copy to use as pending_state.
"""
self._register_state = state
self.pending_state = self._register_state.copy()
def __init__(self) -> None:
"""
Initialize a new RegisterBlock. RegisterBlock is a data descriptor, so
it must be used as an attribute on a subclass of I2CDevice in order to
have access to the device register state.
"""
# The very first access to the descriptor will populate actual state.
self.register_state: RegisterState = []
def __get__(
self, instance: "I2CDevice", owner: Type["I2CDevice"]
) -> BlockType:
"""
RegisterBlock is a data descriptor with access to the state of the
I2CDevice instance that it belongs to, so we can use that register
state for all parsers associated with this RegisterBlock (see
RegisterParser.__get__).
It is important for all RegisterParser instances to have a shared
register state (i.e. the state stored in this class) in order to avoid
mistakes if the state changes during a read. For example, if an RTC's
Second register is read at 0 minutes 59 seconds, and then the clock
ticks before we read the Minute register, the time would come out as
1 minute 59 seconds. Maxim DS RTCs (and probably others) use 2 sets
of registers to prevent this issue from affecting I2C block reads, so
we just need to make sure we only make one call to `read_registers()`
for all the RegisterParsers within a RegisterBlock.
"""
if not instance:
raise AttributeError(
"RegisterBlock must be accessed from an I2CDevice instance."
)
self.register_state = instance.read_registers()
return self._value()
def __set__(self, instance: "I2CDevice", value: BlockType) -> None:
"""
Setting the value of the RegisterBlock updates its state via the
RegisterParser descriptors that belong to the block.
"""
# Make sure we have the latest state loaded before modifying it
self.register_state = instance.read_registers()
self._prepare_update(value)
# A minor optimization to only write a contiguous block from the first
# changed register to the last changed register, leaving the rest
# unmodified. This helps improve the speed of small updates.
addresses_changed = [
i
for i, b in enumerate(self.pending_state)
if b != self._register_state[i]
]
first_changed = min(addresses_changed)
last_changed = max(addresses_changed)
to_write = self.pending_state[first_changed : last_changed + 1]
instance.write_registers(to_write, first_changed)
@abstractmethod
def _prepare_update(self, value: BlockType) -> None:
"""
Subclasses should define behavior for setting the values of their
RegisterParser attributes to reflect the requested `value` for the
RegisterBlock. Parsers' `__set__` methods call `update_register_state`
on this instance so they can all keep their pending state in sync.
"""
@abstractmethod
def _value(self) -> BlockType:
"""
Value should return an appropriate object to represent the state of
this register block e.g. a datetime for the clock/alarms or a float for
the temperature
"""
def update_register_state(
self, address: Union[int, slice], value: "RegisterState"
) -> None:
"""
RegisterParsers should call this method to stage their changes to the
register state. This allows parsers to be aware of each other's pending
changes so e.g. two distinct parsers can flip two different bits in the
same register. Once all parsers have staged their changes (implement
via _prepare_update), the __set__ method will write all the changes to
the parent I2CDevice instance.
Parameters
----------
address : Union[int, slice]
The register address(es) to set
value : RegisterState
The bytes to insert at address
"""
if isinstance(address, int):
address = slice(address, address + 1)
if len(value) != len(self.pending_state[address]):
raise ValueError("Value must have as many bytes as slice")
self.pending_state[address] = value
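        # Hedged example: a parser flipping one bit of register 0x0E would call
        # update_register_state(0x0E, [new_byte]) on its parent block; the int
        # address becomes the slice 0x0E:0x0F and the byte is staged in
        # pending_state only -- nothing is written to the device until
        # __set__ flushes the changed span.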
class DatetimeRegisterBlock(RegisterBlock[datetime]):
"""
Base class whose subclasses keep track of the register addresses where
various components of the date/time/alarms are stored for RTC ICs such
as the Maxim DS series.
"""
hour: RegisterParser[int]
minute: RegisterParser[int]
day_of_month: RegisterParser[int]
# Define defaults for attributes that may be left unset, e.g. the DS3231
# and DS1337 have no seconds for Alarm 2, and no year or month for either
# Alarm.
@property
def second(self) -> Union[RegisterParser[int], int]:
return 0
@second.setter
def second(self, value: int) -> None:
pass
@property
def month(self) -> Union[RegisterParser[int], int]:
return datetime.now().month
@month.setter
def month(self, value: int) -> None:
pass
@property
def year(self) -> Union[RegisterParser[int], int]:
return datetime.now().year
@year.setter
def year(self, value: int) -> None:
pass
def _prepare_update(self, value: datetime) -> None:
# FIXME pycharm doesn't understand you can assign an int to the
# parser descriptors, but mypy does
self.second = value.second
self.minute = value.minute
self.hour = value.hour
self.day_of_month = value.day
self.month = value.month
self.year = value.year
def _value(self) -> datetime:
try:
value = datetime(
self.year,
self.month,
self.day_of_month,
self.hour,
self.minute,
self.second,
)
except ValueError as err:
raise ValueError(
"Could not parse datetime. Perhaps the register state is"
"invalid? Try setting to a known valid state first."
) from err
return value
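# Minimal usage sketch. The BCD*Parser classes and the register layout below
# are assumptions for illustration; real code would use the concrete
# RegisterParser subclasses defined in .parsers for a specific RTC:
#
#   class ClockBlock(DatetimeRegisterBlock):
#       second = BCDSecondsParser(0x00)
#       minute = BCDMinutesParser(0x01)
#       hour = BCDHoursParser(0x02)
#       day_of_month = BCDDateParser(0x04)
#       month = BCDMonthParser(0x05)
#       year = BCDYearParser(0x06)
#
#   class DS3231(I2CDevice):
#       clock = ClockBlock()
#
#   rtc = DS3231(...)           # however the I2C bus/address is configured
#   now = rtc.clock             # __get__: one read_registers() call, parsed to a datetime
#   rtc.clock = datetime.now()  # __set__: stages changes, writes only the changed span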
|
"""A flexible Python library for atomic structure generation."""
|
import sys
import time
from TOSSIM import *
from TossimHelp import *
t = Tossim([])
sf = SerialForwarder(9002)
throttle = Throttle(t, 10)
nodecount = loadLinkModel(t, "linkgain.out")
loadNoiseModel(t, "meyer.txt", nodecount)
# Set debug channels
print("Setting debug channels...")
t.addChannel("printf", sys.stdout)
t.addChannel("UB.debug", sys.stdout)
t.addChannel("UB.error", sys.stdout)
t.addChannel("DebugSender.error", sys.stdout)

initializeNodes(t, nodecount)
sf.process()
throttle.initialize()

print("Running simulation (press Ctrl-c to stop)...")
try:
    while True:
        throttle.checkThrottle()
        t.runNextEvent()
        sf.process()
except KeyboardInterrupt:
    print("Closing down simulation!")
    # throttle.printStatistics()
|
import asyncio
import logging
import time
from datetime import datetime
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple
from blspy import PrivateKey, G1Element
from seno.cmds.init_funcs import check_keys
from seno.consensus.block_rewards import calculate_base_farmer_reward
from seno.protocols.protocol_message_types import ProtocolMessageTypes
from seno.server.outbound_message import NodeType, make_msg
from seno.simulator.simulator_protocol import FarmNewBlockProtocol
from seno.types.blockchain_format.coin import Coin
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from seno.util.byte_types import hexstr_to_bytes
from seno.util.ints import uint32, uint64
from seno.util.keychain import bytes_to_mnemonic, generate_mnemonic
from seno.util.path import path_from_root
from seno.util.ws_message import WsRpcMessage, create_payload_dict
from seno.wallet.cc_wallet.cc_wallet import CCWallet
from seno.wallet.rl_wallet.rl_wallet import RLWallet
from seno.wallet.did_wallet.did_wallet import DIDWallet
from seno.wallet.trade_record import TradeRecord
from seno.wallet.transaction_record import TransactionRecord
from seno.wallet.util.backup_utils import download_backup, get_backup_info, upload_backup
from seno.wallet.util.trade_utils import trade_record_to_dict
from seno.wallet.util.transaction_type import TransactionType
from seno.wallet.util.wallet_types import WalletType
from seno.wallet.wallet_info import WalletInfo
from seno.wallet.wallet_node import WalletNode
# Timeout for response from wallet/full node for sending a transaction
TIMEOUT = 30
log = logging.getLogger(__name__)
class WalletRpcApi:
def __init__(self, wallet_node: WalletNode):
assert wallet_node is not None
self.service = wallet_node
self.service_name = "seno_wallet"
def get_routes(self) -> Dict[str, Callable]:
return {
# Key management
"/log_in": self.log_in,
"/get_public_keys": self.get_public_keys,
"/get_private_key": self.get_private_key,
"/generate_mnemonic": self.generate_mnemonic,
"/add_key": self.add_key,
"/delete_key": self.delete_key,
"/delete_all_keys": self.delete_all_keys,
# Wallet node
"/get_sync_status": self.get_sync_status,
"/get_height_info": self.get_height_info,
"/farm_block": self.farm_block, # Only when node simulator is running
"/get_initial_freeze_period": self.get_initial_freeze_period,
"/get_network_info": self.get_network_info,
# Wallet management
"/get_wallets": self.get_wallets,
"/create_new_wallet": self.create_new_wallet,
# Wallet
"/get_wallet_balance": self.get_wallet_balance,
"/get_transaction": self.get_transaction,
"/get_transactions": self.get_transactions,
"/get_next_address": self.get_next_address,
"/send_transaction": self.send_transaction,
"/create_backup": self.create_backup,
"/get_transaction_count": self.get_transaction_count,
"/get_farmed_amount": self.get_farmed_amount,
"/create_signed_transaction": self.create_signed_transaction,
# Coloured coins and trading
"/cc_set_name": self.cc_set_name,
"/cc_get_name": self.cc_get_name,
"/cc_spend": self.cc_spend,
"/cc_get_colour": self.cc_get_colour,
"/create_offer_for_ids": self.create_offer_for_ids,
"/get_discrepancies_for_offer": self.get_discrepancies_for_offer,
"/respond_to_offer": self.respond_to_offer,
"/get_trade": self.get_trade,
"/get_all_trades": self.get_all_trades,
"/cancel_trade": self.cancel_trade,
# DID Wallet
"/did_update_recovery_ids": self.did_update_recovery_ids,
"/did_spend": self.did_spend,
"/did_get_pubkey": self.did_get_pubkey,
"/did_get_did": self.did_get_did,
"/did_recovery_spend": self.did_recovery_spend,
"/did_get_recovery_list": self.did_get_recovery_list,
"/did_create_attest": self.did_create_attest,
"/did_get_information_needed_for_recovery": self.did_get_information_needed_for_recovery,
"/did_create_backup_file": self.did_create_backup_file,
# RL wallet
"/rl_set_user_info": self.rl_set_user_info,
"/send_clawback_transaction:": self.send_clawback_transaction,
"/add_rate_limited_funds:": self.add_rate_limited_funds,
}
async def _state_changed(self, *args) -> List[WsRpcMessage]:
"""
Called by the WalletNode or WalletStateManager when something has changed in the wallet. This
gives us an opportunity to send notifications to all connected clients via WebSocket.
"""
        if len(args) < 2:
            return []
        data = {
            "state": args[0],
        }
        if args[1] is not None:
            data["wallet_id"] = args[1]
        if len(args) > 2 and args[2] is not None:
            data["additional_data"] = args[2]
return [create_payload_dict("state_changed", data, "seno_wallet", "wallet_ui")]
async def _stop_wallet(self):
"""
Stops a currently running wallet/key, which allows starting the wallet with a new key.
        Each key has its own wallet database.
"""
if self.service is not None:
self.service._close()
await self.service._await_closed()
##########################################################################################
# Key management
##########################################################################################
async def log_in(self, request):
"""
Logs in the wallet with a specific key.
"""
fingerprint = request["fingerprint"]
if self.service.logged_in_fingerprint == fingerprint:
return {"fingerprint": fingerprint}
await self._stop_wallet()
log_in_type = request["type"]
recovery_host = request["host"]
testing = False
if "testing" in self.service.config and self.service.config["testing"] is True:
testing = True
if log_in_type == "skip":
started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
elif log_in_type == "restore_backup":
file_path = Path(request["file_path"])
started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)
else:
started = await self.service._start(fingerprint)
if started is True:
return {"fingerprint": fingerprint}
elif testing is True and self.service.backup_initialized is False:
response = {"success": False, "error": "not_initialized"}
return response
elif self.service.backup_initialized is False:
backup_info = None
backup_path = None
try:
private_key = self.service.get_key_for_fingerprint(fingerprint)
last_recovery = await download_backup(recovery_host, private_key)
backup_path = path_from_root(self.service.root_path, "last_recovery")
if backup_path.exists():
backup_path.unlink()
backup_path.write_text(last_recovery)
backup_info = get_backup_info(backup_path, private_key)
backup_info["backup_host"] = recovery_host
backup_info["downloaded"] = True
except Exception as e:
log.error(f"error {e}")
response = {"success": False, "error": "not_initialized"}
if backup_info is not None:
response["backup_info"] = backup_info
response["backup_path"] = f"{backup_path}"
return response
return {"success": False, "error": "Unknown Error"}
async def get_public_keys(self, request: Dict):
fingerprints = [sk.get_g1().get_fingerprint() for (sk, seed) in self.service.keychain.get_all_private_keys()]
return {"public_key_fingerprints": fingerprints}
async def _get_private_key(self, fingerprint) -> Tuple[Optional[PrivateKey], Optional[bytes]]:
for sk, seed in self.service.keychain.get_all_private_keys():
if sk.get_g1().get_fingerprint() == fingerprint:
return sk, seed
return None, None
async def get_private_key(self, request):
fingerprint = request["fingerprint"]
sk, seed = await self._get_private_key(fingerprint)
if sk is not None:
s = bytes_to_mnemonic(seed) if seed is not None else None
return {
"private_key": {
"fingerprint": fingerprint,
"sk": bytes(sk).hex(),
"pk": bytes(sk.get_g1()).hex(),
"seed": s,
},
}
return {"success": False, "private_key": {"fingerprint": fingerprint}}
async def generate_mnemonic(self, request: Dict):
return {"mnemonic": generate_mnemonic().split(" ")}
async def add_key(self, request):
if "mnemonic" not in request:
raise ValueError("Mnemonic not in request")
# Adding a key from 24 word mnemonic
mnemonic = request["mnemonic"]
passphrase = ""
try:
sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
except KeyError as e:
return {
"success": False,
"error": f"The word '{e.args[0]}' is incorrect.'",
"word": e.args[0],
}
fingerprint = sk.get_g1().get_fingerprint()
await self._stop_wallet()
# Makes sure the new key is added to config properly
started = False
check_keys(self.service.root_path)
request_type = request["type"]
if request_type == "new_wallet":
started = await self.service._start(fingerprint=fingerprint, new_wallet=True)
elif request_type == "skip":
started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
elif request_type == "restore_backup":
file_path = Path(request["file_path"])
started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)
if started is True:
return {"fingerprint": fingerprint}
raise ValueError("Failed to start")
async def delete_key(self, request):
await self._stop_wallet()
fingerprint = request["fingerprint"]
self.service.keychain.delete_key_by_fingerprint(fingerprint)
path = path_from_root(
self.service.root_path,
f"{self.service.config['database_path']}-{fingerprint}",
)
if path.exists():
path.unlink()
return {}
async def delete_all_keys(self, request: Dict):
await self._stop_wallet()
self.service.keychain.delete_all_keys()
path = path_from_root(self.service.root_path, self.service.config["database_path"])
if path.exists():
path.unlink()
return {}
##########################################################################################
# Wallet Node
##########################################################################################
async def get_sync_status(self, request: Dict):
assert self.service.wallet_state_manager is not None
syncing = self.service.wallet_state_manager.sync_mode
synced = await self.service.wallet_state_manager.synced()
return {"synced": synced, "syncing": syncing, "genesis_initialized": True}
async def get_height_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
peak = self.service.wallet_state_manager.peak
if peak is None:
return {"height": 0}
else:
return {"height": peak.height}
async def get_network_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
network_name = self.service.config["selected_network"]
address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"]
return {"network_name": network_name, "network_prefix": address_prefix}
async def farm_block(self, request):
raw_puzzle_hash = decode_puzzle_hash(request["address"])
request = FarmNewBlockProtocol(raw_puzzle_hash)
msg = make_msg(ProtocolMessageTypes.farm_new_block, request)
await self.service.server.send_to_all([msg], NodeType.FULL_NODE)
return {}
##########################################################################################
# Wallet Management
##########################################################################################
async def get_wallets(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries()
return {"wallets": wallets}
async def _create_backup_and_upload(self, host) -> None:
assert self.service.wallet_state_manager is not None
try:
if "testing" in self.service.config and self.service.config["testing"] is True:
return None
now = time.time()
file_name = f"backup_{now}"
path = path_from_root(self.service.root_path, file_name)
await self.service.wallet_state_manager.create_wallet_backup(path)
backup_text = path.read_text()
response = await upload_backup(host, backup_text)
success = response["success"]
if success is False:
log.error("Failed to upload backup to wallet backup service")
elif success is True:
log.info("Finished upload of the backup file")
except Exception as e:
log.error(f"Exception in upload backup. Error: {e}")
async def create_new_wallet(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallet_state_manager = self.service.wallet_state_manager
main_wallet = wallet_state_manager.main_wallet
host = request["host"]
if request["wallet_type"] == "cc_wallet":
if request["mode"] == "new":
async with self.service.wallet_state_manager.lock:
cc_wallet: CCWallet = await CCWallet.create_new_cc(
wallet_state_manager, main_wallet, request["amount"]
)
colour = cc_wallet.get_colour()
asyncio.create_task(self._create_backup_and_upload(host))
return {
"type": cc_wallet.type(),
"colour": colour,
"wallet_id": cc_wallet.id(),
}
elif request["mode"] == "existing":
async with self.service.wallet_state_manager.lock:
cc_wallet = await CCWallet.create_wallet_for_cc(
wallet_state_manager, main_wallet, request["colour"]
)
asyncio.create_task(self._create_backup_and_upload(host))
return {"type": cc_wallet.type()}
elif request["wallet_type"] == "rl_wallet":
if request["rl_type"] == "admin":
log.info("Create rl admin wallet")
async with self.service.wallet_state_manager.lock:
rl_admin: RLWallet = await RLWallet.create_rl_admin(wallet_state_manager)
success = await rl_admin.admin_create_coin(
uint64(int(request["interval"])),
uint64(int(request["limit"])),
request["pubkey"],
uint64(int(request["amount"])),
uint64(int(request["fee"])) if "fee" in request else uint64(0),
)
asyncio.create_task(self._create_backup_and_upload(host))
assert rl_admin.rl_info.admin_pubkey is not None
return {
"success": success,
"id": rl_admin.id(),
"type": rl_admin.type(),
"origin": rl_admin.rl_info.rl_origin,
"pubkey": rl_admin.rl_info.admin_pubkey.hex(),
}
elif request["rl_type"] == "user":
log.info("Create rl user wallet")
async with self.service.wallet_state_manager.lock:
rl_user: RLWallet = await RLWallet.create_rl_user(wallet_state_manager)
asyncio.create_task(self._create_backup_and_upload(host))
assert rl_user.rl_info.user_pubkey is not None
return {
"id": rl_user.id(),
"type": rl_user.type(),
"pubkey": rl_user.rl_info.user_pubkey.hex(),
}
elif request["wallet_type"] == "did_wallet":
if request["did_type"] == "new":
backup_dids = []
num_needed = 0
for d in request["backup_dids"]:
backup_dids.append(hexstr_to_bytes(d))
if len(backup_dids) > 0:
num_needed = uint64(request["num_of_backup_ids_needed"])
async with self.service.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_state_manager,
main_wallet,
int(request["amount"]),
backup_dids,
uint64(num_needed),
)
my_did = did_wallet.get_my_DID()
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
}
elif request["did_type"] == "recovery":
async with self.service.wallet_state_manager.lock:
did_wallet = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_state_manager, main_wallet, request["filename"]
)
assert did_wallet.did_info.temp_coin is not None
assert did_wallet.did_info.temp_puzhash is not None
assert did_wallet.did_info.temp_pubkey is not None
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
coin_list = did_wallet.did_info.temp_coin.as_list()
newpuzhash = did_wallet.did_info.temp_puzhash
pubkey = did_wallet.did_info.temp_pubkey
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
"coin_name": coin_name,
"coin_list": coin_list,
"newpuzhash": newpuzhash.hex(),
"pubkey": pubkey.hex(),
"backup_dids": did_wallet.did_info.backup_ids,
"num_verifications_required": did_wallet.did_info.num_of_backup_ids_needed,
}
##########################################################################################
# Wallet
##########################################################################################
async def get_wallet_balance(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
unspent_records = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(wallet_id)
balance = await wallet.get_confirmed_balance(unspent_records)
pending_balance = await wallet.get_unconfirmed_balance(unspent_records)
spendable_balance = await wallet.get_spendable_balance(unspent_records)
pending_change = await wallet.get_pending_change_balance()
max_send_amount = await wallet.get_max_send_amount(unspent_records)
unconfirmed_removals: Dict[
bytes32, Coin
] = await wallet.wallet_state_manager.unconfirmed_removals_for_wallet(wallet_id)
wallet_balance = {
"wallet_id": wallet_id,
"confirmed_wallet_balance": balance,
"unconfirmed_wallet_balance": pending_balance,
"spendable_balance": spendable_balance,
"pending_change": pending_change,
"max_send_amount": max_send_amount,
"unspent_coin_count": len(unspent_records),
"pending_coin_removal_count": len(unconfirmed_removals),
}
return {"wallet_balance": wallet_balance}
async def get_transaction(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
transaction_id: bytes32 = bytes32(hexstr_to_bytes(request["transaction_id"]))
tr: Optional[TransactionRecord] = await self.service.wallet_state_manager.get_transaction(transaction_id)
if tr is None:
raise ValueError(f"Transaction 0x{transaction_id.hex()} not found")
return {
"transaction": tr,
"transaction_id": tr.name,
}
async def get_transactions(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
if "start" in request:
start = request["start"]
else:
start = 0
if "end" in request:
end = request["end"]
else:
end = 50
transactions = await self.service.wallet_state_manager.tx_store.get_transactions_between(wallet_id, start, end)
formatted_transactions = []
selected = self.service.config["selected_network"]
prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
for tx in transactions:
formatted = tx.to_json_dict()
formatted["to_address"] = encode_puzzle_hash(tx.to_puzzle_hash, prefix)
formatted_transactions.append(formatted)
return {
"transactions": formatted_transactions,
"wallet_id": wallet_id,
}
async def get_initial_freeze_period(self, _: Dict):
freeze_period = self.service.constants.INITIAL_FREEZE_END_TIMESTAMP
return {"INITIAL_FREEZE_END_TIMESTAMP": freeze_period}
async def get_next_address(self, request: Dict) -> Dict:
"""
Returns a new address
"""
assert self.service.wallet_state_manager is not None
if request["new_address"] is True:
create_new = True
else:
create_new = False
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
selected = self.service.config["selected_network"]
prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
if wallet.type() == WalletType.STANDARD_WALLET:
raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
elif wallet.type() == WalletType.COLOURED_COIN:
raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
else:
raise ValueError(f"Wallet type {wallet.type()} cannot create puzzle hashes")
return {
"wallet_id": wallet_id,
"address": address,
}
async def send_transaction(self, request):
assert self.service.wallet_state_manager is not None
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced before sending transactions")
if int(time.time()) < self.service.constants.INITIAL_FREEZE_END_TIMESTAMP:
end_date = datetime.fromtimestamp(float(self.service.constants.INITIAL_FREEZE_END_TIMESTAMP))
raise ValueError(f"No transactions before: {end_date}")
wallet_id = int(request["wallet_id"])
wallet = self.service.wallet_state_manager.wallets[wallet_id]
if not isinstance(request["amount"], int) or not isinstance(request["fee"], int):
raise ValueError("An integer amount or fee is required (too many decimals)")
amount: uint64 = uint64(request["amount"])
puzzle_hash: bytes32 = decode_puzzle_hash(request["address"])
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.generate_signed_transaction(amount, puzzle_hash, fee)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def get_transaction_count(self, request):
wallet_id = int(request["wallet_id"])
count = await self.service.wallet_state_manager.tx_store.get_transaction_count_for_wallet(wallet_id)
return {"wallet_id": wallet_id, "count": count}
async def create_backup(self, request):
assert self.service.wallet_state_manager is not None
file_path = Path(request["file_path"])
await self.service.wallet_state_manager.create_wallet_backup(file_path)
return {}
##########################################################################################
# Coloured Coins and Trading
##########################################################################################
async def cc_set_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
await wallet.set_name(str(request["name"]))
return {"wallet_id": wallet_id}
async def cc_get_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
name: str = await wallet.get_name()
return {"wallet_id": wallet_id, "name": name}
async def cc_spend(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
puzzle_hash: bytes32 = decode_puzzle_hash(request["inner_address"])
if not isinstance(request["amount"], int) or not isinstance(request["amount"], int):
raise ValueError("An integer amount or fee is required (too many decimals)")
amount: uint64 = uint64(request["amount"])
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.generate_signed_transaction([amount], [puzzle_hash], fee)
await wallet.push_transaction(tx)
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def cc_get_colour(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
colour: str = wallet.get_colour()
return {"colour": colour, "wallet_id": wallet_id}
async def create_offer_for_ids(self, request):
assert self.service.wallet_state_manager is not None
offer = request["ids"]
file_name = request["filename"]
async with self.service.wallet_state_manager.lock:
(
success,
spend_bundle,
error,
) = await self.service.wallet_state_manager.trade_manager.create_offer_for_ids(offer, file_name)
if success:
self.service.wallet_state_manager.trade_manager.write_offer_to_disk(Path(file_name), spend_bundle)
return {}
raise ValueError(error)
async def get_discrepancies_for_offer(self, request):
assert self.service.wallet_state_manager is not None
file_name = request["filename"]
file_path = Path(file_name)
async with self.service.wallet_state_manager.lock:
(
success,
discrepancies,
error,
) = await self.service.wallet_state_manager.trade_manager.get_discrepancies_for_offer(file_path)
if success:
return {"discrepancies": discrepancies}
raise ValueError(error)
async def respond_to_offer(self, request):
assert self.service.wallet_state_manager is not None
file_path = Path(request["filename"])
async with self.service.wallet_state_manager.lock:
(
success,
trade_record,
error,
) = await self.service.wallet_state_manager.trade_manager.respond_to_offer(file_path)
if not success:
raise ValueError(error)
return {}
async def get_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
trade_id = request["trade_id"]
trade: Optional[TradeRecord] = await trade_mgr.get_trade_by_id(trade_id)
if trade is None:
raise ValueError(f"No trade with trade id: {trade_id}")
result = trade_record_to_dict(trade)
return {"trade": result}
async def get_all_trades(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
all_trades = await trade_mgr.get_all_trades()
result = []
for trade in all_trades:
result.append(trade_record_to_dict(trade))
return {"trades": result}
async def cancel_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
wsm = self.service.wallet_state_manager
secure = request["secure"]
trade_id = hexstr_to_bytes(request["trade_id"])
async with self.service.wallet_state_manager.lock:
if secure:
await wsm.trade_manager.cancel_pending_offer_safely(trade_id)
else:
await wsm.trade_manager.cancel_pending_offer(trade_id)
return {}
async def get_backup_info(self, request: Dict):
file_path = Path(request["file_path"])
sk = None
if "words" in request:
mnemonic = request["words"]
passphrase = ""
try:
sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
except KeyError as e:
return {
"success": False,
"error": f"The word '{e.args[0]}' is incorrect.'",
"word": e.args[0],
}
elif "fingerprint" in request:
sk, seed = await self._get_private_key(request["fingerprint"])
if sk is None:
raise ValueError("Unable to decrypt the backup file.")
backup_info = get_backup_info(file_path, sk)
return {"backup_info": backup_info}
##########################################################################################
# Distributed Identities
##########################################################################################
async def did_update_recovery_ids(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = []
for _ in request["new_list"]:
recovery_list.append(hexstr_to_bytes(_))
if "num_verifications_required" in request:
new_amount_verifications_required = uint64(request["num_verifications_required"])
else:
new_amount_verifications_required = len(recovery_list)
async with self.service.wallet_state_manager.lock:
success = await wallet.update_recovery_list(recovery_list, new_amount_verifications_required)
# Update coin with new ID info
updated_puz = await wallet.get_new_puzzle()
spend_bundle = await wallet.create_spend(updated_puz.get_tree_hash())
if spend_bundle is not None and success:
return {"success": True}
return {"success": False}
async def did_spend(self, request):
wallet_id = int(request["wallet_id"])
async with self.service.wallet_state_manager.lock:
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
spend_bundle = await wallet.create_spend(request["puzzlehash"])
if spend_bundle is not None:
return {"success": True}
return {"success": False}
async def did_get_did(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did: str = wallet.get_my_DID()
async with self.service.wallet_state_manager.lock:
coins = await wallet.select_coins(1)
if coins is None or coins == set():
return {"success": True, "wallet_id": wallet_id, "my_did": my_did}
else:
coin = coins.pop()
return {"success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_id": coin.name()}
async def did_get_recovery_list(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = wallet.did_info.backup_ids
recover_hex_list = []
for _ in recovery_list:
recover_hex_list.append(_.hex())
return {
"success": True,
"wallet_id": wallet_id,
"recover_list": recover_hex_list,
"num_required": wallet.did_info.num_of_backup_ids_needed,
}
async def did_recovery_spend(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
if len(request["attest_filenames"]) < wallet.did_info.num_of_backup_ids_needed:
return {"success": False, "reason": "insufficient messages"}
async with self.service.wallet_state_manager.lock:
(
info_list,
message_spend_bundle,
) = await wallet.load_attest_files_for_recovery_spend(request["attest_filenames"])
if "pubkey" in request:
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
else:
assert wallet.did_info.temp_pubkey is not None
pubkey = wallet.did_info.temp_pubkey
if "puzhash" in request:
puzhash = hexstr_to_bytes(request["puzhash"])
else:
assert wallet.did_info.temp_puzhash is not None
puzhash = wallet.did_info.temp_puzhash
success = await wallet.recovery_spend(
wallet.did_info.temp_coin,
puzhash,
info_list,
pubkey,
message_spend_bundle,
)
return {"success": success}
async def did_get_pubkey(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
pubkey = bytes((await wallet.wallet_state_manager.get_unused_derivation_record(wallet_id)).pubkey).hex()
return {"success": True, "pubkey": pubkey}
async def did_create_attest(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
info = await wallet.get_info_for_recovery()
coin = hexstr_to_bytes(request["coin_name"])
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
spend_bundle = await wallet.create_attestment(
coin, hexstr_to_bytes(request["puzhash"]), pubkey, request["filename"]
)
if spend_bundle is not None:
return {
"success": True,
"message_spend_bundle": bytes(spend_bundle).hex(),
"info": [info[0].hex(), info[1].hex(), info[2]],
}
else:
return {"success": False}
async def did_get_information_needed_for_recovery(self, request):
wallet_id = int(request["wallet_id"])
did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
return {
"success": True,
"wallet_id": wallet_id,
"my_did": my_did,
"coin_name": coin_name,
"newpuzhash": did_wallet.did_info.temp_puzhash,
"pubkey": did_wallet.did_info.temp_pubkey,
"backup_dids": did_wallet.did_info.backup_ids,
}
    async def did_create_backup_file(self, request):
        wallet_id = int(request["wallet_id"])
        try:
            did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
            did_wallet.create_backup(request["filename"])
            return {"wallet_id": wallet_id, "success": True}
        except Exception:
            return {"wallet_id": wallet_id, "success": False}
##########################################################################################
# Rate Limited Wallet
##########################################################################################
async def rl_set_user_info(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
rl_user = self.service.wallet_state_manager.wallets[wallet_id]
origin = request["origin"]
async with self.service.wallet_state_manager.lock:
await rl_user.set_user_info(
uint64(request["interval"]),
uint64(request["limit"]),
origin["parent_coin_info"],
origin["puzzle_hash"],
origin["amount"],
request["admin_pubkey"],
)
return {}
async def send_clawback_transaction(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
fee = int(request["fee"])
async with self.service.wallet_state_manager.lock:
tx = await wallet.clawback_rl_coin_transaction(fee)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def add_rate_limited_funds(self, request):
wallet_id = uint32(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
puzzle_hash = wallet.rl_get_aggregation_puzzlehash(wallet.rl_info.rl_puzzle_hash)
request["wallet_id"] = 1
request["puzzle_hash"] = puzzle_hash
async with self.service.wallet_state_manager.lock:
await wallet.rl_add_funds(request["amount"], puzzle_hash, request["fee"])
return {"status": "SUCCESS"}
async def get_farmed_amount(self, request):
tx_records: List[TransactionRecord] = await self.service.wallet_state_manager.tx_store.get_farming_rewards()
amount = 0
pool_reward_amount = 0
farmer_reward_amount = 0
fee_amount = 0
last_height_farmed = 0
for record in tx_records:
height = record.height_farmed(self.service.constants.GENESIS_CHALLENGE)
if height > last_height_farmed:
last_height_farmed = height
if record.type == TransactionType.COINBASE_REWARD:
pool_reward_amount += record.amount
if record.type == TransactionType.FEE_REWARD:
fee_amount += record.amount - calculate_base_farmer_reward(height)
farmer_reward_amount += calculate_base_farmer_reward(height)
amount += record.amount
assert amount == pool_reward_amount + farmer_reward_amount + fee_amount
return {
"farmed_amount": amount,
"pool_reward_amount": pool_reward_amount,
"farmer_reward_amount": farmer_reward_amount,
"fee_amount": fee_amount,
"last_height_farmed": last_height_farmed,
}
async def create_signed_transaction(self, request):
if "additions" not in request or len(request["additions"]) < 1:
raise ValueError("Specify additions list")
additions: List[Dict] = request["additions"]
amount_0: uint64 = uint64(additions[0]["amount"])
assert amount_0 <= self.service.constants.MAX_COIN_AMOUNT
puzzle_hash_0 = hexstr_to_bytes(additions[0]["puzzle_hash"])
if len(puzzle_hash_0) != 32:
raise ValueError(f"Address must be 32 bytes. {puzzle_hash_0}")
additional_outputs = []
for addition in additions[1:]:
receiver_ph = hexstr_to_bytes(addition["puzzle_hash"])
if len(receiver_ph) != 32:
raise ValueError(f"Address must be 32 bytes. {receiver_ph}")
amount = uint64(addition["amount"])
if amount > self.service.constants.MAX_COIN_AMOUNT:
raise ValueError(f"Coin amount cannot exceed {self.service.constants.MAX_COIN_AMOUNT}")
additional_outputs.append({"puzzlehash": receiver_ph, "amount": amount})
fee = uint64(0)
if "fee" in request:
fee = uint64(request["fee"])
coins = None
if "coins" in request and len(request["coins"]) > 0:
coins = set([Coin.from_json_dict(coin_json) for coin_json in request["coins"]])
async with self.service.wallet_state_manager.lock:
signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
amount_0, puzzle_hash_0, fee, coins=coins, ignore_max_send_amount=True, primaries=additional_outputs
)
return {"signed_tx": signed_tx}
|
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
# import sys
# sys.path.append("../simulated_fqi/")
from simulated_fqi import NFQNetwork, ContrastiveNFQNetwork
import matplotlib.pyplot as plt
import numpy as np
def train(x, y, groups, network, optimizer):
predicted_q_values = network(x, groups).squeeze()
loss = F.mse_loss(predicted_q_values, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
def test_contrastive_network():
    # Setup agent
    network = ContrastiveNFQNetwork(state_dim=0, is_contrastive=True, nonlinearity=nn.Identity)
    optimizer = optim.Rprop(network.parameters())
    # Generate data: a shared linear trend plus a foreground-only contrastive term
    n, m = 100, 100
    beta_shared = -1
    beta_fg = 2.1
    x_bg, x_fg = np.linspace(-3, 3, m), np.linspace(-3, 3, n)
    x = np.concatenate([x_bg, x_fg])
    groups = np.concatenate([np.zeros(m), np.ones(n)])
    y = beta_shared * x + beta_fg * groups * x  # + np.random.normal(scale=0.5, size=m+n)
    x = torch.FloatTensor(x).unsqueeze(1)
    y = torch.FloatTensor(y)
    groups = torch.FloatTensor(groups).unsqueeze(1)
    for epoch in range(200):
        loss = train(x, y, groups, network, optimizer)
        # if epoch % 10 == 0:
        #     print("Epoch: {:4d}, Loss: {:4f}".format(epoch, loss))
    network.eval()
    with torch.no_grad():
        preds = network(x, groups)
    assert np.allclose(preds.squeeze().numpy(), y.squeeze().numpy(), atol=1e-4)
    # Optional visual check:
    # plt.scatter(x, preds, c=groups)
    # plt.show()
if __name__ == "__main__":
test_contrastive_network()
|
"""
WSGI config for tw project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tw.settings")
application = get_wsgi_application()
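# Hedged usage note (an assumption, not part of the original file): with this
# module in place, a production WSGI server can be pointed at the callable,
# for example:
#   gunicorn tw.wsgi:application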
|
import torch
def combine_masks_with_batch(masks, n_obj, th=0.5, return_as_onehot=False):
    """ Combine masks for different objects.
    The final mask is computed by taking, for every pixel, the object with the
    highest probability (argmax over the object dimension) and keeping it only
    if its probability exceeds the threshold.
    # Arguments
        masks: Tensor with shape [B, nobj, H, W]. H, W must match across the batch.
        n_obj: Integer. Number of objects to combine.
        th: Float. Minimum probability for a pixel to be assigned to an object.
        return_as_onehot: Boolean. If True, return one binary channel per object.
    # Returns
        [B, 1, H, W] label map with values in 0..n_obj (0 is background),
        or [B, nobj, H, W] if return_as_onehot is True.
    """
    # For every pixel, the index of the object with the highest probability.
    marker = torch.argmax(masks, dim=1, keepdim=True)  # [B, 1, H, W]
    if not return_as_onehot:
        out_mask = torch.unsqueeze(torch.zeros_like(masks)[:, 0], 1)  # [B, 1, H, W]
        for obj_id in range(n_obj):
            tmp_mask = (marker == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            out_mask[tmp_mask] = obj_id + 1
    else:
        out_mask = torch.zeros_like(masks)  # [B, nobj, H, W]
        for obj_id in range(n_obj):
            tmp_mask = (marker == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            out_mask[:, obj_id] = tmp_mask[:, 0].float()
    return out_mask
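# Hedged usage sketch (illustrative only): combine three per-object probability
# maps for a batch of two images, first as a single label map and then as
# one-hot channels. Shapes follow the docstring above.
if __name__ == "__main__":
    probs = torch.rand(2, 3, 4, 4)  # [B, nobj, H, W]
    labels = combine_masks_with_batch(probs, n_obj=3)  # [2, 1, 4, 4], values 0..3
    onehot = combine_masks_with_batch(probs, n_obj=3, return_as_onehot=True)  # [2, 3, 4, 4]
    print(labels.shape, onehot.shape)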
|
class Task(object):
    def __init__(self, name):
        self.name = name
    def run(self):
        pass
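# Hedged example (an assumption about intended use, not from the original
# file): concrete tasks subclass Task and override run().
class PrintTask(Task):
    def run(self):
        print("running task: {}".format(self.name))

if __name__ == "__main__":
    PrintTask("demo").run()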
|