from invoke import task
@task
def dist(context):
context.run("python setup.py bdist_wheel")
@task
def test(context):
context.run("tox")
|
"""Neural network operations."""
from __future__ import absolute_import as _abs
from ...expr import TupleWrapper
from . import _make
def conv2d(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype=""):
r"""2D convolution.
This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.
In the default case, where the data_layout is `NCHW`
and kernel_layout is `OIHW`, conv2d takes in
a data Tensor with shape `(batch_size, in_channels, height, width)`,
and a weight Tensor with shape `(channels, in_channels, kernel_size[0], kernel_size[1])`
to produce an output Tensor with the following rule:
.. math::
\mbox{out}[b, c, y, x] = \sum_{dy, dx, k}
\mbox{data}[b, k, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] *
\mbox{weight}[c, k, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(`NCHW` for data and `OIHW` for weight), perform the computation,
then convert to the out_layout.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : tuple of int, optional
        The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
        Layout of the output. By default, out_layout is the same as data_layout.
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.conv2d(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, out_layout, out_dtype)
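# Illustrative sketch (not part of the original module): a typical conv2d call and its
# shape arithmetic, assuming tvm.relay is importable as `relay` to build the inputs.
# With a (1, 3, 224, 224) NCHW input, a (64, 3, 7, 7) OIHW kernel, strides (2, 2) and
# padding (3, 3), the output shape is (1, 64, 112, 112):
#
#     data = relay.var("data", shape=(1, 3, 224, 224))
#     weight = relay.var("weight", shape=(64, 3, 7, 7))
#     out = conv2d(data, weight, strides=(2, 2), padding=(3, 3),
#                  channels=64, kernel_size=(7, 7))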
def conv2d_transpose(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
output_padding=(0, 0),
out_dtype=""):
"""Two dimensional transposed convolution operator.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : Tuple[int], optional
        The strides of convolution.
padding : Tuple[int], optional
The padding of convolution on both sides of inputs.
dilation : Tuple[int], optional
Specifies the dilation rate to be used for dilated convolution.
    groups : int, optional
        Number of groups for grouped convolution.
    channels : int, optional
        Number of output channels of this convolution.
    kernel_size : Tuple[int], optional
        The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
output_padding : Tuple[int], optional
Additional zero-padding to be added to one side of the output.
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.conv2d_transpose(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, output_padding, out_dtype)
def softmax(data, axis=-1):
r"""Computes softmax.
.. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator.
axis: int, optional
The axis to sum over when computing softmax
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.softmax(data, axis)
def log_softmax(data, axis=-1):
r"""Computes log softmax.
.. math::
\text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator.
axis: int
The axis to sum over when computing softmax
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.log_softmax(data, axis)
def max_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False):
r"""2D maximum pooling operator.
    This operator takes data as input and does 2D max value calculation
    within a pool_size-sized window, with striding defined by strides.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w) and pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \max_{m=0, \ldots, kh-1} \max_{n=0, \ldots, kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
    ceil_mode is used to take ceil or floor while computing the output shape.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
    pool_size : tuple of int, optional
        The size of the pooling window.
    strides : tuple of int, optional
        The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.max_pool2d(data, pool_size, strides, padding,
layout, ceil_mode)
def avg_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False,
count_include_pad=False):
r"""2D average pooling operator.
    This operator takes data as input and does 2D average value calculation
    within a pool_size-sized window, with striding defined by strides.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w), pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \frac{1}{kh * kw} \sum_{m=0}^{kh-1} \sum_{n=0}^{kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
    ceil_mode is used to take ceil or floor while computing the output shape.
    count_include_pad indicates whether to include padded input values in the computation.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
    pool_size : tuple of int, optional
        The size of the pooling window.
    strides : tuple of int, optional
        The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
count_include_pad : bool, optional
        Whether to include padding when computing the average.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.avg_pool2d(data, pool_size, strides, padding,
layout, ceil_mode, count_include_pad)
def global_max_pool2d(data,
layout="NCHW"):
r"""2D global maximum pooling operator.
This operator takes data as input and does 2D max value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \max_{m=0, \ldots, h} \max_{n=0, \ldots, w}
\mbox{data}(b, c, m, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.global_max_pool2d(data, layout)
def global_avg_pool2d(data,
layout="NCHW"):
r"""2D global average pooling operator.
This operator takes data as input and does 2D average value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \frac{1}{h * w} \sum_{m=0}^{h-1} \sum_{n=0}^{w-1}
\mbox{data}(b, c, m, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.global_avg_pool2d(data, layout)
def upsampling(data,
scale=1,
layout="NCHW",
method="NEAREST_NEIGHBOR"):
"""Upsampling.
This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is `NCHW`
with data of shape (n, c, h, w)
out will have a shape (n, c, h*scale, w*scale)
    method indicates the algorithm to be used while calculating the output value
    and can be one of ("BILINEAR", "NEAREST_NEIGHBOR").
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
    scale : int, optional
The scale factor for upsampling.
layout : str, optional
Layout of the input.
method : str, optional
Scale method to used [NEAREST_NEIGHBOR, BILINEAR].
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.upsampling(data, scale, layout, method)
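# Illustrative sketch (assumes tvm.relay is importable as `relay`): with NCHW data of
# shape (1, 8, 16, 16) and scale=2, upsampling yields shape (1, 8, 32, 32):
#
#     out = upsampling(relay.var("x", shape=(1, 8, 16, 16)), scale=2)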
def batch_flatten(data):
"""BatchFlatten.
    This operator flattens all the dimensions except for the batch dimension,
    which results in a 2D output.
For data with shape ``(d1, d2, ..., dk)``
batch_flatten(data) returns reshaped output of shape ``(d1, d2*...*dk)``.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
Returns
-------
result : tvm.relay.Expr
The Flattened result.
"""
return _make.batch_flatten(data)
def bias_add(data, bias, axis=1):
"""add_bias operator.
Add 1D bias to the axis of data.
This function is a special case of add which allows
inference of shape of the bias from data.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
bias : tvm.relay.Expr
The bias to be added.
axis : int, optional
The axis to add the bias.
Returns
-------
result : tvm.relay.Expr
The final result.
"""
return _make.bias_add(data, bias, axis)
def dense(data, weight, units=None):
"""Dense operator.
Applies a linear transformation
.. math::
`Y = X * W`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
units : int, optional
Number of hidden units of the dense transformation.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.dense(data, weight, units)
def relu(data):
"""Rectified linear unit.
.. math::
out = max(x, 0)
Parameters
----------
data : tvm.relay.Expr
The input data
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.relu(data)
def leaky_relu(data, alpha):
"""This operator takes data as input and does Leaky version
of a Rectified Linear Unit.
.. math::
`y = x > 0 ? x : alpha * x`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
alpha : float
Slope coefficient for the negative half axis.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.leaky_relu(data, alpha)
def prelu(data, alpha, axis=1):
"""This operator takes data as input and does Leaky version
of a Rectified Linear Unit.
.. math::
`y = x > 0 ? x : alpha * x`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
alpha : tvm.relay.Expr
Slope coefficient for the negative half axis.
axis : int, optional
        Specify along which shape axis the channel is specified.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.prelu(data, alpha, axis)
def pad(data,
pad_width,
pad_value=0.0):
r"""Padding
This operator takes in a tensor and pads each axis by the specified
widths using the specified value.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator
pad_width: tuple of <tuple of <int>>, required
Number of values padded to the edges of each axis, in the format
of ((before_1, after_1), ..., (before_N, after_N))
pad_value: float, optional, default=0.0
The value used for padding
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.pad(data, pad_width, pad_value)
def lrn(data, size=5, axis=1, bias=2, alpha=.00001, beta=0.75):
"""This operator takes data as input and does local response normalization.
Normalize the input in a local region across or within feature maps.
    Each output value is computed as data / (bias + (alpha * sum_data^2 / size))^beta,
    where size is the extent of each local region, and the sum is taken over the region
    centered at that value (zero padding is added where necessary).
    .. math::
        data / (bias + (alpha * sum\_data^2 / size))^{beta}
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
size : int, optional
The size of the local region to be considered for normalization.
axis : int, optional
Input data layout channel axis. Default value is 1 for NCHW format
bias : float, optional
The offset parameter to avoid dividing by 0.
alpha : float, optional
The scaling parameter.
beta : float, optional
The exponent parameter.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.lrn(data, size, axis, alpha, beta, bias)
def l2_normalize(data, eps, axis=None):
"""Perform L2 normalization on the input data
.. math::
y(i, j) = x(i, j) / sqrt(max(sum(x^2), eps))
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
eps : float
epsilon value
axis : list of int, optional
        The axis over which the normalization is applied.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.l2_normalize(data, eps, axis)
def dropout(data, rate=0.5):
"""Applies the dropout operation to the input array.
    During training, each element of the input is set to zero with
    probability ``rate``. The whole array is rescaled by ``1/(1-rate)``
to keep the expected sum of the input unchanged.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
rate : float, optional (default=0.5)
The probability for an element to be reset to 0.
Returns
-------
result : tvm.relay.Expr
The result of dropout
"""
result = _make.dropout(data, rate)
return TupleWrapper(result, 2)[0]
def batch_norm(data,
gamma,
beta,
moving_mean,
moving_var,
axis=1,
epsilon=1e-5,
center=True,
scale=True):
r"""
Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.
.. math::
data\_mean[i] = mean(data[:,i,:,...]) \\
data\_var[i] = var(data[:,i,:,...])
Then compute the normalized output, which has the same shape as input, as following:
.. math::
out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}}
* gamma[i] + beta[i]
    Both *mean* and *var* return a scalar by treating the input as a vector.
Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta``
have shape *(k,)*.
Besides the inputs and the outputs, this operator accepts two auxiliary
states, ``moving_mean`` and ``moving_var``, which are *k*-length
vectors. They are global statistics for the whole dataset, which are updated by::
moving_mean = moving_mean * momentum + data_mean * (1 - momentum)
moving_var = moving_var * momentum + data_var * (1 - momentum)
The parameter ``axis`` specifies which axis of the input shape denotes
the 'channel' (separately normalized groups). The default is 1.
Specifying -1 sets the channel axis to be the last item in the input shape.
.. note::
This operator can be optimized away for inference.
Parameters
----------
data : tvm.relay.Expr
Input to which batch_norm will be applied.
gamma : tvm.relay.Expr
The gamma scale factor.
beta : tvm.relay.Expr
The beta offset factor.
moving_mean : tvm.relay.Expr
        Running mean of input.
moving_var : tvm.relay.Expr
Running variance of input.
axis : int, optional, default=1
Specify along which shape axis the channel is specified.
epsilon : double, optional, default=1e-5
        Small float added to variance to avoid dividing by zero.
center : boolean, optional, default=True
        If True, add offset of beta to normalized tensor. If False,
        beta is ignored.
scale : boolean, optional, default=True
If true, multiply by gamma. If False, gamma is not used.
        When the next layer is piecewise linear (e.g., nn.relu),
this can be disabled since the scaling will be done by the next layer.
Returns
-------
result : relay.Tuple([tvm.relay.Expr, tvm.relay.Expr, tvm.relay.Expr])
Tuple of normed data (same shape as input),
new running mean (k-length vector),
and new running variance (k-length vector)
"""
result = _make.batch_norm(data,
gamma,
beta,
moving_mean,
moving_var,
axis,
epsilon,
center,
scale)
return TupleWrapper(result, 3)
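# Illustrative sketch (assumes tvm.relay is importable as `relay`): batch_norm returns
# a 3-tuple; for inference graphs only the normalized output (index 0) is usually kept.
#
#     data = relay.var("data", shape=(1, 16, 32, 32))
#     gamma, beta = relay.var("gamma", shape=(16,)), relay.var("beta", shape=(16,))
#     mean, var = relay.var("mean", shape=(16,)), relay.var("var", shape=(16,))
#     normed = batch_norm(data, gamma, beta, mean, var)[0]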
def contrib_conv2d_winograd_without_weight_transform(data,
weight,
tile_size,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype=""):
r"""2D convolution with winograd algorithm.
The basic parameters are the same as the ones in vanilla conv2d.
It assumes the weight is pre-transformed by nn.contrib_conv2d_winograd_weight_transform
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
tile_size : int
The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)
strides : tuple of int, optional
        The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
        Layout of the output. By default, out_layout is the same as data_layout.
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.contrib_conv2d_winograd_without_weight_transform(
data, weight, tile_size, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
kernel_layout, out_layout, out_dtype)
def contrib_conv2d_winograd_weight_transform(weight,
tile_size):
r"""Weight Transformation part for 2D convolution with winograd algorithm.
We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv2d_winograd_without_weight_transform
Parameters
----------
weight : tvm.relay.Expr
The weight expressions.
tile_size : int
The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.contrib_conv2d_winograd_weight_transform(weight, tile_size)
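# Illustrative sketch of how the two winograd ops above are typically paired
# (the concrete parameter values are assumptions of this example): pre-transform a
# 3x3 kernel once, then reuse it in convolutions that skip the weight transform.
#
#     w_t = contrib_conv2d_winograd_weight_transform(weight, tile_size=4)
#     out = contrib_conv2d_winograd_without_weight_transform(
#         data, w_t, tile_size=4, channels=64, kernel_size=(3, 3), padding=(1, 1))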
|
import pathlib
from bs4 import BeautifulSoup
HTML_LEAF_PAGE_SAMPLE_PATH = pathlib.Path('tests', 'fixtures', 'html', 'leaf_page_sample.html')
HTML_TEXT = ''
def setup():
global HTML_TEXT
with open(HTML_LEAF_PAGE_SAMPLE_PATH, "rt", encoding="utf-8") as handle:
HTML_TEXT = handle.read()
def teardown():
global HTML_TEXT
HTML_TEXT = ''
def test_html_leaf_page_parse_fixture():
# soup = BeautifulSoup(HTML_TEXT, 'html.parser')
lines = [t for t in HTML_TEXT.split('\n') if t.startswith('<a href="')]
parsed = []
for line in lines:
a, x = line.split('">', 1)
f, r = x.split('</a>')
r = r.rstrip()
d, s, u = r.rsplit(' ', 2)
d = d.strip()
parsed.append((f, d, s, u))
for p in parsed:
print(p)
assert len(p) == 4
|
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
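# Illustrative sketch (not part of the tutorial models): was_published_recently() is
# True only for questions published within the last day and not dated in the future.
#
#     from django.utils import timezone
#     import datetime
#     future_q = Question(pub_date=timezone.now() + datetime.timedelta(days=30))
#     future_q.was_published_recently()   # False
#     recent_q = Question(pub_date=timezone.now() - datetime.timedelta(hours=1))
#     recent_q.was_published_recently()   # True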
|
# -*- coding: utf-8 -*-
import os, json, sys, codecs  # sys and codecs are used by setupByOS()
if os.name == 'nt':
SLASH = '\\'
else:
SLASH = '/'
def makeOutputFolder(folder_name,counter):
try:
if counter is not None:
write_folder_name = folder_name + ' (' + str(counter) + ')'
else:
write_folder_name = folder_name
write_folder = os.mkdir(write_folder_name)
return write_folder, write_folder_name
except OSError:
if counter is not None:
return makeOutputFolder(folder_name,counter+1)
else:
return makeOutputFolder(folder_name,0)
def buildNewOutput(output_directory):
core_name = output_directory[output_directory.rfind('/')+1:]
with open(output_directory + '/' + core_name + '.ttl.graph','w') as graph_write_file:
graph_write_file.write('http://localhost:8890/DAV/')
output_text = 'PREFIX schema: <http://schema.org/>\n\n'
with open(output_directory + '/' + core_name + '.ttl','w') as write_file:
write_file.write(output_text.encode('utf-8'))
return output_directory + '/' + core_name + '.ttl'
def outputTurtleFile(write_file,turtle_strings):
with open(write_file,'a') as output_file:
for turtle_string in turtle_strings:
            print(turtle_string)
output_file.write(turtle_string.encode('utf-8'))
def addCardsToTurtle(read_file):
with open(read_file,'r') as data_file:
card = json.load(data_file)
print(card)
end_text = ''
output_text = '\n<' + card['@id'] + '>\n'
output_text += '\ta\tschema:' + card['@type'] + ' ;\n'
output_text += '\tschema:temporalCoverage\t"' + card['temporalCoverage'] + '"^^schema:Date ;\n'
if 'dateCreated' in card:
output_text += '\tschema:dateCreated\t"' + card['dateCreated'] + '"^^schema:Date ;\n'
if type(card['name']) is list:
output_text += '\tschema:name\t"' + card['name'][0].replace('"','\\"') + '" ;\n'
output_text += '\tschema:name\t"' + card['name'][1].replace('"','\\"') + '" ;\n'
else:
output_text += '\tschema:name\t"' + card['name'].replace('"','\\"') + '" ;\n'
output_text += '\tschema:author\t<' + card['author']['@id'] + '> ;\n'
if 'mentions' in card:
if type(card['mentions']) is list:
for instance in card['mentions']:
if '@id' in instance:
output_text += '\tschema:mentions\t<' + instance['@id'] + '> ;\n'
else:
output_text += '\tschema:mentions\t[\n'
output_text += '\t\ta\tschema:CreativeWork ;\n'
output_text += '\t\tschema:name\t"' + instance['name'].replace('"','\\"') + '" ;\n'
output_text += '\t] ;\n'
else:
if '@id' in card['mentions']:
output_text += '\tschema:mentions\t<' + card['mentions']['@id'] + '> ;\n'
else:
output_text += '\tschema:mentions\t[\n'
output_text += '\t\ta\tschema:CreativeWork ;\n'
output_text += '\t\tschema:name\t"' + card['mentions']['name'].replace('"','\\"') + '"\n'
output_text += '\t] ;\n'
if 'citation' in card:
if type(card['citation']) is list:
for citation in card['citation']:
output_text += '\tschema:citation\t[\n'
output_text += '\t\ta\tschema:CreativeWork ;\n'
if 'additionalType' in citation:
output_text += '\t\tschema:additionalType\t<' + citation['additionalType'] + '> ;\n'
if 'datePublished' in citation:
output_text += '\t\tschema:datePublished\t"' + citation['datePublished'] + '"^^schema:Date ;\n'
if 'author' in citation:
output_text += '\t\tschema:author\t<' + citation['author']['@id'] + '> ;\n'
if 'editor' in citation:
output_text += '\t\tschema:editor\t<' + citation['editor']['@id'] + '> ;\n'
if 'name' in citation:
output_text += '\t\tschema:name\t"' + citation['name'].replace('"','\\"') + '" ;\n'
if 'headline' in citation:
output_text += '\t\tschema:headline\t"' + citation['headline'].replace('"','\\"') + '" ;\n'
if 'sameAs' in citation:
output_text += '\t\tschema:sameAs\t<' + citation['sameAs'] + '> ;\n'
if 'pageStart' in citation:
output_text += '\t\tschema:pageStart\t' + citation['pageStart'] + ' ;\n'
if 'pageEnd' in citation:
output_text += '\t\tschema:pageEnd\t' + citation['pageEnd'] + ' ;\n'
if 'isPartOf' in citation:
output_text += '\t\tschema:isPartOf\t[\n'
output_text += '\t\t\ta\tschema:' + citation['isPartOf']['@type'] + ' ;\n'
if 'dateCreated' in citation['isPartOf']:
output_text += '\t\t\tschema:dateCreated\t"' + citation['isPartOf']['dateCreated'] + '"^^schema:Date ;\n'
if 'issueNumber' in citation['isPartOf']:
output_text += '\t\t\tschema:issueNumber\t"' + citation['isPartOf']['issueNumber'].replace('"','\\"') + '" ;\n'
if 'volumeNumber' in citation['isPartOf']:
output_text += '\t\t\tschema:volumeNumber\t"' + citation['isPartOf']['volumeNumber'].replace('"','\\"') + '" ;\n'
if 'name' in citation['isPartOf']:
output_text += '\t\t\tschema:name\t"' + citation['isPartOf']['name'].replace('"','\\"') + '" ;\n'
if 'pageStart' in citation['isPartOf']:
output_text += '\t\t\tschema:pageStart\t' + citation['isPartOf']['pageStart'] + ' ;\n'
if 'pageEnd' in citation['isPartOf']:
output_text += '\t\t\tschema:pageEnd\t' + citation['isPartOf']['pageEnd'] + ' ;\n'
if 'isPartOf' in citation['isPartOf']:
output_text += '\t\t\tschema:isPartOf\t[\n'
output_text += '\t\t\t\ta\tschema:' + citation['isPartOf']['isPartOf']['@type'] + ' ;\n'
if 'name' in citation['isPartOf']['isPartOf']:
output_text += '\t\t\t\tschema:name\t"' + citation['isPartOf']['isPartOf']['name'].replace('"','\\"') + '" ;\n'
if 'volumeNumber' in citation['isPartOf']['isPartOf']:
output_text += '\t\t\t\tschema:volumeNumber\t"' + citation['isPartOf']['isPartOf']['volumeNumber'].replace('"','\\"') + '" ;\n'
output_text += '\t\t\t] ;\n'
output_text += '\t\t] ;\n'
output_text += '\t] ;\n'
                    print(citation)
else:
output_text += '\tschema:citation\t[\n'
output_text += '\t\ta\tschema:CreativeWork ;\n'
if 'additionalType' in card['citation']:
output_text += '\t\tschema:additionalType\t<' + card['citation']['additionalType'] + '> ;\n'
if 'datePublished' in card['citation']:
output_text += '\t\tschema:datePublished\t"' + card['citation']['datePublished'] + '"^^schema:Date ;\n'
if 'author' in card['citation']:
output_text += '\t\tschema:author\t<' + card['citation']['author']['@id'] + '> ;\n'
if 'editor' in card['citation']:
output_text += '\t\tschema:editor\t<' + card['citation']['editor']['@id'] + '> ;\n'
if 'name' in card['citation']:
output_text += '\t\tschema:name\t"' + card['citation']['name'].replace('"','\\"') + '" ;\n'
if 'headline' in card['citation']:
output_text += '\t\tschema:headline\t"' + card['citation']['headline'].replace('"','\\"') + '" ;\n'
if 'sameAs' in card['citation']:
output_text += '\t\tschema:sameAs\t<' + card['citation']['sameAs'] + '> ;\n'
if 'pageStart' in card['citation']:
output_text += '\t\tschema:pageStart\t' + card['citation']['pageStart'] + ' ;\n'
if 'pageEnd' in card['citation']:
output_text += '\t\tschema:pageEnd\t' + card['citation']['pageEnd'] + ' ;\n'
if 'isPartOf' in card['citation']:
output_text += '\t\tschema:isPartOf\t[\n'
output_text += '\t\t\ta\tschema:' + card['citation']['isPartOf']['@type'] + ' ;\n'
if 'dateCreated' in card['citation']['isPartOf']:
output_text += '\t\t\tschema:dateCreated\t"' + card['citation']['isPartOf']['dateCreated'] + '"^^schema:Date ;\n'
if 'issueNumber' in card['citation']['isPartOf']:
output_text += '\t\t\tschema:issueNumber\t"' + card['citation']['isPartOf']['issueNumber'].replace('"','\\"') + '" ;\n'
if 'volumeNumber' in card['citation']['isPartOf']:
output_text += '\t\t\tschema:volumeNumber\t"' + card['citation']['isPartOf']['volumeNumber'].replace('"','\\"') + '" ;\n'
if 'name' in card['citation']['isPartOf']:
output_text += '\t\t\tschema:name\t"' + card['citation']['isPartOf']['name'].replace('"','\\"') + '" ;\n'
if 'pageStart' in card['citation']['isPartOf']:
output_text += '\t\t\tschema:pageStart\t' + card['citation']['isPartOf']['pageStart'] + ' ;\n'
if 'pageEnd' in card['citation']['isPartOf']:
output_text += '\t\t\tschema:pageEnd\t' + card['citation']['isPartOf']['pageEnd'] + ' ;\n'
if 'isPartOf' in card['citation']['isPartOf']:
output_text += '\t\t\tschema:isPartOf\t[\n'
output_text += '\t\t\t\ta\tschema:' + card['citation']['isPartOf']['isPartOf']['@type'] + ' ;\n'
if 'name' in card['citation']['isPartOf']['isPartOf']:
output_text += '\t\t\t\tschema:name\t"' + card['citation']['isPartOf']['isPartOf']['name'].replace('"','\\"') + '" ;\n'
if 'volumeNumber' in card['citation']['isPartOf']['isPartOf']:
output_text += '\t\t\t\tschema:volumeNumber\t"' + card['citation']['isPartOf']['isPartOf']['volumeNumber'].replace('"','\\"') + '" ;\n'
output_text += '\t\t\t] ;\n'
output_text += '\t\t] ;\n'
output_text += '\t] ;\n'
                print(card['citation'])
output_text = output_text[:-2] + '.\n'
return output_text
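# Illustrative sketch (hypothetical input, not from the dataset): a minimal card such as
#     {"@id": "http://example.org/card/1", "@type": "CreativeWork",
#      "temporalCoverage": "1905", "name": "Example card",
#      "author": {"@id": "http://example.org/person/1"}}
# is rendered by addCardsToTurtle roughly as (tab separators shown as spaces):
#     <http://example.org/card/1>
#         a   schema:CreativeWork ;
#         schema:temporalCoverage   "1905"^^schema:Date ;
#         schema:name   "Example card" ;
#         schema:author   <http://example.org/person/1> .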
def traverseFullTree():
rootdir = 'tei'
results_folder, results_folder_name = makeOutputFolder('ttl',None)
cards_converted = 1
file_iterator = 1
    turtle_strings = []
specific_write_folder, specific_write_folder_name = makeOutputFolder(results_folder_name + '/' + str(file_iterator),None)
write_file = buildNewOutput(specific_write_folder_name)
for root, dirs, files in os.walk(rootdir):
for name in files:
if '.json' in name:
if cards_converted%1000 == 0:
                    outputTurtleFile(write_file,turtle_strings)
file_iterator += 1
                    turtle_strings = []
specific_write_folder, specific_write_folder_name = makeOutputFolder(results_folder_name + '/' + str(file_iterator),None)
write_file = buildNewOutput(specific_write_folder_name)
                turtle_strings.append(addCardsToTurtle(root+SLASH+name))
cards_converted += 1
    outputTurtleFile(write_file,turtle_strings)
# writeNewFile(results_folder_name+root[3:]+SLASH+name[:-3]+'json',file_contents=processorFunction(root+SLASH+name,linked_names))
#On Windows, the Command Prompt doesn't know how to display unicode characters, causing it to halt when it encounters non-ASCII characters
def setupByOS():
if os.name == 'nt':
if sys.stdout.encoding != 'cp850':
sys.stdout = codecs.getwriter('cp850')(sys.stdout, 'replace')
if sys.stderr.encoding != 'cp850':
sys.stderr = codecs.getwriter('cp850')(sys.stderr, 'replace')
def main():
setupByOS()
traverseFullTree()
main()
|
'''
Coding our First Game in PyGame
-
Creating Ground for Snakes
'''
import pygame
pygame.init()
# print(x) # All 6 pygame modules successfully imported
# Colors
white = (255, 255, 255)
red = (255, 0, 0)
black = (0, 0, 0)
# Creating Game Window
screen_width = 900
screen_height = 600
gameWindow = pygame.display.set_mode((screen_width, screen_height)) # Game Window of 900x600
pygame.display.set_caption("Snake - by Anubhav Madhav") # Title of the Game, which appears at the top of the window
pygame.display.update() # We need to update our display each and everytime we make a change
# Game Specific Variables
exit_game = False
game_over = False
# Creating a Game Loop
while not exit_game:
for event in pygame.event.get(): # This gets all the events which a user can perform in a game, like mouse hover, mouse click, pressing a certain key etc.
print(event)
if event.type == pygame.QUIT:
exit_game = True
gameWindow.fill(white) # Setting background color as white
pygame.display.update() # Need to update display cause we have made changes to gameWindow
pygame.quit()
quit()
|
from .Assembly import Assemble, AssembleForces, AssembleInternalTractionForces, AssembleExplicit, AssembleMass, AssembleForm
|
import pgzrun
import gameinput
import gamemaps
from random import randint
from datetime import datetime
WIDTH = 600
HEIGHT = 660
player = Actor("pacman_o") # Load in the player Actor image
player.score = 0
player.lives = 3
level = 0
SPEED = 3
def draw(): # Pygame Zero draw function
global pacDots, player
screen.blit('header', (0, 0))
screen.blit('colourmap', (0, 80))
pacDotsLeft = 0
for a in range(len(pacDots)):
if pacDots[a].status == 0:
pacDots[a].draw()
pacDotsLeft += 1
if pacDots[a].collidepoint((player.x, player.y)):
if pacDots[a].status == 0:
if pacDots[a].type == 2:
for g in range(len(ghosts)): ghosts[g].status = 1200
else:
player.score += 10
pacDots[a].status = 1
if pacDotsLeft == 0: player.status = 2
drawGhosts()
getPlayerImage()
player.draw()
drawLives()
screen.draw.text("LEVEL "+str(level) , topleft=(10, 10), owidth=0.5, ocolor=(0,0,255), color=(255,255,0) , fontsize=40)
screen.draw.text(str(player.score) , topright=(590, 20), owidth=0.5, ocolor=(255,255,255), color=(0,64,255) , fontsize=60)
if player.status == 3: drawCentreText("GAME OVER")
if player.status == 2: drawCentreText("LEVEL CLEARED!\nPress Enter or Button A\nto Continue")
if player.status == 1: drawCentreText("CAUGHT!\nPress Enter or Button A\nto Continue")
def drawCentreText(t):
screen.draw.text(t , center=(300, 434), owidth=0.5, ocolor=(255,255,255), color=(255,64,0) , fontsize=60)
def update(): # Pygame Zero update function
global player, moveGhostsFlag, ghosts
if player.status == 0:
if moveGhostsFlag == 4: moveGhosts()
for g in range(len(ghosts)):
if ghosts[g].status > 0: ghosts[g].status -= 1
if ghosts[g].collidepoint((player.x, player.y)):
if ghosts[g].status > 0:
player.score += 100
animate(ghosts[g], pos=(290, 370), duration=1/SPEED, tween='linear', on_finished=flagMoveGhosts)
else:
player.lives -= 1
sounds.pac2.play()
if player.lives == 0:
player.status = 3
music.fadeout(3)
else:
player.status = 1
if player.inputActive:
gameinput.checkInput(player)
gamemaps.checkMovePoint(player)
if player.movex or player.movey:
inputLock()
sounds.pac1.play()
animate(player, pos=(player.x + player.movex, player.y + player.movey), duration=1/SPEED, tween='linear', on_finished=inputUnLock)
if player.status == 1:
i = gameinput.checkInput(player)
if i == 1:
player.status = 0
player.x = 290
player.y = 570
if player.status == 2:
i = gameinput.checkInput(player)
if i == 1:
init()
def init():
global player, level
initDots()
initGhosts()
player.x = 290
player.y = 570
player.status = 0
inputUnLock()
level += 1
music.play("pm1")
music.set_volume(0.2)
def drawLives():
for l in range(player.lives): screen.blit("pacman_o", (10+(l*32),40))
def getPlayerImage():
global player
dt = datetime.now()
a = player.angle
tc = dt.microsecond%(500000/SPEED)/(100000/SPEED)
if tc > 2.5 and (player.movex != 0 or player.movey !=0):
if a != 180:
player.image = "pacman_c"
else:
player.image = "pacman_cr"
else:
if a != 180:
player.image = "pacman_o"
else:
player.image = "pacman_or"
player.angle = a
def drawGhosts():
for g in range(len(ghosts)):
if ghosts[g].x > player.x:
if ghosts[g].status > 200 or (ghosts[g].status > 1 and ghosts[g].status%2 == 0):
ghosts[g].image = "ghost5"
else:
ghosts[g].image = "ghost"+str(g+1)+"r"
else:
if ghosts[g].status > 200 or (ghosts[g].status > 1 and ghosts[g].status%2 == 0):
ghosts[g].image = "ghost5"
else:
ghosts[g].image = "ghost"+str(g+1)
ghosts[g].draw()
def moveGhosts():
global moveGhostsFlag
dmoves = [(1,0),(0,1),(-1,0),(0,-1)]
moveGhostsFlag = 0
for g in range(len(ghosts)):
dirs = gamemaps.getPossibleDirection(ghosts[g])
if inTheCentre(ghosts[g]):
ghosts[g].dir = 3
else:
if g == 0: followPlayer(g, dirs)
if g == 1: ambushPlayer(g, dirs)
if dirs[ghosts[g].dir] == 0 or randint(0,50) == 0:
d = -1
while d == -1:
rd = randint(0,3)
if aboveCentre(ghosts[g]) and rd == 1:
rd = 0
if dirs[rd] == 1:
d = rd
ghosts[g].dir = d
animate(ghosts[g], pos=(ghosts[g].x + dmoves[ghosts[g].dir][0]*20, ghosts[g].y + dmoves[ghosts[g].dir][1]*20), duration=1/SPEED, tween='linear', on_finished=flagMoveGhosts)
def followPlayer(g, dirs):
d = ghosts[g].dir
if d == 1 or d == 3:
if player.x > ghosts[g].x and dirs[0] == 1: ghosts[g].dir = 0
if player.x < ghosts[g].x and dirs[2] == 1: ghosts[g].dir = 2
if d == 0 or d == 2:
if player.y > ghosts[g].y and dirs[1] == 1 and not aboveCentre(ghosts[g]): ghosts[g].dir = 1
if player.y < ghosts[g].y and dirs[3] == 1: ghosts[g].dir = 3
def ambushPlayer(g, dirs):
d = ghosts[g].dir
if player.movex > 0 and dirs[0] == 1: ghosts[g].dir = 0
if player.movex < 0 and dirs[2] == 1: ghosts[g].dir = 2
if player.movey > 0 and dirs[1] == 1 and not aboveCentre(ghosts[g]): ghosts[g].dir = 1
if player.movey < 0 and dirs[3] == 1: ghosts[g].dir = 3
def inTheCentre(ga):
if ga.x > 220 and ga.x < 380 and ga.y > 320 and ga.y < 420:
return True
return False
def aboveCentre(ga):
if ga.x > 220 and ga.x < 380 and ga.y > 300 and ga.y < 320:
return True
return False
def flagMoveGhosts():
global moveGhostsFlag
moveGhostsFlag += 1
def ghostCollided(ga,gn):
for g in range(len(ghosts)):
if ghosts[g].colliderect(ga) and g != gn:
return True
return False
def initDots():
global pacDots
pacDots = []
a = x = 0
while x < 30:
y = 0
while y < 29:
d = gamemaps.checkDotPoint(10+x*20, 10+y*20)
if d == 1:
pacDots.append(Actor("dot",(10+x*20, 90+y*20)))
pacDots[a].status = 0
pacDots[a].type = 1
a += 1
if d == 2:
pacDots.append(Actor("power",(10+x*20, 90+y*20)))
pacDots[a].status = 0
pacDots[a].type = 2
a += 1
y += 1
x += 1
def initGhosts():
global ghosts, moveGhostsFlag
moveGhostsFlag = 4
ghosts = []
g = 0
while g < 4:
ghosts.append(Actor("ghost"+str(g+1),(270+(g*20), 370)))
ghosts[g].dir = randint(0, 3)
ghosts[g].status = 0
g += 1
def inputLock():
global player
player.inputActive = False
def inputUnLock():
global player
player.movex = player.movey = 0
player.inputActive = True
init()
pgzrun.go()
|
#pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring,no-self-use,too-few-public-methods
def first(): # First should be defined after second, to keep call order
pass
def second():
first()
class Example:
    def first(self): # First should be defined after second, to keep call order
pass
def second(self):
self.first()
def before(self): # 'Before' is placed correctly before 'after'
self.after()
def after(self):
pass
class ExampleInner:
def outer(self):
def inner(): # Inner functions are an exception, these must be defined before their usage
pass
inner()
|
import logging
class Calculator(object):
def __init__(self, config):
self.config = config
|
import json
import os
import pytest
from flask import Flask, url_for
from pyquery import PyQuery as pq
from flask_jsondash import charts_builder, utils
from flask_jsondash import db
URL_BASE = 'http://127.0.0.1:80'
app = Flask('test_flask_jsondash',
template_folder='../flask_jsondash/example_app/templates')
app.config.update(
# Required to fix context errors.
# See https://github.com/jarus/flask-testing/issues/21
PRESERVE_CONTEXT_ON_EXCEPTION=False,
SECRET_KEY='123',
)
app.debug = True
app.register_blueprint(charts_builder.charts)
fake_db = []
def _username():
return 'Username'
def auth_valid(**kwargs):
return True
def auth_invalid(**kwargs):
return False
def get_json_config(name):
parent = os.getcwd().replace('tests/', '')
path = '{0}/example_app/examples/config/{1}'.format(parent, name)
view = json.load(open(path, 'r'))
return view
def read(*args, **kwargs):
if 'override' in kwargs:
newkwargs = kwargs.pop('override')
def _read(*args, **kwargs):
return dict(**newkwargs)
return _read
if 'c_id' not in kwargs:
return fake_db
for i, dash in enumerate(fake_db):
if dash['id'] == kwargs.get('c_id'):
return dash
def delete(c_id, **kwargs):
global fake_db
for i, dash in enumerate(fake_db):
if dash['id'] == c_id:
del fake_db[i]
break
def create(*args, **kwargs):
global fake_db
fake_db.append(dict(**kwargs.get('data')))
def update(c_id, **kwargs):
global fake_db
for i, dash in enumerate(fake_db):
if dash['id'] == c_id:
fake_db[i].update(**kwargs)
break
def setup_dashboard(monkeypatch, app, test, data):
"""Helper function to setup dashboard, redirect, and get its html."""
assert len(read()) == 0
monkeypatch.setattr(charts_builder, 'auth', auth_valid)
test.post(url_for('jsondash.create'), data=data, follow_redirects=True)
view_id = read()[0]['id']
assert len(read()) == 1
url = url_for('jsondash.view', c_id=view_id)
res = test.get(url)
dom = pq(res.data)
return dom
def make_chart(**kwargs):
"""Create a fake chart."""
data = dict(
name='somechart',
width=1,
height=1,
family='C3',
type='line',
row=1,
dataSource='...',
)
data.update(**kwargs)
return json.dumps(data)
@pytest.yield_fixture(autouse=True)
def ctx(monkeypatch, request):
with app.test_request_context() as req_ctx:
global fake_db
fake_db = []
monkeypatch.setattr(utils.adapter, 'read', read)
monkeypatch.setattr(utils.adapter, 'create', create)
monkeypatch.setattr(utils.adapter, 'delete', delete)
monkeypatch.setattr(utils.adapter, 'update', update)
monkeypatch.setattr(utils.adapter, 'filter', read)
yield req_ctx
@pytest.fixture()
def adapter():
return db.get_db_handler()
@pytest.fixture()
def client():
app.config.update(
JSONDASH_GLOBALDASH=False,
JSONDASH_FILTERUSERS=False,
JSONDASH_GLOBAL_USER='global-test',
)
app.config['JSONDASH'] = dict(
metadata=dict(
created_by=_username,
username=_username,
),
static=dict(
js_path='js/vendor/',
css_path='css/vendor/',
),
auth=dict(
edit_others=auth_invalid,
edit_global=auth_invalid,
create=auth_invalid,
view=auth_invalid,
clone=auth_invalid,
delete=auth_invalid,
)
)
return app, app.test_client()
|
import json
import pytest
import os
import sys
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(f'{abs_path}/../..')
sys.path.append(f'{abs_path}/../../..')
print(sys.path[-1])
from moto import mock_dynamodb2
from redirect_handler import app
import boto_utils
from constants import TABLE_NAME
import boto3
@pytest.fixture()
def apigw_event():
""" Generates API GW Event"""
with open('./events/redirect_simple_event.json') as fp:
return json.load(fp)
def test_lambda_handler(apigw_event):
    # Note: put must work. You need a test entry under the key '1234567' in your DB for this test to pass.
@mock_dynamodb2
def mock_events():
dynamodb = boto3.resource('dynamodb')
created_table = dynamodb.create_table(
TableName=TABLE_NAME,
KeySchema=[
{
'AttributeName': 'redirect_url',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'redirect_url',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
boto_utils.put('https://example.com', '1234567', '', '')
mock_events()
ret = app.lambda_handler(apigw_event, '')
assert ret['statusCode'] == 302
assert 'location' in ret['headers']
failed_codes = {206, 204}
apigw_event['pathParameters']['hash'] = apigw_event['pathParameters']['hash'][:-1]
ret = app.lambda_handler(apigw_event, '')
assert ret['statusCode'] in failed_codes
apigw_event['pathParameters']['hash'] = 'garbage'
ret = app.lambda_handler(apigw_event, '')
assert ret['statusCode'] in failed_codes
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from timeit import time
import warnings
import cv2
import numpy as np
from PIL import Image
from yolo import YOLO
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.detection_yolo import Detection_YOLO
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
import imutils.video
from videocaptureasync import VideoCaptureAsync
warnings.filterwarnings('ignore')
def main(yolo):
# Definition of the parameters
max_cosine_distance = 0.3
nn_budget = None
nms_max_overlap = 1.0
# Deep SORT
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric(
"cosine", max_cosine_distance, nn_budget)
tracker = Tracker(metric)
tracking = True
writeVideo_flag = True
asyncVideo_flag = False
file_path = 'video.webm'
if asyncVideo_flag:
video_capture = VideoCaptureAsync(file_path)
else:
video_capture = cv2.VideoCapture(file_path)
if asyncVideo_flag:
video_capture.start()
if writeVideo_flag:
if asyncVideo_flag:
w = int(video_capture.cap.get(3))
h = int(video_capture.cap.get(4))
else:
w = int(video_capture.get(3))
h = int(video_capture.get(4))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
frame_index = -1
fps = 0.0
fps_imutils = imutils.video.FPS().start()
while True:
ret, frame = video_capture.read() # frame shape 640*480*3
        if not ret:
break
t1 = time.time()
image = Image.fromarray(frame[..., ::-1]) # bgr to rgb
boxes, confidence, classes = yolo.detect_image(image)
if tracking:
features = encoder(frame, boxes)
detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in
zip(boxes, confidence, classes, features)]
else:
detections = [Detection_YOLO(bbox, confidence, cls) for bbox, confidence, cls in
zip(boxes, confidence, classes)]
# Run non-maxima suppression.
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
indices = preprocessing.non_max_suppression(
boxes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
if tracking:
# Call the tracker
tracker.predict()
tracker.update(detections)
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(
bbox[2]), int(bbox[3])), (255, 255, 255), 2)
cv2.putText(frame, "ID: " + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0,
1.5e-3 * frame.shape[0], (0, 255, 0), 1)
for det in detections:
bbox = det.to_tlbr()
score = "%.2f" % round(det.confidence * 100, 2) + "%"
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(
bbox[2]), int(bbox[3])), (255, 0, 0), 2)
if len(classes) > 0:
cls = det.cls
cv2.putText(frame, str(cls) + " " + score, (int(bbox[0]), int(bbox[3])), 0,
1.5e-3 * frame.shape[0], (0, 255, 0), 1)
cv2.imshow('', frame)
if writeVideo_flag: # and not asyncVideo_flag:
# save a frame
out.write(frame)
frame_index = frame_index + 1
fps_imutils.update()
if not asyncVideo_flag:
fps = (fps + (1./(time.time()-t1))) / 2
print("FPS = %f" % (fps))
# Press Q to stop!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps_imutils.stop()
print('imutils FPS: {}'.format(fps_imutils.fps()))
if asyncVideo_flag:
video_capture.stop()
else:
video_capture.release()
if writeVideo_flag:
out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main(YOLO())
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMypy(PythonPackage):
"""Optional static typing for Python."""
homepage = "http://www.mypy-lang.org/"
pypi = "mypy/mypy-0.740.tar.gz"
version('0.910', sha256='704098302473cb31a218f1775a873b376b30b4c18229421e9e9dc8916fd16150')
version('0.900', sha256='65c78570329c54fb40f956f7645e2359af5da9d8c54baa44f461cdc7f4984108')
version('0.800', sha256='e0202e37756ed09daf4b0ba64ad2c245d357659e014c3f51d8cd0681ba66940a')
version('0.790', sha256='2b21ba45ad9ef2e2eb88ce4aeadd0112d0f5026418324176fd494a6824b74975')
version('0.740', sha256='48c8bc99380575deb39f5d3400ebb6a8a1cb5cc669bbba4d3bb30f904e0a0e7d')
variant('python2', default=False, description='Enable checking of python 2 type annotations')
depends_on("python@3.5:", type=("build", "run"))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-typed-ast@1.4.0:1.4', type=('build', 'run'))
depends_on('py-typing-extensions@3.7.4:', type=('build', 'run'))
depends_on('py-mypy-extensions@0.4.3:0.4', type=('build', 'run'))
depends_on('py-toml', when='@0.900:', type=('build', 'run'))
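    # Illustrative usage sketch (Spack CLI, assuming this package file lives in an
    # active package repository):
    #
    #     $ spack install py-mypy@0.910
    #     $ spack load py-mypy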
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Models for scheduled execution of jobs"""
import enum
from typing import Optional, Type
from flask_appbuilder import Model
from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from superset import security_manager
from superset.models.helpers import AuditMixinNullable, ImportMixin
metadata = Model.metadata # pylint: disable=no-member
class ScheduleType(str, enum.Enum):
slice = "slice"
dashboard = "dashboard"
class EmailDeliveryType(str, enum.Enum):
attachment = "Attachment"
inline = "Inline"
class SliceEmailReportFormat(str, enum.Enum):
visualization = "Visualization"
data = "Raw data"
class EmailSchedule:
"""Schedules for emailing slices / dashboards"""
__tablename__ = "email_schedules"
id = Column(Integer, primary_key=True)
active = Column(Boolean, default=True, index=True)
crontab = Column(String(50))
@declared_attr
def user_id(self):
return Column(Integer, ForeignKey("ab_user.id"))
@declared_attr
def user(self):
return relationship(
security_manager.user_model,
backref=self.__tablename__,
foreign_keys=[self.user_id],
)
recipients = Column(Text)
deliver_as_group = Column(Boolean, default=False)
delivery_type = Column(Enum(EmailDeliveryType))
class DashboardEmailSchedule(Model, AuditMixinNullable, ImportMixin, EmailSchedule):
__tablename__ = "dashboard_email_schedules"
dashboard_id = Column(Integer, ForeignKey("dashboards.id"))
dashboard = relationship(
"Dashboard", backref="email_schedules", foreign_keys=[dashboard_id]
)
class SliceEmailSchedule(Model, AuditMixinNullable, ImportMixin, EmailSchedule):
__tablename__ = "slice_email_schedules"
slice_id = Column(Integer, ForeignKey("slices.id"))
slice = relationship("Slice", backref="email_schedules", foreign_keys=[slice_id])
email_format = Column(Enum(SliceEmailReportFormat))
def get_scheduler_model(report_type: ScheduleType) -> Optional[Type[EmailSchedule]]:
if report_type == ScheduleType.dashboard:
return DashboardEmailSchedule
elif report_type == ScheduleType.slice:
return SliceEmailSchedule
return None
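# Illustrative sketch: the lookup above maps a ScheduleType to its model class.
#
#     assert get_scheduler_model(ScheduleType.dashboard) is DashboardEmailSchedule
#     assert get_scheduler_model(ScheduleType.slice) is SliceEmailSchedule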
|
# -*- coding: utf-8 -*-
"""
Ebay Trading API
"""
import xmltodict
import requests
from . import app_settings as settings
class TradingAPIWarning(Exception):
pass
class TradingAPIFailure(Exception):
pass
class TradingAPIInvalidResponse(Exception):
pass
class TradingAPI(object):
_last_response = None
def __init__(self, production=False, site_id=0, token=None):
self.production = production
if self.production is True:
self._dev_id = settings.EBAY_PRODUCTION_DEVID
self._app_id = settings.EBAY_PRODUCTION_APPID
self._cert_id = settings.EBAY_PRODUCTION_CERTID
self._endpoint = settings.EBAY_PRODUCTION_TRADING_API_ENDPOINT
self.ru_name = settings.EBAY_PRODUCTION_RU_NAME
else:
self._dev_id = settings.EBAY_SANDBOX_DEVID
self._app_id = settings.EBAY_SANDBOX_APPID
self._cert_id = settings.EBAY_SANDBOX_CERTID
self._endpoint = settings.EBAY_SANDBOX_TRADING_API_ENDPOINT
self.ru_name = settings.EBAY_SANDBOX_RU_NAME
self.site_id = site_id
self.version = settings.EBAY_TRADING_API_VERSION
self._token = token
def _get_requester_credentials(self):
return {'eBayAuthToken': self._token}
def _get_headers(self, call):
return {
'X-EBAY-API-COMPATIBILITY-LEVEL': str(self.version),
'X-EBAY-API-DEV-NAME': self._dev_id,
'X-EBAY-API-APP-NAME': self._app_id,
'X-EBAY-API-CERT-NAME': self._cert_id,
'X-EBAY-API-SITEID': str(self.site_id),
'X-EBAY-API-CALL-NAME': call,
}
def _get_xml_request(self, call, kw_dict, include_requester_credentials):
request_key = '{call}Request'.format(call=call)
request_dict = {request_key: {
'@xmlns': 'urn:ebay:apis:eBLBaseComponents',
}}
for key, value in kw_dict.items():
request_dict[request_key][key] = value
if self._token and include_requester_credentials:
credentials = self._get_requester_credentials()
request_dict[request_key]['RequesterCredentials'] = credentials
data = xmltodict.unparse(request_dict)
return data
def _get_data_from_response(self, call, data, response):
d = xmltodict.parse(response.content)
response_key = '{call}Response'.format(call=call)
data = d[response_key]
return data
def execute(
self,
call,
kw_dict,
include_requester_credentials=True,
raise_on_warning=False,
raise_on_failure=True):
headers = self._get_headers(call)
data = self._get_xml_request(
call, kw_dict, include_requester_credentials)
response = requests.post(self._endpoint, data=data, headers=headers)
self._last_response = response
response_data = self._get_data_from_response(call, data, response)
if 'Ack' not in response_data:
raise TradingAPIInvalidResponse('No Ack field in response')
if raise_on_failure and response_data['Ack'].lower() == 'failure':
raise TradingAPIFailure('{0}'.format(response_data.get(
'Errors', 'No error list found')))
if raise_on_warning and response_data['Ack'].lower() == 'warning':
raise TradingAPIWarning('{0}'.format(response_data.get(
'Errors', 'No error list found')))
return response_data
def set_token(self, token):
self._token = token
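# Illustrative usage sketch (credentials and endpoints come from app_settings;
# 'GeteBayOfficialTime' is just a simple Trading API call used here as an example):
#
#     api = TradingAPI(production=False, site_id=0, token='<auth-token>')
#     response = api.execute('GeteBayOfficialTime', {})
#     print(response['Ack'], response.get('Timestamp'))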
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
|
import pathlib
import re
import pytest
from typer.testing import CliRunner
from taipo.__main__ import app
from taipo.common import nlu_path_to_dataframe
runner = CliRunner()
@pytest.mark.parametrize(
"path_in,path_out", [("nlu.yml", "nlu.yml"), ("foobar.yml", "foobar.yml")]
)
def test_keyboard_augment(tmp_path, path_in, path_out):
"""Ensure basic usage of command works."""
cmd = [
"keyboard",
"augment",
"tests/data/nlu/nlu.yml",
f"{tmp_path}/{path_in}",
]
runner.invoke(app, cmd)
expected = nlu_path_to_dataframe("tests/data/nlu/nlu.yml").shape
assert nlu_path_to_dataframe(f"{tmp_path}/{path_out}").shape == expected
def test_keyboard_augment_keeps_annotations(tmp_path):
"""Ensure the format of entity annotations is kept correctly."""
cmd = [
"keyboard",
"augment",
"tests/data/nlu/nlu.yml",
f"{tmp_path}/nlu.yml",
]
runner.invoke(app, cmd)
df_in = nlu_path_to_dataframe("tests/data/nlu/nlu.yml")
df_out = nlu_path_to_dataframe(f"{tmp_path}/nlu.yml")
annotation_pattern = r"\[\w+\]\(\w+\)"
for text_in, text_out in zip(df_in.text, df_out.text):
annotations_in = re.findall(annotation_pattern, text_in)
annotations_out = re.findall(annotation_pattern, text_out)
assert len(annotations_in) == len(annotations_out)
@pytest.mark.parametrize(
"lang", ["de", "en", "es", "fr", "he", "it", "nl", "pl", "th", "uk"]
)
def test_keyboard_lang(tmp_path, lang):
"""
Ensure that the languages listed in nlpaug indeed work.
https://github.com/makcedward/nlpaug/tree/master/nlpaug/res/char/keyboard
"""
cmd = [
"keyboard",
"augment",
"tests/data/nlu/nlu.yml",
f"{tmp_path}/nlu.yml",
"--lang",
lang,
]
runner.invoke(app, cmd)
expected = nlu_path_to_dataframe("tests/data/nlu/nlu.yml").shape
assert nlu_path_to_dataframe(f"{tmp_path}/nlu.yml").shape == expected
def test_keyboard_generate():
"""Ensure basic usage of command works."""
files = [
"data/nlu-train.yml",
"data/typod-nlu-train.yml",
"test/nlu-valid.yml",
"test/typod-nlu-valid.yml",
]
for f in files:
if pathlib.Path(f).exists():
pathlib.Path(f).unlink()
cmd = ["keyboard", "generate", "data/nlu-orig.yml", "--prefix", "typod"]
res = runner.invoke(app, cmd)
for f in files:
assert pathlib.Path(f).exists()
pathlib.Path(f).unlink()
assert res.exit_code == 0
|
from scirpy.util import (
_is_na,
_is_false,
_is_true,
_normalize_counts,
_is_symmetric,
_reduce_nonzero,
_translate_dna_to_protein,
)
from scirpy.util.graph import layout_components
from itertools import combinations
import igraph as ig
import numpy as np
import pandas as pd
import numpy.testing as npt
import pytest
import scipy.sparse
from .fixtures import adata_tra
import warnings
def test_reduce_nonzero():
A = np.array([[0, 0, 3], [1, 2, 5], [7, 0, 0]])
B = np.array([[1, 0, 3], [2, 1, 0], [6, 0, 5]])
A_csr = scipy.sparse.csr_matrix(A)
B_csr = scipy.sparse.csr_matrix(B)
A_csc = scipy.sparse.csc_matrix(A)
B_csc = scipy.sparse.csc_matrix(B)
expected = np.array([[1, 0, 3], [1, 1, 5], [6, 0, 5]])
with pytest.raises(ValueError):
_reduce_nonzero(A, B)
npt.assert_equal(_reduce_nonzero(A_csr, B_csr).toarray(), expected)
npt.assert_equal(_reduce_nonzero(A_csc, B_csc).toarray(), expected)
npt.assert_equal(_reduce_nonzero(A_csr, A_csr.copy()).toarray(), A_csr.toarray())
def test_is_symmetric():
M = np.array([[1, 2, 2], [2, 1, 3], [2, 3, 1]])
S_csr = scipy.sparse.csr_matrix(M)
S_csc = scipy.sparse.csc_matrix(M)
S_lil = scipy.sparse.lil_matrix(M)
assert _is_symmetric(M)
assert _is_symmetric(S_csr)
assert _is_symmetric(S_csc)
assert _is_symmetric(S_lil)
M = np.array([[1, 2, 2], [2, 1, np.nan], [2, np.nan, np.nan]])
S_csr = scipy.sparse.csr_matrix(M)
S_csc = scipy.sparse.csc_matrix(M)
S_lil = scipy.sparse.lil_matrix(M)
assert _is_symmetric(M)
assert _is_symmetric(S_csr)
assert _is_symmetric(S_csc)
assert _is_symmetric(S_lil)
M = np.array([[1, 2, 2], [2, 1, 3], [3, 2, 1]])
S_csr = scipy.sparse.csr_matrix(M)
S_csc = scipy.sparse.csc_matrix(M)
S_lil = scipy.sparse.lil_matrix(M)
assert not _is_symmetric(M)
assert not _is_symmetric(S_csr)
assert not _is_symmetric(S_csc)
assert not _is_symmetric(S_lil)
def test_is_na():
warnings.filterwarnings("error")
assert _is_na(None)
assert _is_na(np.nan)
assert _is_na("nan")
assert not _is_na(42)
assert not _is_na("Foobar")
assert not _is_na(dict())
array_test = np.array(["None", "nan", None, np.nan, "foobar"])
array_expect = np.array([True, True, True, True, False])
array_test_bool = np.array([True, False, True])
array_expect_bool = np.array([False, False, False])
npt.assert_equal(_is_na(array_test), array_expect)
npt.assert_equal(_is_na(pd.Series(array_test)), array_expect)
npt.assert_equal(_is_na(array_test_bool), array_expect_bool)
npt.assert_equal(_is_na(pd.Series(array_test_bool)), array_expect_bool)
def test_is_false():
warnings.filterwarnings("error")
assert _is_false(False)
assert _is_false(0)
assert _is_false("")
assert _is_false("False")
assert _is_false("false")
assert not _is_false(42)
assert not _is_false(True)
assert not _is_false("true")
assert not _is_false("foobar")
assert not _is_false(np.nan)
assert not _is_false(None)
assert not _is_false("nan")
assert not _is_false("None")
array_test = np.array(
["False", "false", 0, 1, True, False, "true", "Foobar", np.nan, "nan"],
dtype=object,
)
array_test_str = array_test.astype("str")
array_expect = np.array(
[True, True, True, False, False, True, False, False, False, False]
)
array_test_bool = np.array([True, False, True])
array_expect_bool = np.array([False, True, False])
npt.assert_equal(_is_false(array_test), array_expect)
npt.assert_equal(_is_false(array_test_str), array_expect)
npt.assert_equal(_is_false(pd.Series(array_test)), array_expect)
npt.assert_equal(_is_false(pd.Series(array_test_str)), array_expect)
npt.assert_equal(_is_false(array_test_bool), array_expect_bool)
npt.assert_equal(_is_false(pd.Series(array_test_bool)), array_expect_bool)
def test_is_true():
warnings.filterwarnings("error")
assert not _is_true(False)
assert not _is_true(0)
assert not _is_true("")
assert not _is_true("False")
assert not _is_true("false")
assert not _is_true("0")
assert not _is_true(np.nan)
assert not _is_true(None)
assert not _is_true("nan")
assert not _is_true("None")
assert _is_true(42)
assert _is_true(True)
assert _is_true("true")
assert _is_true("foobar")
assert _is_true("True")
array_test = np.array(
["False", "false", 0, 1, True, False, "true", "Foobar", np.nan, "nan"],
dtype=object,
)
array_test_str = array_test.astype("str")
array_expect = np.array(
[False, False, False, True, True, False, True, True, False, False]
)
array_test_bool = np.array([True, False, True])
array_expect_bool = np.array([True, False, True])
npt.assert_equal(_is_true(array_test), array_expect)
npt.assert_equal(_is_true(array_test_str), array_expect)
npt.assert_equal(_is_true(pd.Series(array_test)), array_expect)
npt.assert_equal(_is_true(pd.Series(array_test_str)), array_expect)
npt.assert_equal(_is_true(array_test_bool), array_expect_bool)
npt.assert_equal(_is_true(pd.Series(array_test_bool)), array_expect_bool)
@pytest.fixture
def group_df():
return pd.DataFrame().assign(
cell=["c1", "c2", "c3", "c4", "c5", "c6"],
sample=["s2", "s1", "s2", "s2", "s2", "s1"],
)
def test_normalize_counts(group_df):
with pytest.raises(ValueError):
_normalize_counts(group_df, True, None)
npt.assert_equal(_normalize_counts(group_df, False), [1] * 6)
npt.assert_equal(
_normalize_counts(group_df, "sample"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]
)
npt.assert_equal(
_normalize_counts(group_df, True, "sample"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]
)
def test_layout_components():
g = ig.Graph()
# add 100 unconnected nodes
g.add_vertices(100)
# add 50 2-node components
g.add_vertices(100)
g.add_edges([(ii, ii + 1) for ii in range(100, 200, 2)])
# add 33 3-node components
g.add_vertices(100)
for ii in range(200, 299, 3):
g.add_edges([(ii, ii + 1), (ii, ii + 2), (ii + 1, ii + 2)])
# add a couple of larger components
n = 300
for ii in np.random.randint(4, 30, size=10):
g.add_vertices(ii)
g.add_edges(combinations(range(n, n + ii), 2))
n += ii
layout_components(g, arrange_boxes="size", component_layout="fr")
try:
layout_components(g, arrange_boxes="rpack", component_layout="fr")
except ImportError:
warnings.warn(
"The 'rpack' layout-test was skipped because rectangle "
"packer is not installed. "
)
layout_components(g, arrange_boxes="squarify", component_layout="fr")
def test_translate_dna_to_protein(adata_tra):
for nt, aa in zip(adata_tra.obs["IR_VJ_1_cdr3_nt"], adata_tra.obs["IR_VJ_1_cdr3"]):
assert _translate_dna_to_protein(nt) == aa
|
"""File path encryption.
Put files to public directory by encryption.
And this anchers of relationship.
This module anable change the anchers.
"""
import glob
import logging
import os
import shutil
try:
from . import filename
from .anchor.anchor import Anchor
except ImportError:
import filename
from anchor.anchor import Anchor
def main(src, dst):
"""Main script of this code."""
# Currently, you can use only `text` type ;)
anchor = Anchor('text')
for org_f in _read_files(src):
cur_f = anchor.request_current_path(org_f)
# WARNING: Theoretically, encrypted file names have a very low chance of
# colliding, and this script does not check for duplicate file names.
enc_f = _make_dest_dir(dst, _encrypt_file(org_f, anchor))
logging.debug('---')
logging.debug('Original: {0}'.format(org_f))
logging.debug('Current: {0}'.format(cur_f))
logging.debug('Encrypt: {0}'.format(enc_f))
# TODO: Add transaction process.
_copy(org_f, enc_f)
anchor.change(org_f, enc_f) # Write the change to anchor file
if cur_f and os.path.exists(cur_f):
_delete(dst, cur_f)
def _read_files(file_path):
"""Read all target files with generator."""
for r, d, fs in os.walk(file_path):
for f in fs:
yield os.path.join(r, f)
def _encrypt_file(fname, anchor):
"""Encrypt file name."""
return filename.change(fname)
def _make_dest_dir(public_dir, file_path):
"""Create destination directory."""
return os.path.join(public_dir, file_path)
def _copy(org_f, enc_f):
"""Copy source file into destination file."""
os.makedirs(os.path.dirname(enc_f), exist_ok=True)
shutil.copy(org_f, enc_f)
def _delete(dst_dir, cur_f):
"""Delete old encrypt file"""
delete_base_path = cur_f.replace(dst_dir.rstrip('/')+'/', '')
delete_path = os.path.join(dst_dir, delete_base_path.split('/')[0])
shutil.rmtree(delete_path)
logging.debug('Delete: {}'.format(delete_path))
def execute():
import argparse
from os.path import expanduser
from os.path import isdir
home_dir = expanduser('~')
p = argparse.ArgumentParser(description='Encrypt files.')
# source and destination are required arguments.
p.add_argument('source', help='Source directory.')
p.add_argument('destination', help='Destination directory of encryption.')
# debug mode.
p.add_argument('-v', help='Verbose mode.', dest='verbose', action='count', default=0)
args = p.parse_args()
src = str(args.source)
dst = str(args.destination)
if not isdir(src):
print('No such directory \'{}\'.'.format(src))
quit()
if not isdir(dst):
print('No such directory \'{}\'.'.format(dst))
quit()
verbose = args.verbose
if isinstance(verbose, int) and verbose > 0:
log_format = '%(asctime)s\t[%(levelname)s]\t%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format)
main(src, dst)
if __name__ == '__main__':
execute()
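# Hedged usage note (not part of the original script; the file name and paths below are
# hypothetical): assuming this module is saved as encrypt.py, a typical invocation is
#   python encrypt.py -v ~/private/src ~/public/dst
# where both paths must be existing directories and -v enables debug logging.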
|
import copy
import logging
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import numpy as np
from skimage.segmentation import felzenszwalb, quickshift, slic
from alibi.api.defaults import DEFAULT_DATA_ANCHOR_IMG, DEFAULT_META_ANCHOR
from alibi.api.interfaces import Explainer, Explanation
from alibi.exceptions import (AlibiPredictorCallException,
AlibiPredictorReturnTypeError)
from alibi.utils.wrappers import ArgmaxTransformer
from .anchor_base import AnchorBaseBeam
from .anchor_explanation import AnchorExplanation
logger = logging.getLogger(__name__)
DEFAULT_SEGMENTATION_KWARGS = {
'felzenszwalb': {},
'quickshift': {},
'slic': {'n_segments': 10, 'compactness': 10, 'sigma': .5}
} # type: Dict[str, Dict]
def scale_image(image: np.ndarray, scale: tuple = (0, 255)) -> np.ndarray:
"""
Scales an image in a specified range.
Parameters
----------
image
Image to be scaled.
scale
The scaling interval.
Returns
-------
img_scaled
Scaled image.
"""
img_max, img_min = image.max(), image.min()
img_std = (image - img_min) / (img_max - img_min)
img_scaled = img_std * (scale[1] - scale[0]) + scale[0]
return img_scaled
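# Worked example (illustrative, not from the original source): for an image whose values
# lie in [0.0, 1.0] and the default scale=(0, 255), a pixel of 0.5 maps to
# (0.5 - 0.0) / (1.0 - 0.0) * (255 - 0) + 0 = 127.5. The function assumes
# img_max > img_min; a constant image would trigger a division by zero.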
class AnchorImageSampler:
def __init__(
self,
predictor: Callable,
segmentation_fn: Callable,
custom_segmentation: bool,
image: np.ndarray,
images_background: Optional[np.ndarray] = None,
p_sample: float = 0.5,
n_covered_ex: int = 10,
):
"""
Initialize anchor image sampler.
Parameters
----------
predictor
A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs.
segmentation_fn
Function used to segment the images.
image
Image to be explained.
images_background
Images to overlay superpixels on.
p_sample
Probability for a pixel to be represented by the average value of its superpixel.
n_covered_ex
How many examples where anchors apply to store for each anchor sampled during search
(both examples where prediction on samples agrees/disagrees with `desired_label` are stored).
"""
self.predictor = predictor
self.segmentation_fn = segmentation_fn
self.custom_segmentation = custom_segmentation
self.image = image
self.images_background = images_background
self.n_covered_ex = n_covered_ex
self.p_sample = p_sample
self.segments = self.generate_superpixels(image)
self.segment_labels = list(np.unique(self.segments))
self.instance_label = self.predictor(image[np.newaxis, ...])[0]
def __call__(
self, anchor: Tuple[int, tuple], num_samples: int, compute_labels: bool = True
) -> List[Union[np.ndarray, float, int]]:
"""
Sample images from a perturbation distribution by masking randomly chosen superpixels
from the original image and replacing them with pixel values from superimposed images
if background images are provided to the explainer. Otherwise, the superpixels from the
original image are replaced with their average values.
Parameters
----------
anchor
- ``int`` - order of anchor in the batch.
- ``tuple`` - features (= superpixels) present in the proposed anchor.
num_samples
Number of samples used.
compute_labels
If ``True``, an array of comparisons between predictions on perturbed samples and
instance to be explained is returned.
Returns
-------
If ``compute_labels=True``, a list containing the following is returned
- `covered_true` - perturbed examples where the anchor applies and the model prediction on the perturbed sample \
is the same as the instance prediction.
- `covered_false` - perturbed examples where the anchor applies and the model prediction on the perturbed sample \
is NOT the same as the instance prediction.
- `labels` - `num_samples` ints indicating whether the prediction on the perturbed sample matches (1) \
the label of the instance to be explained or not (0).
- `data` - Matrix with 1s and 0s indicating whether the values in a superpixel will remain unchanged (1) or \
will be perturbed (0), for each sample.
- `-1.0` - indicates exact coverage is not computed for this algorithm.
- `anchor[0]` - position of anchor in the batch request
Otherwise, a list containing the data matrix only is returned.
"""
if compute_labels:
raw_data, data = self.perturbation(anchor[1], num_samples)
labels = self.compare_labels(raw_data)
covered_true = raw_data[labels][: self.n_covered_ex]
covered_true = [scale_image(img) for img in covered_true]
covered_false = raw_data[np.logical_not(labels)][: self.n_covered_ex]
covered_false = [scale_image(img) for img in covered_false]
# coverage set to -1.0 as we can't compute 'true' coverage for this model
return [covered_true, covered_false, labels.astype(int), data, -1.0, anchor[0]] # type: ignore
else:
data = self._choose_superpixels(num_samples)
data[:, anchor[1]] = 1 # superpixels in candidate anchor are not perturbed
return [data]
def compare_labels(self, samples: np.ndarray) -> np.ndarray:
"""
Compute the agreement between a classifier prediction on an instance to be explained
and the prediction on a set of samples which have a subset of perturbed superpixels.
Parameters
----------
samples
Samples whose labels are to be compared with the instance label.
Returns
-------
A boolean array indicating whether the prediction was the same as the instance label.
"""
return self.predictor(samples) == self.instance_label
def _choose_superpixels(
self, num_samples: int, p_sample: float = 0.5
) -> np.ndarray:
"""
Generates a binary mask of dimension [num_samples, M] where M is the number of
image superpixels (segments).
Parameters
----------
num_samples
Number of perturbed images to be generated
p_sample:
The probability that a superpixel is perturbed
Returns
-------
data
Binary 2D mask, where each non-zero entry in a row indicates that
the values of the particular image segment will not be perturbed.
"""
n_features = len(self.segment_labels)
data = np.random.choice(
[0, 1], num_samples * n_features, p=[p_sample, 1 - p_sample]
)
data = data.reshape((num_samples, n_features))
return data
def perturbation(
self, anchor: tuple, num_samples: int
) -> Tuple[np.ndarray, np.ndarray]:
"""
Perturbs an image by altering the values of selected superpixels. If a dataset of image
backgrounds is provided to the explainer, then the superpixels are replaced with the
equivalent superpixels from the background image. Otherwise, the superpixels are replaced
by their average value.
Parameters
----------
anchor:
Contains the superpixels whose values are not going to be perturbed.
num_samples:
Number of perturbed samples to be returned.
Returns
-------
imgs
A `[num_samples, H, W, C]` array of perturbed images.
segments_mask
A `[num_samples, M]` binary mask, where `M` is the number of image superpixels
(segments). 1 indicates that the values in that particular superpixel are not
perturbed.
"""
image = self.image
segments = self.segments
backgrounds: Union[np.ndarray, List[None]]
# choose superpixels to be perturbed
segments_mask = self._choose_superpixels(num_samples, p_sample=self.p_sample)
segments_mask[:, anchor] = 1
# for each sample, need to sample one of the background images if provided
if self.images_background is not None:
backgrounds = np.random.choice(
range(len(self.images_background)),
segments_mask.shape[0],
replace=True,
)
else:
backgrounds = [None] * segments_mask.shape[0]
# create fudged image where the pixel value in each superpixel is set to the
# average over the superpixel for each channel
fudged_image = image.copy()
n_channels = image.shape[-1]
for x in np.unique(segments):
fudged_image[segments == x] = [
np.mean(image[segments == x][:, i]) for i in range(n_channels)
]
pert_imgs = []
for mask, background_idx in zip(segments_mask, backgrounds):
temp = copy.deepcopy(image)
to_perturb = np.where(mask == 0)[0]
# create mask for each superpixel not present in the sample
mask = np.zeros(segments.shape).astype(bool)
for superpixel in to_perturb:
mask[segments == superpixel] = True
if background_idx is not None:
# replace values with those of background image
temp[mask] = self.images_background[background_idx][mask] # type: ignore[index]
else:
# ... or with the averaged superpixel value
temp[mask] = fudged_image[mask]
pert_imgs.append(temp)
return np.array(pert_imgs), segments_mask
def generate_superpixels(self, image: np.ndarray) -> np.ndarray:
"""
Generates superpixels from an image (i.e., segments it).
Parameters
----------
image
A grayscale or RGB image.
Returns
-------
A `[H, W]` array of integers. Each integer is a segment (superpixel) label.
"""
image_preproc = self._preprocess_img(image)
return self.segmentation_fn(image_preproc)
def _preprocess_img(self, image: np.ndarray) -> np.ndarray:
"""
Applies necessary transformations to the image prior to segmentation.
Parameters
----------
image
A grayscale or RGB image.
Returns
-------
A preprocessed image.
"""
# Grayscale images are repeated across channels
if not self.custom_segmentation and image.shape[-1] == 1:
image_preproc = np.repeat(image, 3, axis=2)
else:
image_preproc = image.copy()
return image_preproc
class AnchorImage(Explainer):
def __init__(self,
predictor: Callable[[np.ndarray], np.ndarray],
image_shape: tuple,
dtype: Type[np.generic] = np.float32,
segmentation_fn: Any = 'slic',
segmentation_kwargs: Optional[dict] = None,
images_background: Optional[np.ndarray] = None,
seed: Optional[int] = None) -> None:
"""
Initialize anchor image explainer.
Parameters
----------
predictor
A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs.
image_shape
Shape of the image to be explained. The channel axis is expected to be last.
dtype
A `numpy` scalar type that corresponds to the type of input array expected by `predictor`. This may be
used to construct arrays of the given type to be passed through the `predictor`. For most use cases
this argument should have no effect, but it is exposed for use with predictors that would break when
called with an array of unsupported type.
segmentation_fn
Any of the built in segmentation function strings: ``'felzenszwalb'``, ``'slic'`` or ``'quickshift'`` or
a custom segmentation function (callable) which returns an image mask with labels for each superpixel.
See http://scikit-image.org/docs/dev/api/skimage.segmentation.html for more info.
segmentation_kwargs
Keyword arguments for the built in segmentation functions.
images_background
Images to overlay superpixels on.
seed
If set, ensures different runs with the same input will yield same explanation.
Raises
------
:py:class:`alibi.exceptions.AlibiPredictorCallException`
If calling `predictor` fails at runtime.
:py:class:`alibi.exceptions.AlibiPredictorReturnTypeError`
If the return type of `predictor` is not `np.ndarray`.
"""
super().__init__(meta=copy.deepcopy(DEFAULT_META_ANCHOR))
np.random.seed(seed)
# TODO: this logic needs improvement. We should check against a fixed set of strings
# for built-ins instead of any `str`.
if isinstance(segmentation_fn, str) and segmentation_kwargs is None:
try:
segmentation_kwargs = DEFAULT_SEGMENTATION_KWARGS[segmentation_fn]
except KeyError:
logger.warning(
'DEFAULT_SEGMENTATION_KWARGS did not contain any entry '
'for segmentation method {}. No kwargs will be passed to '
'the segmentation function!'.format(segmentation_fn)
)
segmentation_kwargs = {}
elif callable(segmentation_fn) and segmentation_kwargs:
logger.warning(
'Specified both a segmentation function to create superpixels and '
'keyword arguments for built-in segmentation functions. By default '
'the specified segmentation function will be used.'
)
# set the predictor
self.image_shape = tuple(image_shape) # coerce lists
self.dtype = dtype
self.predictor = self._transform_predictor(predictor)
# segmentation function is either a user-defined function or one of the values in
fn_options = {'felzenszwalb': felzenszwalb, 'slic': slic, 'quickshift': quickshift}
if callable(segmentation_fn):
self.custom_segmentation = True
self.segmentation_fn = segmentation_fn
else:
self.custom_segmentation = False
self.segmentation_fn = partial(fn_options[segmentation_fn], **segmentation_kwargs) # type: ignore[arg-type]
self.images_background = images_background
# a superpixel is perturbed with probability p_sample
self.p_sample = 0.5 # type: float
# update metadata
self.meta['params'].update(
custom_segmentation=self.custom_segmentation,
segmentation_kwargs=segmentation_kwargs,
p_sample=self.p_sample,
seed=seed,
image_shape=self.image_shape,
images_background=self.images_background
)
if not self.custom_segmentation:
self.meta['params'].update(segmentation_fn=segmentation_fn)
else:
self.meta['params'].update(segmentation_fn='custom')
def generate_superpixels(self, image: np.ndarray) -> np.ndarray:
"""
Generates superpixels from an image (i.e., segments it).
Parameters
----------
image
A grayscale or RGB image.
Returns
-------
A `[H, W]` array of integers. Each integer is a segment (superpixel) label.
"""
image_preproc = self._preprocess_img(image)
return self.segmentation_fn(image_preproc)
def _preprocess_img(self, image: np.ndarray) -> np.ndarray:
"""
Applies necessary transformations to the image prior to segmentation.
Parameters
----------
image
A grayscale or RGB image.
Returns
-------
A preprocessed image.
"""
# Grayscale images are repeated across channels
if not self.custom_segmentation and image.shape[-1] == 1:
image_preproc = np.repeat(image, 3, axis=2)
else:
image_preproc = image.copy()
return image_preproc
def explain(self, # type: ignore[override]
image: np.ndarray,
p_sample: float = 0.5,
threshold: float = 0.95,
delta: float = 0.1,
tau: float = 0.15,
batch_size: int = 100,
coverage_samples: int = 10000,
beam_size: int = 1,
stop_on_first: bool = False,
max_anchor_size: Optional[int] = None,
min_samples_start: int = 100,
n_covered_ex: int = 10,
binary_cache_size: int = 10000,
cache_margin: int = 1000,
verbose: bool = False,
verbose_every: int = 1,
**kwargs: Any) -> Explanation:
"""
Explain instance and return anchor with metadata.
Parameters
----------
image
Image to be explained.
p_sample
Probability for a pixel to be represented by the average value of its superpixel.
threshold
Minimum precision threshold.
delta
Used to compute `beta`.
tau
Margin between lower confidence bound and minimum precision of upper bound.
batch_size
Batch size used for sampling.
coverage_samples
Number of samples used to estimate coverage from during result search.
beam_size
The number of anchors extended at each step of new anchors construction.
stop_on_first
If ``True``, the beam search algorithm will return the first anchor that satisfies the
probability constraint.
max_anchor_size
Maximum number of features in result.
min_samples_start
Min number of initial samples.
n_covered_ex
How many examples where anchors apply to store for each anchor sampled during search
(both examples where prediction on samples agrees/disagrees with `desired_label` are stored).
binary_cache_size
The result search pre-allocates `binary_cache_size` batches for storing the binary arrays
returned during sampling.
cache_margin
When only ``max(cache_margin, batch_size)`` positions in the binary cache remain empty, a new cache
of the same size is pre-allocated to continue buffering samples.
verbose
Display updates during the anchor search iterations.
verbose_every
Frequency of displayed iterations during anchor search process.
Returns
-------
explanation
`Explanation` object containing the anchor explaining the instance with additional metadata as attributes.
See usage at `AnchorImage examples`_ for details.
.. _AnchorImage examples:
https://docs.seldon.io/projects/alibi/en/stable/methods/Anchors.html
"""
# get params for storage in meta
params = locals()
remove = ['image', 'self']
for key in remove:
params.pop(key)
sampler = AnchorImageSampler(
predictor=self.predictor,
segmentation_fn=self.segmentation_fn,
custom_segmentation=self.custom_segmentation,
image=image,
images_background=self.images_background,
p_sample=p_sample,
n_covered_ex=n_covered_ex,
)
# get anchors and add metadata
mab = AnchorBaseBeam(
samplers=[sampler],
sample_cache_size=binary_cache_size,
cache_margin=cache_margin,
**kwargs)
result = mab.anchor_beam(
desired_confidence=threshold,
delta=delta,
epsilon=tau,
batch_size=batch_size,
coverage_samples=coverage_samples,
beam_size=beam_size,
stop_on_first=stop_on_first,
max_anchor_size=max_anchor_size,
min_samples_start=min_samples_start,
verbose=verbose,
verbose_every=verbose_every,
**kwargs,
) # type: Any
return self._build_explanation(
image, result, sampler.instance_label, params, sampler
)
def _build_explanation(
self,
image: np.ndarray,
result: dict,
predicted_label: int,
params: dict,
sampler: AnchorImageSampler,
) -> Explanation:
"""
Uses the metadata returned by the anchor search algorithm together with
the instance to be explained to build an explanation object.
Parameters
----------
image
Instance to be explained.
result
Dictionary containing the search anchor and metadata.
predicted_label
Label of the instance to be explained.
params
Parameters passed to :py:meth:`alibi.explainers.anchor_image.AnchorImage.explain`.
"""
result['instance'] = image
result['instances'] = np.expand_dims(image, 0)
result['prediction'] = np.array([predicted_label])
# overlay image with anchor mask
anchor = self.overlay_mask(image, sampler.segments, result['feature'])
exp = AnchorExplanation('image', result)
# output explanation dictionary
data = copy.deepcopy(DEFAULT_DATA_ANCHOR_IMG)
data.update(
anchor=anchor,
segments=sampler.segments,
precision=exp.precision(),
coverage=exp.coverage(),
raw=exp.exp_map
)
# create explanation object
explanation = Explanation(meta=copy.deepcopy(self.meta), data=data)
# params passed to explain
explanation.meta['params'].update(params)
return explanation
def overlay_mask(self, image: np.ndarray, segments: np.ndarray, mask_features: list,
scale: tuple = (0, 255)) -> np.ndarray:
"""
Overlay image with mask described by the mask features.
Parameters
----------
image
Image to be explained.
segments
Superpixels.
mask_features
List with superpixels present in mask.
scale
Pixel scale for masked image.
Returns
-------
masked_image
Image overlaid with mask.
"""
mask = np.zeros(segments.shape)
for f in mask_features:
mask[segments == f] = 1
image = scale_image(image, scale=scale)
masked_image = (image * np.expand_dims(mask, 2)).astype(int)
return masked_image
def _transform_predictor(self, predictor: Callable) -> Callable:
# check if predictor returns predicted class or prediction probabilities for each class
# if needed adjust predictor so it returns the predicted class
x = np.zeros((1,) + self.image_shape, dtype=self.dtype)
try:
prediction = predictor(x)
except Exception as e:
msg = f"Predictor failed to be called on {type(x)} of shape {x.shape} and dtype {x.dtype}. " \
f"Check that the parameter `image_shape` is correctly specified."
raise AlibiPredictorCallException(msg) from e
if not isinstance(prediction, np.ndarray):
msg = f"Excepted predictor return type to be {np.ndarray} but got {type(prediction)}."
raise AlibiPredictorReturnTypeError(msg)
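# Shape heuristic used below (explanatory comment, not in the original source): the dummy
# call above produces a prediction for a single input. If the largest axis of that
# prediction is axis 0 (e.g. shape (1,)), the predictor is assumed to already return class
# labels and is used unchanged; otherwise (e.g. shape (1, n_classes) with n_classes > 1)
# it is wrapped in ArgmaxTransformer so that class labels are returned instead.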
if np.argmax(prediction.shape) == 0:
return predictor
else:
transformer = ArgmaxTransformer(predictor)
return transformer
def reset_predictor(self, predictor: Callable) -> None:
"""
Resets the predictor function.
Parameters
----------
predictor
New predictor function.
"""
self.predictor = self._transform_predictor(predictor)
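# Hedged usage sketch (assumed names, not part of the original module): `model.predict`
# and `image` are placeholders for a user-supplied classifier and a single image whose
# shape matches `image_shape`.
#   explainer = AnchorImage(predictor=model.predict, image_shape=(28, 28, 1),
#                           segmentation_fn='slic',
#                           segmentation_kwargs={'n_segments': 15, 'compactness': 20})
#   explanation = explainer.explain(image, threshold=0.95, p_sample=0.5)
#   explanation.data['anchor']  # image overlaid with the anchor superpixels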
|
import os
import sys
import time
import hashlib
import zlib
import random
import string
import subprocess as sb
import redis
import json
from collections import Counter
digestsize = 20
class RedisDataStore:
def __init__(self, loc, db=0):
self.conn = redis.StrictRedis(loc, db=db)
def post_experiment(self, jobhash, N, params):
"""
Sets (in order) the:
jobs:githashes
params:sources
experiments:times
then adds experiments to jobs:new
N: number of repeats requested
params: JSON param string
"""
r = self.conn
self.check_githash(jobhash)
if params is None or params.strip() == "":
params = '{}'
# cleanedparams = yaml.dump(yaml.load(params)).strip()
print(params)
cleanedparams = json.dumps(json.loads(params)).strip()
cleanedparams = zlib.compress(cleanedparams)
paramhash = self.hash(cleanedparams)
exp = jobhash + '|' + paramhash
r.hset('params:sources', paramhash, cleanedparams)
r.hset('experiments:times', exp, r.time()[0])
r.lpush('jobs:new', *([exp]*N))
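# Explanatory note (not in the original source): each queued experiment is the string
# "<jobhash>|<paramhash>", pushed N times onto the jobs:new list so that workers can pop
# one entry per requested repeat.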
def check_githash(self, jobhash):
r = self.conn
if not os.path.exists('.git'):
return
githash = sb.check_output('git rev-parse HEAD'.split()).strip()
storedgithash = r.hget('jobs:githashes', jobhash)
if storedgithash is not None and githash != storedgithash:
print('ERROR: This jobfile has already been run ' +
'under a different version of the code.')
sys.exit(-1)
# githash = githash + ' + ' + storedgithash
r.hset('jobs:githashes', jobhash, githash)
def post_jobfile(self, source, desc):
"""
Posts job in jobs:sources
source: path to source or [partial] existing hash
desc: string description saved to jobs:descs
"""
r = self.conn
jobhash = self.get_jobhash(source)
if r.hexists('jobs:sources', jobhash):
print("WARNING: This jobfile has already been submitted.\n" +
"Modifying file and resubmitting.")
N = 12
rstr = "\n#" + ''.join(
random.choice(string.ascii_uppercase +
string.digits) for x in range(N))
if not os.path.exists(source):
print("ERROR: Cannot change source {} quiting.".format(source))
sys.exit(-1)
sb.check_call('echo "{}" >> {}'.format(rstr, source), shell=True)
jobhash = self.get_jobhash(source)
r.hset('jobs:sources', jobhash, self.get_jobfile_disk(source))
r.hset('jobs:descs', jobhash, desc)
r.hset('jobs:times', jobhash, r.time()[0])
print "Posted hash: %s" % jobhash[:8]
#if not os.path.exists('.exps'):
#os.makedirs('.exps')
#newfile = os.path.join('.exps', jobhash+'.py')
#if not os.path.exists(newfile):
#with open(newfile,'w') as fid:
#fid.write(zlib.decompress(self.get_jobfile(source)))
return jobhash
def describe_jobfile(self, source, desc):
""" Describes job in jobs:descs:<hash>
Needs r: redis object
source: path to source or [partial] existing hash
desc: short textual description.
"""
r = self.conn
jobhash = self.get_jobhash(source)
if r.hexists('jobs:descs', jobhash):
old_desc = r.hget('jobs:descs', jobhash)
if desc != old_desc:
print("Warning: This job already has description:")
cont = raw_input("Would you like to override? [y/n]: ")
if cont.upper().strip()[0] == 'Y':
print("Overwriting.")
else:
print("Exiting.")
sys.exit(0)
r.hset('jobs:descs', jobhash, desc)
def get_description(self, jobhash):
""" Gets job description in jobs:descs:<hash> """
return self.conn.hget('jobs:descs', jobhash)
def get_jobfile_disk(self, val):
""" Returns compressed source from file path"""
if os.path.exists(val):
with open(val,'r') as fid:
return zlib.compress(fid.read())
sys.exit('Could not find valid source that began with hash %s' % val)
def get_jobfile_db(self, val):
""" Returns compressed source from (partial) hash"""
r = self.conn
if len(val) == digestsize:
return r.hget('jobs:sources', val)
for h in r.hkeys('jobs:sources'):
if h.startswith(val):
return r.hget('jobs:sources', h)
sys.exit('Could not find valid source that began with hash %s' % val)
def get_jobhash(self, val):
""" Returns hash from file path or (partial) hash"""
if len(val) == digestsize and val.isalnum():
return val
if os.path.exists(val):
with open(val,'r') as fid:
return self.hash(fid.read())
r = self.conn
for h in r.hkeys('jobs:sources'):
if h.startswith(val):
return h
sys.exit('Could not find valid hash that began with hash %s' % val)
def get_params(self, phash):
""" Returns value of the parameter hash from params:sources """
return zlib.decompress(self.conn.hget('params:sources', phash))
def hash(self, data):
return hashlib.sha1(data).hexdigest()
def kill_workers(self):
r = self.conn
if r.zcard('workers:hb') == 0:
print('No living clients to kill.')
sys.exit(0)
assert not r.exists('workers:stop')
r.set('workers:stop','ALL')
print('Waiting for all workers to stop...')
try:
num = r.zcard('workers:hb')
while num > 0:
print("...%d workers remaining." % num)
time.sleep(1)
num = r.zcard('workers:hb')
print("All workers stopped.")
except KeyboardInterrupt:
print("Stopping")
finally:
r.delete('workers:stop')
def job_status(self, argv):
r = self.conn
if len(argv) == 3:
verbose=True
else:
verbose=False
new = r.llen('jobs:new') or '0'
working = r.llen('jobs:working') or '0'
done = r.get('jobs:numdone') or '0'
failed = r.get('jobs:failed') or '0'
if not verbose:
print("\t%s jobs pending\n\t%s running\n\t%s completed\n\t%s failed"%
(new, working, done, failed))
else:
print("Pending jobs (%s):" % new)
joblist = r.lrange('jobs:new', 0, -1)
jobcounts = Counter(joblist)
for h,count in jobcounts.iteritems():
print('\t%4d: %s' % (count, h[:8]))
print("\nIn-progress jobs (%s):"% working)
joblist = r.lrange('jobs:working', 0, -1)
jobcounts = Counter(joblist)
for h,count in jobcounts.iteritems():
print('\t%4d: %s' % (count, h[:8]))
print("\nDone jobs (%s)" % done)
#keys = r.keys('jobs:done:*')
#for k in sorted(keys):
#print('\t%4s: %s' % (r.llen(k),k.split(':')[-1][:8]))
print("\nFailed jobs (%s)" % failed)
def worker_status(self, argv):
r = self.conn
clients = r.zrevrange('workers:hb', 0, -1)
num = len(clients)
if len(argv) == 3:
verbose=True
else:
verbose=False
if num == 0:
print('There are currently no clients alive.')
elif not verbose:
print("There are %d clients alive." % num)
else:
print("The %d clients alive are:" % num)
curr_time = r.time()
for x in clients:
cl = x #js.loads(zlib.decompress(x))
print('\t{0:<15} with hb {1:3.1f} seconds ago'
.format(cl, curr_time[0] + (curr_time[1]*1e-6) - int(r.zscore('workers:hb', x))))
def select_jobfile(self, sel=None, fullhashes=False):
return self.select_jobfiles(sel, fullhashes)[0]
def select_jobfiles(self, sel=None, fullhashes=False):
r = self.conn
hashes = sorted(r.hkeys('jobs:sources'), key=lambda x: int(r.hget('jobs:times', x) or '0'))
if sel is None:
for i, d in enumerate(hashes):
desc = r.hget('jobs:descs', d) or ''
if fullhashes:
print "%4d. %s %s" % (i, d, desc)
else:
print "%4d. %s %s" % (i, d[:5], desc)
sel = raw_input("Choose a dataset or range of datasets or 'q' to exit: ")
sel = [x.strip() for x in sel.split('-')]
if len(sel) == 1:
if not sel[0].isdigit() or int(sel[0]) not in range(i+1):
sys.exit()
a = b = int(sel[0])
else:
a,b = int(sel[0]), int(sel[1])
else:
a,b = sel, sel
return [hashes[i] for i in range(a,b+1)]
def clean_jobfiles(self):
for res in self.select_jobfiles():
self.conn.hdel('jobs:descs', res)
self.conn.hdel('jobs:sources', res)
self.conn.hdel('jobs:times', res)
self.conn.hdel('jobs:githashes', res)
def gc(self):
r = self.conn
r.delete('jobs:failed')
r.delete('jobs:numdone')
clients = r.zrevrange('workers:hb', 0, -1)
num = len(clients)
if num == 0:
r.delete('jobs:working')
print("Done!")
def push_heartbeat(self, idstring):
self.conn.zadd('workers:hb', self.conn.time()[0], idstring)
def remove_heartbeat(self, idstring):
self.conn.zrem('workers:hb', idstring)
def query_stop(self, host):
cmd = self.conn.get('workers:stop')
if cmd == 'ALL' or cmd == host:
return True
else:
return False
def remove_working_job(self, exp):
self.conn.lrem('jobs:working', 1, exp)
def reload_working_job(self, exp):
self.conn.lrem('jobs:working', 1, exp)
if exp is not None:
self.conn.lpush('jobs:new', exp)
def poll_work(self):
return self.conn.rpoplpush('jobs:new', 'jobs:working')
def job_fail(self):
self.conn.incr('jobs:failed')
def job_succeed(self):
self.conn.incr('jobs:numdone')
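# Hedged usage sketch (assumed host and file names, not part of the original module):
#   ds = RedisDataStore('localhost')
#   jobhash = ds.post_jobfile('experiment.py', 'baseline run')   # register the job source
#   ds.post_experiment(jobhash, 10, '{"lr": 0.01}')              # queue 10 repeats
#   ds.job_status(sys.argv)                                      # print queue counters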
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture-of-experts code.
Interfaces and algorithms are under development and subject to rapid change
without notice.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import tensorflow as tf
def transformer_moe_layer_v1(inputs, output_dim, hparams, train,
master_dtype=tf.bfloat16,
slice_dtype=tf.float32):
"""Local mixture of experts that works well on TPU.
Adapted from the paper https://arxiv.org/abs/1701.06538
Note: until the algorithm and interface solidify, we pass in a hyperparameters
dictionary in order not to complicate the interface in mtf_transformer.py.
Once this code moves out of "research", we should pass the hyperparameters
separately.
Hyperparameters used:
hparams.moe_num_experts: number of experts
hparams.moe_hidden_size: size of hidden layer in each expert
hparams.moe_group_size: size of each "group" for gating purposes
hparams.moe_capacity_factor_train: a float
hparams.moe_capacity_factor_eval: a float
hparams.moe_gating: a string
+ all hyperparameters used by _top_2_gating()
The number of parameters in the gating network is:
(input_dim.size * hparams.num_experts) +
The number of parameters in the experts themselves is:
(hparams.num_experts
* (input_dim.size + output_dim.size)
* hparams.moe_hidden_size)
The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting
of the representations of all positions in a batch of sequences.
Each position of each sequence is sent to 0-2 experts. The expert
choices and the combination weights are determined by a learned gating
function.
This function returns a small auxiliary loss that should be added to the
training loss of the model. This loss helps to balance expert usage.
Without the loss, it is very likely that a few experts will be trained and
the rest will starve.
Several hacks are necessary to get around current TPU limitations:
- To ensure static shapes, we enforce (by truncation/padding)
that each sequence send the same number of elements to each expert.
It would make more sense to enforce this equality over the entire batch,
but due to our hacked-up gather-by-matmul implementation, we need to divide
the batch into "groups". For each group, the same number of elements
are sent to each expert.
TODO(noam): Factor this code better. We want to be able to substitute
different code for the experts themselves.
Args:
inputs: a mtf.Tensor with shape [<batch_dims...>, length_dim, input_dim]
output_dim: a mtf.Dimension (for Transformer, this is input_dim)
hparams: model hyperparameters
train: a boolean
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
Returns:
outputs: a Tensor with shape [<batch_dims...>, length_dim, output_dim]
loss: a mtf scalar
Raises:
ValueError: on unrecognized hparams.moe_gating
"""
orig_inputs = inputs
input_dim = inputs.shape.dims[-1]
hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size)
experts_dim = mtf.Dimension("experts", hparams.moe_num_experts)
group_size_dim = mtf.Dimension("group", hparams.moe_group_size)
batch_dim = mtf.Dimension(
orig_inputs.shape[0].name,
orig_inputs.shape.size // (group_size_dim.size * input_dim.size))
inputs = mtf.reshape(inputs, [batch_dim, group_size_dim, input_dim])
# Each sequence sends expert_capacity positions to each expert.
capacity_factor = (
hparams.moe_capacity_factor_train if train else
hparams.moe_capacity_factor_eval)
expert_capacity = min(
group_size_dim.size,
int((group_size_dim.size * capacity_factor) / experts_dim.size))
expert_capacity_dim = mtf.Dimension("expert_capacity", expert_capacity)
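# Worked example (illustrative numbers, not from the original source): with
# moe_group_size=1024, moe_num_experts=16 and moe_capacity_factor_train=1.25, training
# gives expert_capacity = min(1024, int(1024 * 1.25 / 16)) = min(1024, 80) = 80, i.e.
# each group can route at most 80 positions to any single expert.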
experts_dim_unsplit = mtf.Dimension("expert_unsplit", experts_dim.size)
batch_dim_unsplit = mtf.Dimension("batch_unsplit", batch_dim.size)
if hparams.moe_gating == "top_2":
dispatch_tensor, combine_tensor, loss = _top_2_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=experts_dim_unsplit,
expert_capacity_dim=expert_capacity_dim,
hparams=hparams,
train=train)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
# put num_experts dimension first to make split easier in alltoall
expert_inputs = mtf.einsum([inputs, dispatch_tensor], mtf.Shape(
[experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))
expert_inputs = mtf.reshape(expert_inputs, mtf.Shape(
[experts_dim, batch_dim_unsplit, expert_capacity_dim, input_dim]))
# Now feed the expert inputs through the experts.
h = mtf.layers.dense(
expert_inputs, hidden_dim, expert_dims=[experts_dim],
activation=mtf.relu, use_bias=False, master_dtype=master_dtype,
slice_dtype=slice_dtype, name="x0")
expert_output = mtf.layers.dense(
h, output_dim, expert_dims=[experts_dim], use_bias=False,
master_dtype=master_dtype, slice_dtype=slice_dtype, name="x1")
expert_output = mtf.reshape(expert_output, mtf.Shape(
[experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))
output = mtf.einsum([expert_output, combine_tensor], mtf.Shape(
[batch_dim, group_size_dim, output_dim]))
output = mtf.reshape(output, orig_inputs.shape.dims[:-1] + [output_dim])
return output, loss * hparams.moe_loss_coef
def transformer_moe_layer_v2(inputs, output_dim, hparams, train,
master_dtype=tf.bfloat16, slice_dtype=tf.float32):
"""2-level mixture of experts.
Adapted from the paper https://arxiv.org/abs/1701.06538
Note: until the algorithm and interface solidify, we pass in a hyperparameters
dictionary in order not to complicate the interface in mtf_transformer.py.
Once this code moves out of "research", we should pass the hyperparameters
separately.
Hyperparameters used:
hparams.moe_num_experts: number of experts
hparams.moe_hidden_size: size of hidden layer in each expert
hparams.moe_group_size: size of each "group" for gating purposes
hparams.moe_capacity_factor_train: a float
hparams.moe_capacity_factor_eval: a float
hparams.moe_capacity_factor_second_level: a float
hparams.moe_gating: a string
+ all hyperparameters used by _top_2_gating()
One set of params is used for the experts in the first level, and a different set
of params is used per expert in the second level.
The number of parameters in the gating network is:
(input_dim.size * (hparams.num_experts) +
(moe_hidden_size * hparams.num_experts) * hparams.num_experts
The number of parameters in the experts themselves is:
(hparams.num_experts
* (input_dim.size + output_dim.size)
* hparams.moe_hidden_size)
The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting
of the representations of all positions in a batch of sequences.
Each position of each sequence is sent to 0-3 experts. The expert
choices and the combination weights are determined by a learned gating
function.
This function returns a small auxiliary loss that should be added to the
training loss of the model. This loss helps to balance expert usage.
Without the loss, it is very likely that a few experts will be trained and
the rest will starve.
Several hacks are necessary to get around current TPU limitations:
- To ensure static shapes, we enforce (by truncation/padding)
that each sequence send the same number of elements to each expert.
It would make more sense to enforce this equality over the entire batch,
but due to our hacked-up gather-by-matmul implementation, we need to divide
the batch into "groups". For each group, the same number of elements
are sent to each expert.
TODO(noam): Factor this code better. We want to be able to substitute
different code for the experts themselves.
Dimensions cheat sheet:
a, b: batch size
l: original sequence length
m: input depth
n: output depth
g, h: number of groups
s, t: group size
x, y: number of experts
c, d: expert capacity
input: [a0, b1, l, m]
input: [a0, g1, s, m]
dispatch_tensor_x: [a0, g1, s, x, c]
expert_input: [a0, g1, x, c, m]
alltoall: [a0, g, x1, c, m]
alltoall: [a0, g, x1, c, m]
transpose: [x1, a0, g, c, m]
reshape: [x1, h0, s, m]
assignment2: [x1, h0, t, y, d]
expert_input2: [x1, h0, y, d, m]
alltoall: [x1, h, y0, d, m]
...
reverse of that
gating params 0: [m, x]
gating params 1: [x1, m, y]
expert params:
[x1, y0, m, hidden]
[x1, y0, hidden, n]
Args:
inputs: a mtf.Tensor with shape [a, b, l, m]
output_dim: a mtf.Dimension (for Transformer, this is input_dim)
hparams: model hyperparameters
train: a boolean
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
Returns:
outputs: a Tensor with shape [a, b, l, n]
loss: a mtf scalar
Raises:
ValueError: on unrecognized hparams.moe_gating
"""
insert_outer_batch_dim = (len(inputs.shape.dims) == 3)
if insert_outer_batch_dim:
inputs = mtf.reshape(
inputs, [mtf.Dimension("outer_batch", 1)] + inputs.shape.dims)
assert len(hparams.moe_num_experts) == 2
a0, b1, l, m = inputs.shape.dims
hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size)
x1 = mtf.Dimension("expert_x", hparams.moe_num_experts[0])
y0 = mtf.Dimension("expert_y", hparams.moe_num_experts[1])
x = mtf.Dimension("expert_x_unsplit", hparams.moe_num_experts[0])
y = mtf.Dimension("expert_y_unsplit", hparams.moe_num_experts[1])
n = output_dim
# We "cheat" here and look at the mesh shape and layout. This is to ensure
# that the number of groups (g.size) is a multiple of the mesh dimension
# over which those groups are split.
num_groups, group_size = _split_into_groups(
b1.size * l.size, hparams.moe_group_size,
mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, b1))
g1 = mtf.Dimension(b1.name, num_groups)
g = mtf.Dimension(b1.name + "_unsplit", g1.size)
s = mtf.Dimension("group_size_x", group_size)
# Each sequence sends (at most?) expert_capacity positions to each expert.
# Static expert_capacity dimension is needed for expert batch sizes
capacity_factor = (
hparams.moe_capacity_factor_train if train else
hparams.moe_capacity_factor_eval)
expert_capacity = min(s.size, int((s.size * capacity_factor) / x.size))
expert_capacity = max(expert_capacity, 4)
c = mtf.Dimension("expert_capacity_x", expert_capacity)
# We "cheat" here and look at the mesh shape and layout. This is to ensure
# that the number of groups (h.size) is a multiple of the mesh dimension
# over which those groups are split.
num_groups, group_size = _split_into_groups(
a0.size * g.size * c.size,
hparams.moe_group_size,
mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, a0))
t = mtf.Dimension("group_size_y", group_size)
h0 = mtf.Dimension(a0.name, num_groups)
h = mtf.Dimension(a0.name + "_unsplit", h0.size)
expert_capacity = min(
t.size,
int((t.size * hparams.moe_capacity_factor_second_level) / y.size))
expert_capacity = max(expert_capacity, 4)
d = mtf.Dimension("expert_capacity_y", expert_capacity)
# First level of expert routing
# Reshape the inner batch size to a multiple of group_dim g1 and
# group_size_dim s.
inputs = mtf.reshape(inputs, [a0, g1, s, m])
# Get the assignments for the first level.
# dispatch_tensor_x has shape [a0, g1, s, x, c]
if hparams.moe_gating == "top_2":
dispatch_tensor_x, combine_tensor_x, loss_outer = _top_2_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=x,
expert_capacity_dim=c,
hparams=hparams,
train=train)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
# Now create expert_inputs based on the assignments.
# put num_experts dimension first to make split easier in alltoall
expert_inputs_x = mtf.einsum([inputs, dispatch_tensor_x], [x, a0, g1, c, m])
# we construct an "importance" Tensor for the inputs to the second-level
# gating. The importance of an input is 1.0 if it represents the
# first-choice expert-group and 0.5 if it represents the second-choice expert
# group. This is used by the second-level gating.
importance = mtf.reduce_sum(combine_tensor_x, output_shape=[x, a0, g1, c])
importance = 0.5 * (
mtf.to_float(mtf.greater(importance, 0.5)) +
mtf.to_float(mtf.greater(importance, 0.0)))
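# Explanatory note (not in the original source): since the two normalized gates sum to
# roughly 1 and gate_1 >= gate_2, a combine weight above 0.5 can only come from a
# first-choice assignment, so the expression above maps such weights to importance 1.0,
# weights in (0, 0.5] to 0.5, and padding (weight 0) to 0.0, matching the _top_2_gating
# docstring.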
# First level, all to all. Here we change the split dimension from g1 to x1.
expert_inputs_x = mtf.reshape(expert_inputs_x, mtf.Shape(
[x1, a0, g, c, m]))
importance = mtf.reshape(importance, [x1, a0, g, c])
# Second level of expert routing
# Reshape the expert_inputs outer batch dim to be a multiple of group_dim h0
# and group_size_dim t.
inputs_y = mtf.reshape(expert_inputs_x, [x1, h0, t, m])
importance = mtf.reshape(importance, [x1, h0, t])
# Get the assignments for the second level.
# dispatch_tensor_y has shape [x1, h0, t, y, d]
if hparams.moe_gating == "top_2":
dispatch_tensor_y, combine_tensor_y, loss_inner = _top_2_gating(
inputs=inputs_y,
outer_expert_dims=[x1],
experts_dim=y,
expert_capacity_dim=d,
hparams=hparams,
train=train,
importance=importance)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
# Now create expert_inputs based on the assignments.
# put num_experts dimension first to make split easier in alltoall
expert_inputs_y = mtf.einsum([inputs_y, dispatch_tensor_y], [y, x1, h0, d, m])
# Second level, all to all. Here we change the split dimension from h0 to y0.
expert_inputs_y = mtf.reshape(expert_inputs_y, mtf.Shape(
[y0, x1, h, d, m]))
hidden_output = mtf.layers.dense(
expert_inputs_y, hidden_dim, expert_dims=[y0, x1],
activation=mtf.relu, use_bias=False, master_dtype=master_dtype,
slice_dtype=slice_dtype, name="expert0")
expert_output = mtf.layers.dense(
hidden_output, output_dim, expert_dims=[y0, x1],
use_bias=False, master_dtype=master_dtype, slice_dtype=slice_dtype,
name="expert1")
# NOW COMBINE EXPERT OUTPUTS (reversing everything we have done)
# expert_output has shape [y0, x1, h, d, n]
# alltoall
expert_output = mtf.reshape(expert_output, mtf.Shape(
[y, x1, h0, d, n]))
# combine results from inner level
output_y = mtf.einsum([expert_output, combine_tensor_y], [x1, h0, t, n])
# Reshape the combined tensor from inner level to now contain outer_batch_dim
# a0 and group_dim g
output = mtf.reshape(output_y, [x1, a0, g, c, n])
# alltoall from expert_dim x to group_dim g1
expert_output_x = mtf.reshape(output, mtf.Shape([x, a0, g1, c, n]))
# combine results from outer level
output_x = mtf.einsum([expert_output_x, combine_tensor_x], [a0, g1, s, n])
# Reshape the combined tensor to now contain inner_batch_dim
# b1 and the original sequence length
output = mtf.reshape(output_x, [a0, b1, l, n])
if insert_outer_batch_dim:
output = mtf.reshape(output, [b1, l, n])
return output, (loss_outer + loss_inner) * hparams.moe_loss_coef
def _top_2_gating(
inputs, outer_expert_dims, experts_dim, expert_capacity_dim,
hparams, train, importance=None):
"""Compute gating for mixture-of-experts in TensorFlow.
Note: until the algorithm and interface solidify, we pass in a hyperparameters
dictionary in order not to complicate the interface in mtf_transformer.py.
Once this code moves out of "research", we should pass the hyperparameters
separately.
Hyperparameters used:
hparams.moe_use_second_place_loss: a boolean
hparams.moe_second_policy_train: a string
hparams.moe_second_policy_eval: a string
hparams.moe_second_threshold: a float
The returned forward assignment is a tensor used to map (via einsum) from the
inputs to the expert_inputs. Likewise, the returned combine_tensor is
used to map (via einsum) from the expert outputs to the outputs. Both the
forward and backward assignments are mostly zeros. The shapes of the tensors
are as follows.
inputs: [<batch_dims>, group_size_dim, input_dim]
importance: [<batch_dims>, group_size_dim]
dispatch_tensor:
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
expert_inputs:
[<batch_dims>, experts_dim, expert_capacity_dim, input_dim]
expert_outputs: [<batch_dims>, experts_dim, expert_capacity_dim, output_dim]
combine_tensor:
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
outputs: [<batch_dims>, group_size_dim, output_dim]
"importance" is an optional tensor with one floating-point value for each
input vector. If the importance of an input is 1.0, then we send it to
up to 2 experts. If 0.0 < importance < 1.0, then we send it to at most
one expert. If importance == 0.0, then we send it to no experts.
We use "importance" at the second-level gating function of a hierarchical
mixture of experts. Inputs to the first-choice expert-group get importance
1.0. Inputs to the second-choice expert group get importance 0.5.
Inputs that represent padding get importance 0.0.
Args:
inputs: a mtf.Tensor with shape [<batch_dims>, group_size_dim, input_dim]
outer_expert_dims: an optional list of dimensions. This is for the case
where we are at an inner level of a hierarchical MoE.
experts_dim: a Dimension (the number of experts)
expert_capacity_dim: a Dimension (number of examples per group per expert)
hparams: model hyperparameters.
train: a boolean
importance: an optional tensor with shape [<batch_dims>, group_size_dim]
Returns:
dispatch_tensor: a Tensor with shape
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
combine_tensor: a Tensor with shape
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
loss: a mtf scalar
Raises:
ValueError: on illegal hyperparameters
"""
group_size_dim, unused_input_dim = inputs.shape.dims[-2:]
raw_gates = mtf.softmax(mtf.layers.dense(
inputs, experts_dim, use_bias=False,
expert_dims=outer_expert_dims), experts_dim)
# The internals of this function run in float32.
# bfloat16 seems to reduce quality.
raw_gates = mtf.to_float(raw_gates)
expert_capacity_f = float(expert_capacity_dim.size)
# FIND TOP 2 EXPERTS PER POSITION
# Find the top expert for each position. shape=[batch, group]
index_1, gate_1 = mtf.top_1(raw_gates, experts_dim)
# [batch, group, experts]
mask_1 = mtf.one_hot(index_1, experts_dim, dtype=raw_gates.dtype)
density_1_proxy = raw_gates
if importance is not None:
mask_1 *= mtf.to_float(mtf.equal(importance, 1.0))
gate_1 *= mtf.to_float(mtf.equal(importance, 1.0))
density_1_proxy *= mtf.to_float(mtf.equal(importance, 1.0))
gates_without_top_1 = raw_gates * (1.0 - mask_1)
# [batch, group]
index_2, gate_2 = mtf.top_1(gates_without_top_1, experts_dim)
# [batch, group, experts]
mask_2 = mtf.one_hot(index_2, experts_dim, dtype=raw_gates.dtype)
if importance is not None:
mask_2 *= mtf.to_float(mtf.greater(importance, 0.0))
denom = gate_1 + gate_2 + 1e-9
gate_1 /= denom
gate_2 /= denom
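# Illustrative example (not from the original source): if the raw softmax puts 0.6 on the
# top expert and 0.3 on the runner-up, the renormalized gates become roughly
# 0.6 / 0.9 = 0.667 and 0.3 / 0.9 = 0.333, so the two selected experts' weights sum to ~1.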
# BALANCING LOSSES
# shape = [batch, experts]
# We want to equalize the fraction of the batch assigned to each expert
density_1 = mtf.reduce_mean(mask_1, reduced_dim=group_size_dim)
# Something continuous that is correlated with what we want to equalize.
density_1_proxy = mtf.reduce_mean(density_1_proxy, reduced_dim=group_size_dim)
density_1 = mtf.Print(
density_1, [mtf.reduce_mean(density_1, output_shape=[experts_dim])],
"density_1", summarize=1000)
loss = (mtf.reduce_mean(density_1_proxy * density_1)
* float(experts_dim.size * experts_dim.size))
if hparams.moe_use_second_place_loss:
# Also add a loss to encourage all experts to be used equally also as the
# second-place expert. Experimentally, this seems to be a wash.
# We want to equalize the fraction of the batch assigned to each expert:
density_2 = mtf.reduce_mean(mask_2, reduced_dim=group_size_dim)
# As a proxy for density_2, we renormalize the raw gates after the top one
# has been removed.
normalized = gates_without_top_1 / (
mtf.reduce_sum(gates_without_top_1, reduced_dim=experts_dim) + 1e-9)
density_2_proxy = mtf.reduce_mean(normalized, reduced_dim=group_size_dim)
loss_2 = (mtf.reduce_mean(density_2_proxy * density_2)
* float(experts_dim.size * experts_dim.size))
loss += loss_2 * 0.5
# Depending on the policy in the hparams, we may drop out some of the
# second-place experts.
policy = (
hparams.moe_second_policy_train if train else
hparams.moe_second_policy_eval)
threshold = (
hparams.moe_second_threshold_train if train else
hparams.moe_second_threshold_eval)
if policy == "all":
# Use second-place experts for all examples.
pass
elif policy == "none":
# Never use second-place experts.
mask_2 = mtf.zeros_like(mask_2)
elif policy == "threshold":
# Use second-place experts if gate_2 > threshold.
mask_2 *= mtf.to_float(mtf.greater(gate_2, threshold))
elif policy == "random":
# Use second-place experts with probability min(1.0, gate_2 / threshold).
mask_2 *= mtf.to_float(
mtf.less(mtf.random_uniform(gate_2.mesh, gate_2.shape),
gate_2 / max(threshold, 1e-9)))
else:
raise ValueError("Unknown policy %s" % policy)
mask_2 = mtf.Print(
mask_2, [mtf.reduce_mean(mask_2, output_shape=[experts_dim])],
"density_2", summarize=1000)
# COMPUTE ASSIGNMENT TO EXPERTS
# [batch, group, experts]
# This is the position within the expert's mini-batch for this sequence
position_in_expert_1 = mtf.cumsum(
mask_1, group_size_dim, exclusive=True) * mask_1
# Remove the elements that don't fit. [batch, group, experts]
mask_1 *= mtf.to_float(mtf.less(position_in_expert_1, expert_capacity_f))
# [batch, experts]
# How many examples in this sequence go to this expert
mask_1_count = mtf.reduce_sum(mask_1, reduced_dim=group_size_dim)
# [batch, group] - mostly ones, but zeros where something didn't fit
mask_1_flat = mtf.reduce_sum(mask_1, reduced_dim=experts_dim)
# [batch, group]
position_in_expert_1 = mtf.reduce_sum(
position_in_expert_1, reduced_dim=experts_dim)
# Weight assigned to first expert. [batch, group]
gate_1 *= mask_1_flat
# [batch, group, experts]
position_in_expert_2 = (
mtf.cumsum(mask_2, group_size_dim, exclusive=True) + mask_1_count)
position_in_expert_2 *= mask_2
mask_2 *= mtf.to_float(mtf.less(position_in_expert_2, expert_capacity_f))
# mask_2_count = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)
mask_2_flat = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)
gate_2 *= mask_2_flat
position_in_expert_2 = mtf.reduce_sum(
position_in_expert_2, reduced_dim=experts_dim)
# [batch, group, experts, expert_capacity]
combine_tensor = (
gate_1 * mask_1_flat
* mtf.one_hot(index_1, experts_dim)
* mtf.one_hot(mtf.to_int32(position_in_expert_1), expert_capacity_dim) +
gate_2 * mask_2_flat
* mtf.one_hot(index_2, experts_dim)
* mtf.one_hot(mtf.to_int32(position_in_expert_2), expert_capacity_dim))
combine_tensor = mtf.cast(combine_tensor, inputs.dtype)
loss = mtf.cast(loss, inputs.dtype)
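  # dispatch_tensor is combine_tensor binarized: every nonzero gate weight
  # becomes 1.0. It routes inputs into expert/capacity slots, while
  # combine_tensor retains the gate weights used to merge the expert outputs.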
dispatch_tensor = mtf.cast(
mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype)
return dispatch_tensor, combine_tensor, loss
def set_default_moe_hparams(hparams):
"""Add necessary hyperparameters for mixture-of-experts."""
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-2
hparams.add_hparam("moe_gating", "top_2")
# Experts have fixed capacity per batch. We need some extra capacity
# in case gating is not perfectly balanced.
# moe_capacity_factor_* should be set to a value >=1.
hparams.add_hparam("moe_capacity_factor_train", 1.25)
hparams.add_hparam("moe_capacity_factor_eval", 2.0)
hparams.add_hparam("moe_capacity_factor_second_level", 1.0)
# Each expert has a hidden layer with this size.
hparams.add_hparam("moe_hidden_size", 4096)
# For gating, divide inputs into groups of this size before gating.
# Each group sends the same number of inputs to each expert.
# Ideally, the group size would be the whole batch, but this is expensive
# due to our use of matrix multiplication for reordering.
hparams.add_hparam("moe_group_size", 1024)
# For top_2 gating, whether to impose an additional loss in order to make
# the experts equally used as the second-place expert.
hparams.add_hparam("moe_use_second_place_loss", 0)
# In top_2 gating, policy for whether to use a second-place expert.
# Legal values are:
# "all": always
# "none": never
# "threshold": if gate value > the given threshold
# "random": if gate value > threshold*random_uniform(0,1)
hparams.add_hparam("moe_second_policy_train", "random")
hparams.add_hparam("moe_second_policy_eval", "random")
hparams.add_hparam("moe_second_threshold_train", 0.2)
hparams.add_hparam("moe_second_threshold_eval", 0.2)
def _split_into_groups(n, max_group_size, mesh_dim_size):
"""Helper function for figuring out how to split a dimensino into groups.
We have a dimension with size n and we want to split it into
two dimensions: n = num_groups * group_size
group_size should be the largest possible value meeting the constraints:
group_size <= max_group_size
(num_groups = n/group_size) is a multiple of mesh_dim_size
Args:
n: an integer
max_group_size: an integer
mesh_dim_size: an integer
Returns:
num_groups: an integer
group_size: an integer
Raises:
ValueError: if n is not a multiple of mesh_dim_size
"""
if n % mesh_dim_size != 0:
raise ValueError(
"n=%d is not a multiple of mesh_dim_size=%d" % (n, mesh_dim_size))
num_groups = max(1, n // max_group_size)
while (num_groups % mesh_dim_size != 0 or n % num_groups != 0):
num_groups += 1
group_size = n // num_groups
tf.logging.info(
"_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d)"
" = (num_groups=%d group_size=%d)" %
(n, max_group_size, mesh_dim_size, num_groups, group_size))
return num_groups, group_size
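# A hedged worked example (illustrative values, not taken from the code above):
# with n=4096 positions, max_group_size=1024 and a mesh dimension of size 2,
# the initial guess num_groups = max(1, 4096 // 1024) = 4 already satisfies
# both constraints (4 % 2 == 0 and 4096 % 4 == 0), so the call returns
# (num_groups=4, group_size=1024):
#
#   num_groups, group_size = _split_into_groups(
#       n=4096, max_group_size=1024, mesh_dim_size=2)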
|
import random
from sklearn.datasets import fetch_mldata
from util import open_file_in_directory
MNIST_DIR = './tmp/mnist'
MNIST_TRAIN_DIR = './mnist/train'
MNIST_TEST_DIR = './mnist/test'
MNIST_SAMPLE_DIR = './mnist/sample'
TEST_CASES = 60000  # split index: the first 60000 examples are used for training, the rest for testing
def mnist_img_to_file(mnist_img, file):
for x in range(28):
for y in range(28):
file.write(str(mnist_img[x * 28 + y]) + " ")
file.write('\n')
def generate_samples(data, labels, directory='.', filename='results.txt', sampleNumber=100):
result = open_file_in_directory(directory, filename)
for i in range(sampleNumber):
index = random.randrange(data.shape[0])
label = labels[index]
img = data[index]
img_filename = str(index) + ".txt"
line = img_filename + ' ' + str(label) + '\n'
result.write(line)
file = open_file_in_directory(directory, img_filename)
mnist_img_to_file(img, file)
file.close()
result.close()
def generate_test_file(data, labels, directory='.', filename='results.txt'):
result = open_file_in_directory(directory, filename)
result.write(str(data.shape[0]) + '\n')
indexes = [i for i in range(data.shape[0])]
random.shuffle(indexes)
for i in indexes:
label = labels[i]
img = data[i]
line = str(label) + '\n'
result.write(line)
mnist_img_to_file(img, result)
result.close()
def generate_test_data(data, labels):
test_data = data[TEST_CASES:]
test_labels = labels[TEST_CASES:]
generate_test_file(test_data, test_labels, MNIST_TEST_DIR)
def generate_train_data(data, labels):
train_data = data[:TEST_CASES]
train_labels = labels[:TEST_CASES]
generate_test_file(train_data, train_labels, MNIST_TRAIN_DIR)
def main():
mnist = fetch_mldata('MNIST original', data_home=MNIST_DIR)
labels = mnist.target.astype(int)
data = mnist.data
generate_train_data(data, labels)
generate_test_data(data, labels)
generate_samples(data, labels, MNIST_SAMPLE_DIR)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Abstract Transport
"""
import typing
import abc
from apt.transport.directorylisting import DirectoryListing
class Transport(abc.ABC):
"""
Abstract class for retrieving information from repos
The functions 'exists' and 'open_read' are required to be implemented.
"""
@abc.abstractmethod
def exists(self, uri: str) -> bool:
"""
Returns whether a given uri exists.
:param str uri:
:return bool:
:raises URIMismatchError:
"""
@abc.abstractmethod
def open_read(self, uri: str) -> typing.IO:
"""
Opens a file as an IO-like for reading
:param string uri:
:return IO:
:raises URIMismatchError:
:raises FileNotFoundError:
"""
@abc.abstractmethod
def open_write(self, uri: str) -> typing.IO:
"""
Opens a file as an IO-like for writing
        This function is responsible for creating any intermediate directories
        if the underlying data store has such a concept.
:param string uri:
:return:
:raises NotImplementedError:
:raises URIMismatchError:
"""
@abc.abstractmethod
def list_directory(self, uri: str) -> DirectoryListing:
"""
Returns a list of files and directories in a directory
:param string uri:
        :return DirectoryListing:
:raises NotImplementedError:
:raises URIMismatchError:
:raises FileNotFoundError:
"""
|
"""
CPDParser parses the ConsensusPathDB_human_PPI data file and yields
a dictionary of parsed values for each record.
Source Project: biothings.interactions
Author: Greg Taylor: greg.k.taylor@gmail.com
"""
import hashlib
import re
from hub.dataload.BiointeractParser import BiointeractParser
class CPDParser(BiointeractParser):
# Static Constants
EMPTY_FIELD = 'NA'
SEPARATOR = ','
HUMAN = '_HUMAN'
@staticmethod
def parse_interaction_participants(entry):
"""
Parse all interaction participants given as string from the tsv file.
        The resulting participant identifier strings are returned with the
        trailing '_HUMAN' suffix removed.
:param entry: a string representing the list
:return: list of strings
"""
vals = CPDParser.parse_list(entry, CPDParser.SEPARATOR)
return list(map((lambda x: x.replace(CPDParser.HUMAN, '')), vals)) if vals else None
@staticmethod
def parse_interaction_publications(entry):
"""
Parse all interaction publications given as a string from the tsv file.
The resulting publication identifier strings will be converted to a
list of integers representing pubmed identifiers.
:param entry: a string representing the list
:return: list of integers
"""
vals = CPDParser.parse_list(entry, CPDParser.SEPARATOR)
return list(map(CPDParser.safe_int, vals)) if vals else None
@staticmethod
def parse_source_databases(entry):
"""
Parse all source databases given as a string from the tsv file.
:param entry: a string representing the list
:return: list of strings
"""
return CPDParser.parse_list(entry, CPDParser.SEPARATOR)
@staticmethod
def parse_cpd_tsv_line(line_dict):
"""
Parse a dictionary representing a tsv line with a key, value pair for
each column in the tsv file.
:param line_dict: a tsv line dictionary
        :return: a dictionary representing a parsed ConsensusPathDB record
"""
# Replace all empty fields with None
r = {k: v if v != CPDParser.EMPTY_FIELD else None for k, v in line_dict.items()}
r['interaction_confidence'] = CPDParser.safe_float(r['interaction_confidence'])
r['interaction_participants'] = CPDParser.parse_interaction_participants(r['interaction_participants'])
r['interaction_publications'] = CPDParser.parse_interaction_publications(r['interaction_publications'])
r['source_databases'] = CPDParser.parse_source_databases(r['source_databases'])
# Readjust for biothings.api record format
new_record = dict()
new_record['cpd'] = r
new_record['_id'] = CPDParser.compute_id(r['interaction_participants'])
# Sweep all empty values
new_record = CPDParser.sweep_record(new_record)
return new_record
@staticmethod
def parse_cpd_tsv_file(f):
"""
        Parse a tab-separated ConsensusPathDB file opened in text mode.
        :param f: file opened for reading in text mode
:return: yields a generator of parsed objects
"""
for (i, line) in enumerate(f):
line = line.strip('\n')
# The first commented line is the database description
# The second commented line contains the column headers
if i == 1:
line = line.replace("# ", '') # Delete the comment prefix
header_dict = dict(enumerate(line.split('\t')))
print(header_dict)
# All subsequent lines contain row data
elif i > 1:
_r = {}
for (pos, val) in enumerate(line.split('\t')):
_r[header_dict[pos]] = val
yield CPDParser.parse_cpd_tsv_line(_r)
@staticmethod
def compute_id(participate_lst):
"""
Calculate an id field given a list of participants (which are gene symbols).
:param participate_lst:
:return:
"""
symbols = '-'.join(participate_lst)
hash_object = hashlib.md5(symbols.encode('utf-8'))
symbol_hash = hash_object.hexdigest()
return 'symbol:{}'.format(symbol_hash)
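# A hedged usage sketch (the file name below is a placeholder, not taken from
# the project configuration):
#
#   with open('ConsensusPathDB_human_PPI.tsv', 'r', encoding='utf-8') as f:
#       for record in CPDParser.parse_cpd_tsv_file(f):
#           print(record['_id'], record['cpd'].get('source_databases'))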
|
import pytest
from distributed_asgi import create_path_distributor
def test_path_distributor():
dist = create_path_distributor(routes={
"/api/([a-z-]+)": r"\1"
})
for path, expected_key in [
("/api/banana", "banana"),
("/banana", None),
]:
instance = dist({"path":path})
assert instance.key == expected_key
|
import ast
import importlib
import logging
import os
import sys
from typing import Dict, Any # noqa: F401
from flask import Flask, Blueprint
from flask_restful import Api
from metadata_service.api.column import ColumnDescriptionAPI
from metadata_service.api.healthcheck import healthcheck
from metadata_service.api.popular_tables import PopularTablesAPI
from metadata_service.api.system import Neo4jDetailAPI
from metadata_service.api.table \
import TableDetailAPI, TableOwnerAPI, TableTagAPI, TableDescriptionAPI
from metadata_service.api.tag import TagAPI
from metadata_service.api.user import UserDetailAPI, UserFollowAPI, UserOwnAPI, UserReadAPI
# For customized flask use below arguments to override.
FLASK_APP_MODULE_NAME = os.getenv('FLASK_APP_MODULE_NAME')
FLASK_APP_CLASS_NAME = os.getenv('FLASK_APP_CLASS_NAME')
FLASK_APP_KWARGS_DICT_STR = os.getenv('FLASK_APP_KWARGS_DICT')
def create_app(*, config_module_class: str) -> Flask:
"""
    Creates the app inside a function so that Flask and its extensions can be
    initialized with a specific config. The API routes are also defined here,
    so they can all be seen in one place, separate from their implementations.
    The config is fetched via a module.class name, which can be passed in
    through an environment variable. This resolves the config at runtime via
    PYTHONPATH, so that a Config class can easily be injected.
    More on: http://flask.pocoo.org/docs/1.0/config/
:param config_module_class: name of the config (TODO: Implement config.py)
:return: Flask
"""
if FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME:
print('Using requested Flask module {module_name} and class {class_name}'
.format(module_name=FLASK_APP_MODULE_NAME, class_name=FLASK_APP_CLASS_NAME), file=sys.stderr)
class_obj = getattr(importlib.import_module(FLASK_APP_MODULE_NAME), FLASK_APP_CLASS_NAME)
flask_kwargs_dict = {} # type: Dict[str, Any]
if FLASK_APP_KWARGS_DICT_STR:
print('Using kwargs {kwargs} to instantiate Flask'.format(kwargs=FLASK_APP_KWARGS_DICT_STR),
file=sys.stderr)
flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
app = class_obj(__name__, **flask_kwargs_dict)
else:
app = Flask(__name__)
config_module_class = \
os.getenv('METADATA_SVC_CONFIG_MODULE_CLASS') or config_module_class
app.config.from_object(config_module_class)
logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
logging.info('Created app with config name {}'.format(config_module_class))
logging.info('Using backend {}'.format(app.config.get('PROXY_CLIENT')))
api_bp = Blueprint('api', __name__)
api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
api = Api(api_bp)
api.add_resource(PopularTablesAPI, '/popular_tables/')
api.add_resource(TableDetailAPI, '/table/<path:table_uri>')
api.add_resource(TableDescriptionAPI,
'/table/<path:table_uri>/description',
'/table/<path:table_uri>/description/<path:description_val>')
api.add_resource(TableTagAPI,
'/table/<path:table_uri>/tag',
'/table/<path:table_uri>/tag/<tag>')
api.add_resource(TableOwnerAPI,
'/table/<path:table_uri>/owner/<owner>')
api.add_resource(ColumnDescriptionAPI,
'/table/<path:table_uri>/column/<column_name>/description',
'/table/<path:table_uri>/column/<column_name>/description/<path:description_val>')
api.add_resource(Neo4jDetailAPI,
'/latest_updated_ts')
api.add_resource(TagAPI,
'/tags/')
api.add_resource(UserDetailAPI,
'/user/<path:user_id>')
api.add_resource(UserFollowAPI,
'/user/<path:user_id>/follow/',
'/user/<path:user_id>/follow/<resource_type>/<path:table_uri>')
api.add_resource(UserOwnAPI,
'/user/<path:user_id>/own/',
'/user/<path:user_id>/own/<resource_type>/<path:table_uri>')
api.add_resource(UserReadAPI,
'/user/<path:user_id>/read/',
'/user/<path:user_id>/read/<resource_type>/<path:table_uri>')
app.register_blueprint(api_bp)
return app
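# A hedged usage sketch (the config class path and port below are assumptions,
# not taken from this module):
#
#   application = create_app(
#       config_module_class='metadata_service.config.LocalConfig')
#   application.run(host='0.0.0.0', port=5002)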
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2008-2018 (ita)
"""
MacOSX related tools
"""
import os, shutil, platform
from waflib import Task, Utils
from waflib.TaskGen import taskgen_method, feature, after_method, before_method
app_info = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>{app_name}</string>
</dict>
</plist>
'''
"""
plist template
"""
@feature('c', 'cxx')
def set_macosx_deployment_target(self):
"""
	see WAF issue 285 and also http://trac.macports.org/ticket/17059
"""
if self.env.MACOSX_DEPLOYMENT_TARGET:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env.MACOSX_DEPLOYMENT_TARGET
elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
if Utils.unversioned_sys_platform() == 'darwin':
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])
@taskgen_method
def create_bundle_dirs(self, name, out):
"""
Creates bundle folders, used by :py:func:`create_task_macplist` and :py:func:`create_task_macapp`
"""
dir = out.parent.find_or_declare(name)
dir.mkdir()
macos = dir.find_or_declare(['Contents', 'MacOS'])
macos.mkdir()
return dir
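# bundle_name_for_output maps e.g. 'foo' -> 'foo.app' and 'foo.bin' -> 'foo.app':
# the last extension, if any, is replaced by '.app'.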
def bundle_name_for_output(out):
name = out.name
k = name.rfind('.')
if k >= 0:
name = name[:k] + '.app'
else:
name = name + '.app'
return name
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macapp(self):
"""
To compile an executable into a Mac application (a .app), set its *mac_app* attribute::
def build(bld):
			bld.program(source='a.c', target='foo', mac_app=True)
To force *all* executables to be transformed into Mac applications::
def build(bld):
bld.env.MACAPP = True
			bld.program(source='a.c', target='foo')
"""
if self.env.MACAPP or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])
self.apptask = self.create_task('macapp', self.link_task.outputs, n1)
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name
self.add_install_files(install_to=inst_to, install_from=n1, chmod=Utils.O755)
if getattr(self, 'mac_files', None):
# this only accepts files; they will be installed as seen from mac_files_root
mac_files_root = getattr(self, 'mac_files_root', None)
if isinstance(mac_files_root, str):
mac_files_root = self.path.find_node(mac_files_root)
if not mac_files_root:
self.bld.fatal('Invalid mac_files_root %r' % self.mac_files_root)
res_dir = n1.parent.parent.make_node('Resources')
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name
for node in self.to_nodes(self.mac_files):
relpath = node.path_from(mac_files_root or node.parent)
self.create_task('macapp', node, res_dir.make_node(relpath))
self.add_install_as(install_to=os.path.join(inst_to, relpath), install_from=node)
if getattr(self.bld, 'is_install', None):
# disable regular binary installation
self.install_task.hasrun = Task.SKIP_ME
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macplist(self):
"""
Creates a :py:class:`waflib.Tools.c_osx.macplist` instance.
"""
if self.env.MACAPP or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'Info.plist'])
self.plisttask = plisttask = self.create_task('macplist', [], n1)
plisttask.context = {
'app_name': self.link_task.outputs[0].name,
'env': self.env
}
plist_ctx = getattr(self, 'plist_context', None)
if (plist_ctx):
plisttask.context.update(plist_ctx)
if getattr(self, 'mac_plist', False):
node = self.path.find_resource(self.mac_plist)
if node:
plisttask.inputs.append(node)
else:
plisttask.code = self.mac_plist
else:
plisttask.code = app_info
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name
self.add_install_files(install_to=inst_to, install_from=n1)
@feature('cshlib', 'cxxshlib')
@before_method('apply_link', 'propagate_uselib_vars')
def apply_bundle(self):
"""
To make a bundled shared library (a ``.bundle``), set the *mac_bundle* attribute::
def build(bld):
bld.shlib(source='a.c', target='foo', mac_bundle = True)
	To force *all* shared libraries to be transformed into bundles::
def build(bld):
bld.env.MACBUNDLE = True
bld.shlib(source='a.c', target='foo')
"""
if self.env.MACBUNDLE or getattr(self, 'mac_bundle', False):
self.env.LINKFLAGS_cshlib = self.env.LINKFLAGS_cxxshlib = [] # disable the '-dynamiclib' flag
self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN
use = self.use = self.to_list(getattr(self, 'use', []))
if not 'MACBUNDLE' in use:
use.append('MACBUNDLE')
app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources']
class macapp(Task.Task):
"""
Creates mac applications
"""
color = 'PINK'
def run(self):
self.outputs[0].parent.mkdir()
shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath())
class macplist(Task.Task):
"""
Creates plist files
"""
color = 'PINK'
ext_in = ['.bin']
def run(self):
if getattr(self, 'code', None):
txt = self.code
else:
txt = self.inputs[0].read()
context = getattr(self, 'context', {})
txt = txt.format(**context)
self.outputs[0].write(txt)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from unittest.mock import Mock
import superset.connectors.druid.models as models
from superset.connectors.druid.models import DruidColumn, DruidDatasource, DruidMetric
from superset.exceptions import SupersetException
from .base_tests import SupersetTestCase
try:
from pydruid.utils.dimensions import (
MapLookupExtraction,
RegexExtraction,
RegisteredLookupExtraction,
)
import pydruid.utils.postaggregator as postaggs
except ImportError:
pass
def mock_metric(metric_name, is_postagg=False):
metric = Mock()
metric.metric_name = metric_name
metric.metric_type = "postagg" if is_postagg else "metric"
return metric
def emplace(metrics_dict, metric_name, is_postagg=False):
metrics_dict[metric_name] = mock_metric(metric_name, is_postagg)
# Unit tests that can be run without initializing base tests
class DruidFuncTestCase(SupersetTestCase):
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extraction_fn_map(self):
filters = [{"col": "deviceName", "val": ["iPhone X"], "op": "in"}]
dimension_spec = {
"type": "extraction",
"dimension": "device",
"outputName": "deviceName",
"outputType": "STRING",
"extractionFn": {
"type": "lookup",
"dimension": "dimensionName",
"outputName": "dimensionOutputName",
"replaceMissingValueWith": "missing_value",
"retainMissingValue": False,
"lookup": {
"type": "map",
"map": {
"iPhone10,1": "iPhone 8",
"iPhone10,4": "iPhone 8",
"iPhone10,2": "iPhone 8 Plus",
"iPhone10,5": "iPhone 8 Plus",
"iPhone10,3": "iPhone X",
"iPhone10,6": "iPhone X",
},
"isOneToOne": False,
},
},
}
spec_json = json.dumps(dimension_spec)
col = DruidColumn(column_name="deviceName", dimension_spec_json=spec_json)
column_dict = {"deviceName": col}
f = DruidDatasource.get_filters(filters, [], column_dict)
assert isinstance(f.extraction_function, MapLookupExtraction)
dim_ext_fn = dimension_spec["extractionFn"]
f_ext_fn = f.extraction_function
self.assertEqual(dim_ext_fn["lookup"]["map"], f_ext_fn._mapping)
self.assertEqual(dim_ext_fn["lookup"]["isOneToOne"], f_ext_fn._injective)
self.assertEqual(
dim_ext_fn["replaceMissingValueWith"], f_ext_fn._replace_missing_values
)
self.assertEqual(
dim_ext_fn["retainMissingValue"], f_ext_fn._retain_missing_values
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extraction_fn_regex(self):
filters = [{"col": "buildPrefix", "val": ["22B"], "op": "in"}]
dimension_spec = {
"type": "extraction",
"dimension": "build",
"outputName": "buildPrefix",
"outputType": "STRING",
"extractionFn": {"type": "regex", "expr": "(^[0-9A-Za-z]{3})"},
}
spec_json = json.dumps(dimension_spec)
col = DruidColumn(column_name="buildPrefix", dimension_spec_json=spec_json)
column_dict = {"buildPrefix": col}
f = DruidDatasource.get_filters(filters, [], column_dict)
assert isinstance(f.extraction_function, RegexExtraction)
dim_ext_fn = dimension_spec["extractionFn"]
f_ext_fn = f.extraction_function
self.assertEqual(dim_ext_fn["expr"], f_ext_fn._expr)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extraction_fn_registered_lookup_extraction(self):
filters = [{"col": "country", "val": ["Spain"], "op": "in"}]
dimension_spec = {
"type": "extraction",
"dimension": "country_name",
"outputName": "country",
"outputType": "STRING",
"extractionFn": {"type": "registeredLookup", "lookup": "country_name"},
}
spec_json = json.dumps(dimension_spec)
col = DruidColumn(column_name="country", dimension_spec_json=spec_json)
column_dict = {"country": col}
f = DruidDatasource.get_filters(filters, [], column_dict)
assert isinstance(f.extraction_function, RegisteredLookupExtraction)
dim_ext_fn = dimension_spec["extractionFn"]
self.assertEqual(dim_ext_fn["type"], f.extraction_function.extraction_type)
self.assertEqual(dim_ext_fn["lookup"], f.extraction_function._lookup)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_ignores_invalid_filter_objects(self):
filtr = {"col": "col1", "op": "=="}
filters = [filtr]
col = DruidColumn(column_name="col1")
column_dict = {"col1": col}
self.assertIsNone(DruidDatasource.get_filters(filters, [], column_dict))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_in(self):
filtr = {"col": "A", "op": "in", "val": ["a", "b", "c"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIn("filter", res.filter)
self.assertIn("fields", res.filter["filter"])
self.assertEqual("or", res.filter["filter"]["type"])
self.assertEqual(3, len(res.filter["filter"]["fields"]))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_not_in(self):
filtr = {"col": "A", "op": "not in", "val": ["a", "b", "c"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIn("filter", res.filter)
self.assertIn("type", res.filter["filter"])
self.assertEqual("not", res.filter["filter"]["type"])
self.assertIn("field", res.filter["filter"])
self.assertEqual(
3, len(res.filter["filter"]["field"].filter["filter"]["fields"])
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_equals(self):
filtr = {"col": "A", "op": "==", "val": "h"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("selector", res.filter["filter"]["type"])
self.assertEqual("A", res.filter["filter"]["dimension"])
self.assertEqual("h", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_filter_not_equals(self):
filtr = {"col": "A", "op": "!=", "val": "h"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("not", res.filter["filter"]["type"])
self.assertEqual("h", res.filter["filter"]["field"].filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_bounds_filter(self):
filtr = {"col": "A", "op": ">=", "val": "h"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertFalse(res.filter["filter"]["lowerStrict"])
self.assertEqual("A", res.filter["filter"]["dimension"])
self.assertEqual("h", res.filter["filter"]["lower"])
self.assertFalse(res.filter["filter"]["alphaNumeric"])
filtr["op"] = ">"
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertTrue(res.filter["filter"]["lowerStrict"])
filtr["op"] = "<="
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertFalse(res.filter["filter"]["upperStrict"])
self.assertEqual("h", res.filter["filter"]["upper"])
filtr["op"] = "<"
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertTrue(res.filter["filter"]["upperStrict"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_is_null_filter(self):
filtr = {"col": "A", "op": "IS NULL"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("selector", res.filter["filter"]["type"])
self.assertEqual("", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_is_not_null_filter(self):
filtr = {"col": "A", "op": "IS NOT NULL"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("not", res.filter["filter"]["type"])
self.assertIn("field", res.filter["filter"])
self.assertEqual(
"selector", res.filter["filter"]["field"].filter["filter"]["type"]
)
self.assertEqual("", res.filter["filter"]["field"].filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_regex_filter(self):
filtr = {"col": "A", "op": "regex", "val": "[abc]"}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("regex", res.filter["filter"]["type"])
self.assertEqual("[abc]", res.filter["filter"]["pattern"])
self.assertEqual("A", res.filter["filter"]["dimension"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_composes_multiple_filters(self):
filtr1 = {"col": "A", "op": "!=", "val": "y"}
filtr2 = {"col": "B", "op": "in", "val": ["a", "b", "c"]}
cola = DruidColumn(column_name="A")
colb = DruidColumn(column_name="B")
column_dict = {"A": cola, "B": colb}
res = DruidDatasource.get_filters([filtr1, filtr2], [], column_dict)
self.assertEqual("and", res.filter["filter"]["type"])
self.assertEqual(2, len(res.filter["filter"]["fields"]))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_ignores_in_not_in_with_empty_value(self):
filtr1 = {"col": "A", "op": "in", "val": []}
filtr2 = {"col": "A", "op": "not in", "val": []}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr1, filtr2], [], column_dict)
self.assertIsNone(res)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_constructs_equals_for_in_not_in_single_value(self):
filtr = {"col": "A", "op": "in", "val": ["a"]}
cola = DruidColumn(column_name="A")
colb = DruidColumn(column_name="B")
column_dict = {"A": cola, "B": colb}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("selector", res.filter["filter"]["type"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_handles_arrays_for_string_types(self):
filtr = {"col": "A", "op": "==", "val": ["a", "b"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("a", res.filter["filter"]["value"])
filtr = {"col": "A", "op": "==", "val": []}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIsNone(res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_handles_none_for_string_types(self):
filtr = {"col": "A", "op": "==", "val": None}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertIsNone(res)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_extracts_values_in_quotes(self):
filtr = {"col": "A", "op": "in", "val": ['"a"']}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("a", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_keeps_trailing_spaces(self):
filtr = {"col": "A", "op": "in", "val": ["a "]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], [], column_dict)
self.assertEqual("a ", res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_filters_converts_strings_to_num(self):
filtr = {"col": "A", "op": "in", "val": ["6"]}
col = DruidColumn(column_name="A")
column_dict = {"A": col}
res = DruidDatasource.get_filters([filtr], ["A"], column_dict)
self.assertEqual(6, res.filter["filter"]["value"])
filtr = {"col": "A", "op": "==", "val": "6"}
res = DruidDatasource.get_filters([filtr], ["A"], column_dict)
self.assertEqual(6, res.filter["filter"]["value"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_no_groupby(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
aggs = []
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
groupby = []
metrics = ["metric1"]
ds.get_having_filters = Mock(return_value=[])
client.query_builder = Mock()
client.query_builder.last_query = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
# no groupby calls client.timeseries
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
filter=[],
row_limit=100,
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(1, len(client.timeseries.call_args_list))
# check that there is no dimensions entry
called_args = client.timeseries.call_args_list[0][1]
self.assertNotIn("dimensions", called_args)
self.assertIn("post_aggregations", called_args)
# restore functions
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_with_adhoc_metric(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
all_metrics = []
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(all_metrics, post_aggs))
groupby = []
metrics = [
{
"expressionType": "SIMPLE",
"column": {"type": "DOUBLE", "column_name": "col1"},
"aggregate": "SUM",
"label": "My Adhoc Metric",
}
]
ds.get_having_filters = Mock(return_value=[])
client.query_builder = Mock()
client.query_builder.last_query = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
# no groupby calls client.timeseries
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
filter=[],
row_limit=100,
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(1, len(client.timeseries.call_args_list))
# check that there is no dimensions entry
called_args = client.timeseries.call_args_list[0][1]
self.assertNotIn("dimensions", called_args)
self.assertIn("post_aggregations", called_args)
# restore functions
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_single_groupby(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
aggs = ["metric1"]
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
groupby = ["col1"]
metrics = ["metric1"]
ds.get_having_filters = Mock(return_value=[])
client.query_builder.last_query.query_dict = {"mock": 0}
# client.topn is called twice
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
timeseries_limit=100,
client=client,
order_desc=True,
filter=[],
)
self.assertEqual(2, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
# check that there is no dimensions entry
called_args_pre = client.topn.call_args_list[0][1]
self.assertNotIn("dimensions", called_args_pre)
self.assertIn("dimension", called_args_pre)
called_args = client.topn.call_args_list[1][1]
self.assertIn("dimension", called_args)
self.assertEqual("col1", called_args["dimension"])
# not order_desc
client = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
order_desc=False,
filter=[],
row_limit=100,
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(1, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
self.assertIn("dimensions", client.groupby.call_args_list[0][1])
self.assertEqual(["col1"], client.groupby.call_args_list[0][1]["dimensions"])
# order_desc but timeseries and dimension spec
# calls topn with single dimension spec 'dimension'
spec = {"outputName": "hello", "dimension": "matcho"}
spec_json = json.dumps(spec)
col3 = DruidColumn(column_name="col3", dimension_spec_json=spec_json)
ds.columns.append(col3)
groupby = ["col3"]
client = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
order_desc=True,
timeseries_limit=5,
filter=[],
row_limit=100,
)
self.assertEqual(2, len(client.topn.call_args_list))
self.assertEqual(0, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
self.assertIn("dimension", client.topn.call_args_list[0][1])
self.assertIn("dimension", client.topn.call_args_list[1][1])
# uses dimension for pre query and full spec for final query
self.assertEqual("matcho", client.topn.call_args_list[0][1]["dimension"])
self.assertEqual(spec, client.topn.call_args_list[1][1]["dimension"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_multiple_groupby(self):
client = Mock()
from_dttm = Mock()
to_dttm = Mock()
from_dttm.replace = Mock(return_value=from_dttm)
to_dttm.replace = Mock(return_value=to_dttm)
from_dttm.isoformat = Mock(return_value="from")
to_dttm.isoformat = Mock(return_value="to")
timezone = "timezone"
from_dttm.tzname = Mock(return_value=timezone)
ds = DruidDatasource(datasource_name="datasource")
metric1 = DruidMetric(metric_name="metric1")
metric2 = DruidMetric(metric_name="metric2")
ds.metrics = [metric1, metric2]
col1 = DruidColumn(column_name="col1")
col2 = DruidColumn(column_name="col2")
ds.columns = [col1, col2]
aggs = []
post_aggs = ["some_agg"]
ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
groupby = ["col1", "col2"]
metrics = ["metric1"]
ds.get_having_filters = Mock(return_value=[])
client.query_builder = Mock()
client.query_builder.last_query = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
        # multiple groupby columns result in a call to client.groupby
ds.run_query(
groupby,
metrics,
None,
from_dttm,
to_dttm,
client=client,
row_limit=100,
filter=[],
)
self.assertEqual(0, len(client.topn.call_args_list))
self.assertEqual(1, len(client.groupby.call_args_list))
self.assertEqual(0, len(client.timeseries.call_args_list))
        # check that the dimensions entry contains both groupby columns
called_args = client.groupby.call_args_list[0][1]
self.assertIn("dimensions", called_args)
self.assertEqual(["col1", "col2"], called_args["dimensions"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_post_agg_returns_correct_agg_type(self):
get_post_agg = DruidDatasource.get_post_agg
# javascript PostAggregators
function = "function(field1, field2) { return field1 + field2; }"
conf = {
"type": "javascript",
"name": "postagg_name",
"fieldNames": ["field1", "field2"],
"function": function,
}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, models.JavascriptPostAggregator))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["type"], "javascript")
self.assertEqual(postagg.post_aggregator["fieldNames"], ["field1", "field2"])
self.assertEqual(postagg.post_aggregator["name"], "postagg_name")
self.assertEqual(postagg.post_aggregator["function"], function)
# Quantile
conf = {"type": "quantile", "name": "postagg_name", "probability": "0.5"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Quantile))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["probability"], "0.5")
# Quantiles
conf = {
"type": "quantiles",
"name": "postagg_name",
"probabilities": "0.4,0.5,0.6",
}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Quantiles))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["probabilities"], "0.4,0.5,0.6")
# FieldAccess
conf = {"type": "fieldAccess", "name": "field_name"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Field))
self.assertEqual(postagg.name, "field_name")
# constant
conf = {"type": "constant", "value": 1234, "name": "postagg_name"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Const))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["value"], 1234)
# hyperUniqueCardinality
conf = {"type": "hyperUniqueCardinality", "name": "unique_name"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.HyperUniqueCardinality))
self.assertEqual(postagg.name, "unique_name")
# arithmetic
conf = {
"type": "arithmetic",
"fn": "+",
"fields": ["field1", "field2"],
"name": "postagg_name",
}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, postaggs.Postaggregator))
self.assertEqual(postagg.name, "postagg_name")
self.assertEqual(postagg.post_aggregator["fn"], "+")
self.assertEqual(postagg.post_aggregator["fields"], ["field1", "field2"])
# custom post aggregator
conf = {"type": "custom", "name": "custom_name", "stuff": "more_stuff"}
postagg = get_post_agg(conf)
self.assertTrue(isinstance(postagg, models.CustomPostAggregator))
self.assertEqual(postagg.name, "custom_name")
self.assertEqual(postagg.post_aggregator["stuff"], "more_stuff")
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_find_postaggs_for_returns_postaggs_and_removes(self):
find_postaggs_for = DruidDatasource.find_postaggs_for
postagg_names = set(["pa2", "pa3", "pa4", "m1", "m2", "m3", "m4"])
metrics = {}
for i in range(1, 6):
emplace(metrics, "pa" + str(i), True)
emplace(metrics, "m" + str(i), False)
postagg_list = find_postaggs_for(postagg_names, metrics)
self.assertEqual(3, len(postagg_list))
self.assertEqual(4, len(postagg_names))
expected_metrics = ["m1", "m2", "m3", "m4"]
expected_postaggs = set(["pa2", "pa3", "pa4"])
for postagg in postagg_list:
expected_postaggs.remove(postagg.metric_name)
for metric in expected_metrics:
postagg_names.remove(metric)
self.assertEqual(0, len(expected_postaggs))
self.assertEqual(0, len(postagg_names))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_recursive_get_fields(self):
conf = {
"type": "quantile",
"fieldName": "f1",
"field": {
"type": "custom",
"fields": [
{"type": "fieldAccess", "fieldName": "f2"},
{"type": "fieldAccess", "fieldName": "f3"},
{
"type": "quantiles",
"fieldName": "f4",
"field": {"type": "custom"},
},
{
"type": "custom",
"fields": [
{"type": "fieldAccess", "fieldName": "f5"},
{
"type": "fieldAccess",
"fieldName": "f2",
"fields": [
{"type": "fieldAccess", "fieldName": "f3"},
{"type": "fieldIgnoreMe", "fieldName": "f6"},
],
},
],
},
],
},
}
fields = DruidDatasource.recursive_get_fields(conf)
expected = set(["f1", "f2", "f3", "f4", "f5"])
self.assertEqual(5, len(fields))
for field in fields:
expected.remove(field)
self.assertEqual(0, len(expected))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_metrics_and_post_aggs_tree(self):
metrics = ["A", "B", "m1", "m2"]
metrics_dict = {}
for i in range(ord("A"), ord("K") + 1):
emplace(metrics_dict, chr(i), True)
for i in range(1, 10):
emplace(metrics_dict, "m" + str(i), False)
def depends_on(index, fields):
dependents = fields if isinstance(fields, list) else [fields]
metrics_dict[index].json_obj = {"fieldNames": dependents}
depends_on("A", ["m1", "D", "C"])
depends_on("B", ["B", "C", "E", "F", "m3"])
depends_on("C", ["H", "I"])
depends_on("D", ["m2", "m5", "G", "C"])
depends_on("E", ["H", "I", "J"])
depends_on("F", ["J", "m5"])
depends_on("G", ["m4", "m7", "m6", "A"])
depends_on("H", ["A", "m4", "I"])
depends_on("I", ["H", "K"])
depends_on("J", "K")
depends_on("K", ["m8", "m9"])
aggs, postaggs = DruidDatasource.metrics_and_post_aggs(metrics, metrics_dict)
expected_metrics = set(aggs.keys())
self.assertEqual(9, len(aggs))
for i in range(1, 10):
expected_metrics.remove("m" + str(i))
self.assertEqual(0, len(expected_metrics))
self.assertEqual(11, len(postaggs))
for i in range(ord("A"), ord("K") + 1):
del postaggs[chr(i)]
self.assertEqual(0, len(postaggs))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_metrics_and_post_aggs(self):
"""
Test generation of metrics and post-aggregations from an initial list
of superset metrics (which may include the results of either). This
primarily tests that specifying a post-aggregator metric will also
require the raw aggregation of the associated druid metric column.
"""
metrics_dict = {
"unused_count": DruidMetric(
metric_name="unused_count",
verbose_name="COUNT(*)",
metric_type="count",
json=json.dumps({"type": "count", "name": "unused_count"}),
),
"some_sum": DruidMetric(
metric_name="some_sum",
verbose_name="SUM(*)",
metric_type="sum",
json=json.dumps({"type": "sum", "name": "sum"}),
),
"a_histogram": DruidMetric(
metric_name="a_histogram",
verbose_name="APPROXIMATE_HISTOGRAM(*)",
metric_type="approxHistogramFold",
json=json.dumps({"type": "approxHistogramFold", "name": "a_histogram"}),
),
"aCustomMetric": DruidMetric(
metric_name="aCustomMetric",
verbose_name="MY_AWESOME_METRIC(*)",
metric_type="aCustomType",
json=json.dumps({"type": "customMetric", "name": "aCustomMetric"}),
),
"quantile_p95": DruidMetric(
metric_name="quantile_p95",
verbose_name="P95(*)",
metric_type="postagg",
json=json.dumps(
{
"type": "quantile",
"probability": 0.95,
"name": "p95",
"fieldName": "a_histogram",
}
),
),
"aCustomPostAgg": DruidMetric(
metric_name="aCustomPostAgg",
verbose_name="CUSTOM_POST_AGG(*)",
metric_type="postagg",
json=json.dumps(
{
"type": "customPostAgg",
"name": "aCustomPostAgg",
"field": {"type": "fieldAccess", "fieldName": "aCustomMetric"},
}
),
),
}
adhoc_metric = {
"expressionType": "SIMPLE",
"column": {"type": "DOUBLE", "column_name": "value"},
"aggregate": "SUM",
"label": "My Adhoc Metric",
}
metrics = ["some_sum"]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
assert set(saved_metrics.keys()) == {"some_sum"}
assert post_aggs == {}
metrics = [adhoc_metric]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
assert set(saved_metrics.keys()) == set([adhoc_metric["label"]])
assert post_aggs == {}
metrics = ["some_sum", adhoc_metric]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
assert set(saved_metrics.keys()) == {"some_sum", adhoc_metric["label"]}
assert post_aggs == {}
metrics = ["quantile_p95"]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
result_postaggs = set(["quantile_p95"])
assert set(saved_metrics.keys()) == {"a_histogram"}
assert set(post_aggs.keys()) == result_postaggs
metrics = ["aCustomPostAgg"]
saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
result_postaggs = set(["aCustomPostAgg"])
assert set(saved_metrics.keys()) == {"aCustomMetric"}
assert set(post_aggs.keys()) == result_postaggs
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_druid_type_from_adhoc_metric(self):
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "DOUBLE", "column_name": "value"},
"aggregate": "SUM",
"label": "My Adhoc Metric",
}
)
assert druid_type == "doubleSum"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "LONG", "column_name": "value"},
"aggregate": "MAX",
"label": "My Adhoc Metric",
}
)
assert druid_type == "longMax"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "VARCHAR(255)", "column_name": "value"},
"aggregate": "COUNT",
"label": "My Adhoc Metric",
}
)
assert druid_type == "count"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "VARCHAR(255)", "column_name": "value"},
"aggregate": "COUNT_DISTINCT",
"label": "My Adhoc Metric",
}
)
assert druid_type == "cardinality"
druid_type = DruidDatasource.druid_type_from_adhoc_metric(
{
"column": {"type": "hyperUnique", "column_name": "value"},
"aggregate": "COUNT_DISTINCT",
"label": "My Adhoc Metric",
}
)
assert druid_type == "hyperUnique"
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_run_query_order_by_metrics(self):
client = Mock()
client.query_builder.last_query.query_dict = {"mock": 0}
from_dttm = Mock()
to_dttm = Mock()
ds = DruidDatasource(datasource_name="datasource")
ds.get_having_filters = Mock(return_value=[])
dim1 = DruidColumn(column_name="dim1")
dim2 = DruidColumn(column_name="dim2")
metrics_dict = {
"count1": DruidMetric(
metric_name="count1",
metric_type="count",
json=json.dumps({"type": "count", "name": "count1"}),
),
"sum1": DruidMetric(
metric_name="sum1",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum1"}),
),
"sum2": DruidMetric(
metric_name="sum2",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum2"}),
),
"div1": DruidMetric(
metric_name="div1",
metric_type="postagg",
json=json.dumps(
{
"fn": "/",
"type": "arithmetic",
"name": "div1",
"fields": [
{"fieldName": "sum1", "type": "fieldAccess"},
{"fieldName": "sum2", "type": "fieldAccess"},
],
}
),
),
}
ds.columns = [dim1, dim2]
ds.metrics = list(metrics_dict.values())
groupby = ["dim1"]
metrics = ["count1"]
granularity = "all"
# get the counts of the top 5 'dim1's, order by 'sum1'
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="sum1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.topn.call_args_list[0][1]
self.assertEqual("dim1", qry_obj["dimension"])
self.assertEqual("sum1", qry_obj["metric"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1"}, set(aggregations.keys()))
self.assertEqual(set(), set(post_aggregations.keys()))
# get the counts of the top 5 'dim1's, order by 'div1'
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="div1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.topn.call_args_list[1][1]
self.assertEqual("dim1", qry_obj["dimension"])
self.assertEqual("div1", qry_obj["metric"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1", "sum2"}, set(aggregations.keys()))
self.assertEqual({"div1"}, set(post_aggregations.keys()))
groupby = ["dim1", "dim2"]
# get the counts of the top 5 ['dim1', 'dim2']s, order by 'sum1'
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="sum1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.groupby.call_args_list[0][1]
self.assertEqual({"dim1", "dim2"}, set(qry_obj["dimensions"]))
self.assertEqual("sum1", qry_obj["limit_spec"]["columns"][0]["dimension"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1"}, set(aggregations.keys()))
self.assertEqual(set(), set(post_aggregations.keys()))
# get the counts of the top 5 ['dim1', 'dim2']s, order by 'div1'
ds.run_query(
groupby,
metrics,
granularity,
from_dttm,
to_dttm,
timeseries_limit=5,
timeseries_limit_metric="div1",
client=client,
order_desc=True,
filter=[],
)
qry_obj = client.groupby.call_args_list[1][1]
self.assertEqual({"dim1", "dim2"}, set(qry_obj["dimensions"]))
self.assertEqual("div1", qry_obj["limit_spec"]["columns"][0]["dimension"])
aggregations = qry_obj["aggregations"]
post_aggregations = qry_obj["post_aggregations"]
self.assertEqual({"count1", "sum1", "sum2"}, set(aggregations.keys()))
self.assertEqual({"div1"}, set(post_aggregations.keys()))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_get_aggregations(self):
ds = DruidDatasource(datasource_name="datasource")
metrics_dict = {
"sum1": DruidMetric(
metric_name="sum1",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum1"}),
),
"sum2": DruidMetric(
metric_name="sum2",
metric_type="doubleSum",
json=json.dumps({"type": "doubleSum", "name": "sum2"}),
),
"div1": DruidMetric(
metric_name="div1",
metric_type="postagg",
json=json.dumps(
{
"fn": "/",
"type": "arithmetic",
"name": "div1",
"fields": [
{"fieldName": "sum1", "type": "fieldAccess"},
{"fieldName": "sum2", "type": "fieldAccess"},
],
}
),
),
}
metric_names = ["sum1", "sum2"]
aggs = ds.get_aggregations(metrics_dict, metric_names)
expected_agg = {name: metrics_dict[name].json_obj for name in metric_names}
self.assertEqual(expected_agg, aggs)
metric_names = ["sum1", "col1"]
self.assertRaises(
SupersetException, ds.get_aggregations, metrics_dict, metric_names
)
metric_names = ["sum1", "div1"]
self.assertRaises(
SupersetException, ds.get_aggregations, metrics_dict, metric_names
)
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import ListView, DetailView, DeleteView, UpdateView
from django import forms
from django.urls import reverse_lazy, reverse
from django.views import View
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from pprint import pprint
from django.db.models import Q
# Create your views here.
class IndexView(View):
'''トップページを表示'''
def get(self, request):
template_name = 'esuits/index.html'
return render(request, template_name)
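# A hedged example of wiring this view into urls.py (the import path is an
# assumption, not taken from this project):
#
#   from django.urls import path
#   from .views import IndexView
#
#   urlpatterns = [
#       path('', IndexView.as_view(), name='index'),
#   ]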
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from six import moves
from essential import local
from essential import test
class Dict(dict):
"""Make weak referencable object."""
pass
class LocalStoreTestCase(test.BaseTestCase):
v1 = Dict(a='1')
v2 = Dict(a='2')
v3 = Dict(a='3')
def setUp(self):
super(LocalStoreTestCase, self).setUp()
# NOTE(mrodden): we need to make sure that local store
# gets imported in the current python context we are
# testing in (eventlet vs normal python threading) so
# we test the correct type of local store for the current
# threading model
moves.reload_module(local)
def test_thread_unique_storage(self):
"""Make sure local store holds thread specific values."""
expected_set = []
local.store.a = self.v1
def do_something():
local.store.a = self.v2
expected_set.append(getattr(local.store, 'a'))
def do_something2():
local.store.a = self.v3
expected_set.append(getattr(local.store, 'a'))
t1 = threading.Thread(target=do_something)
t2 = threading.Thread(target=do_something2)
t1.start()
t2.start()
t1.join()
t2.join()
expected_set.append(getattr(local.store, 'a'))
self.assertTrue(self.v1 in expected_set)
self.assertTrue(self.v2 in expected_set)
self.assertTrue(self.v3 in expected_set)
|
from random import randint
from tnnp import nn as tnnp
nn = tnnp.NeuralNetwork(2, 2, 1)
if nn is None:
    raise Exception("Initialization failed!")
nn = tnnp.NeuralNetwork(2, 2, 1)
input = [1, 0]
output = nn.feedforward(input)
if output < [-1] or output > [1]:
    raise Exception(".feedforward function failed!", output)
def formula(x):
# f(x) = mx + b
if x == [0, 0]:
return [-1]
if x == [0, 1]:
return [1]
if x == [1, 0]:
return [1]
if x == [1, 1]:
return [-1]
nn = tnnp.NeuralNetwork(2, 2, 1)
for i in range(50000):
data = [randint(0, 1), randint(0, 1)]
nn.train(data, formula(data))
values = []
for data in [[0, 0], [0, 1], [1, 0], [1, 1]]:
output = nn.feedforward(data)
values.append(round(output[0]))
if not values == [-1, 1, 1, -1]:
raise Exception(
".train function failed! You might want to try running this script again.", values)
nn = tnnp.NeuralNetwork(2, 2, 1)
cp = nn.copy()
if not cp:
raise Exception(".copy function failed!", cp)
nn = tnnp.NeuralNetwork(2, 2, 1)
nn.mutate(lambda n: n * 2)
nn = tnnp.NeuralNetwork(2, 2, 1)
nn.save("test.pkl")
nn2 = tnnp.load("test.pkl")
if nn2.hidden_nodes != 2:
raise Exception(".save/.load function failed!", nn2)
print("No errors were found!")
|
import tensorflow as tf
from tensorflow import layers as tfl
from .base_model import BaseModel, Mode
class SimpleClassifier(BaseModel):
input_spec = {
'image': {'shape': [None, None, None, 1], 'type': tf.float32}
}
required_config_keys = []
default_config = {'data_format': 'channels_first'}
def _model(self, inputs, mode, **config):
x = inputs['image']
if config['data_format'] == 'channels_first':
x = tf.transpose(x, [0, 3, 1, 2])
params = {'padding': 'SAME', 'data_format': config['data_format']}
x = tfl.conv2d(x, 32, 5, activation=tf.nn.relu, name='conv1', **params)
x = tfl.max_pooling2d(x, 2, 2, name='pool1', **params)
x = tfl.conv2d(x, 64, 5, activation=tf.nn.relu, name='conv2', **params)
x = tfl.max_pooling2d(x, 2, 2, name='pool2', **params)
x = tfl.flatten(x)
x = tfl.dense(x, 1024, activation=tf.nn.relu, name='fc1')
x = tfl.dense(x, 10, name='fc2')
if mode == Mode.TRAIN:
return {'logits': x}
else:
return {'logits': x, 'prob': tf.nn.softmax(x), 'pred': tf.argmax(x, axis=-1)}
def _loss(self, outputs, inputs, **config):
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.compat.v1.losses.sparse_softmax_cross_entropy(
labels=inputs['label'], logits=outputs['logits']))
return loss
def _metrics(self, outputs, inputs, **config):
metrics = {}
with tf.name_scope('metrics'):
correct_count = tf.equal(outputs['pred'], inputs['label'])
correct_count = tf.cast(correct_count, tf.float32)
metrics['accuracy'] = tf.reduce_mean(correct_count)
return metrics
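# Illustrative note (a sketch; BaseModel's exact construction and feeding
# details are assumed, not shown in this file): the model consumes a float32
# 'image' batch shaped [N, H, W, 1] plus integer 'label' targets for the loss,
# and produces 10-class logits. With the default
# config {'data_format': 'channels_first'} the input is transposed from NHWC
# to NCHW before the convolution stack.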
|
#!/usr/bin/env python
# Copyright 2020 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse, os, sys, json, traceback
from pipelines_utils import utils
def gen_filename(id, count, generate_filenames):
if generate_filenames:
return str(count)
else:
return id
def execute(candidates_json, generate_filenames):
with open(candidates_json, 'r') as f:
candidates = json.load(f)
queries = candidates['queries']['molecules']
results = candidates['results']
hitCounts = candidates['hitCounts']
utils.log('Processing', len(queries), 'queries and', len(results), 'results')
num_mols = 0
num_hits = 0
count = 0
ids2Filenames = {}
for query in queries:
id = query['id']
if id in hitCounts:
molfile = query['originalMol']
if generate_filenames:
                fname = str(count).zfill(3)
else:
fname = id
utils.log('Using file name of', fname)
with open(fname + '.mol', 'w') as f:
f.write(molfile)
num_hits += 1
ids2Filenames[id] = fname
count += 1
writers = {}
for result in results:
num_mols += 1
for id in result['sourceMols']:
if id in writers:
writer = writers[id]
else:
fname = ids2Filenames[id]
writer = open(fname + '.smi', 'w')
writers[id] = writer
smiles = result['smiles']
#utils.log('Processing', smiles)
writer.write(smiles + '\n')
for w in writers.values():
w.close()
utils.log('Totals - hits:', num_hits, 'outputs:', num_mols)
def main():
"""
Example usage:
python -m pipelines.xchem.split-fragnet-candidates -i ../../data/mpro/expanded-17.json
:return:
"""
parser = argparse.ArgumentParser(description='Split fragnet candidates - Split fragment network expansion into individual sets')
    parser.add_argument('-i', '--input', help='JSON containing the expanded candidates')
    parser.add_argument('-g', '--generate-filenames', action='store_true', help='Use automatically generated file names instead of the title field')
args = parser.parse_args()
utils.log("Split fragnet candidates args: ", args)
infile = args.input
execute(infile, args.generate_filenames)
if __name__ == "__main__":
main()
|
"""
Function taken from IceCube astro package.
"""
import numpy as np
def angular_distance(lon1, lat1, lon2, lat2):
"""
    Calculate the angular distance along the great circle
    on the surface of a sphere between the points
    (`lon1`, `lat1`) and (`lon2`, `lat2`).
    This function works for equatorial coordinates
    with right ascension as longitude and declination
    as latitude. It uses the Vincenty formula
    for calculating the distance.
Parameters
----------
lon1 : array_like
longitude of first point in radians
lat1 : array_like
latitude of the first point in radians
lon2 : array_like
longitude of second point in radians
lat2 : array_like
latitude of the second point in radians
"""
c1 = np.cos(lat1)
c2 = np.cos(lat2)
s1 = np.sin(lat1)
s2 = np.sin(lat2)
sd = np.sin(lon2 - lon1)
cd = np.cos(lon2 - lon1)
return np.arctan2(np.hypot(c2 * sd, c1 * s2 - s1 * c2 * cd), s1 * s2 + c1 * c2 * cd)
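if __name__ == '__main__':
    # Minimal sanity check added as an illustrative sketch (not part of the
    # original IceCube helper): two points on the equator separated by 90
    # degrees of right ascension lie pi/2 radians apart on the sphere.
    print(angular_distance(0.0, 0.0, np.pi / 2, 0.0))  # ~1.5708 (pi/2)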
|
from django.contrib import admin
from .models import Action
@admin.register(Action)
class ActionAdmin(admin.ModelAdmin):
list_display = ('user', 'verb', 'target', 'created')
list_filter = ('created',)
search_fields = ('verb',)
|
# Copyright 2021-present citharus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use utils.py except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Union, Dict
import azury.asynczury as asynczury
from azury.utils import parse_iso
__all__: list[str] = ['to_file', 'to_user', 'to_team']
async def to_file(
client: asynczury.Client,
service: str,
data: Dict[str, Union[str, bool, int, list]],
team: str = '',
) -> asynczury.File:
"""A function to convert the files' data to a :class:`File` object.
Parameters
----------
client: Client
        The :class:`Client` used to initialize the :class:`File`.
service: str
The service the file is bound to e.g. teams or users.
data: Dict[str, Union[str, bool, int, list]]
The files' data.
team: str
The team id, if the file belongs to a team.
Defaults to an empty string.
    Returns
    -------
    File
The converted :class:`File` object.
"""
return asynczury.File(
client,
service,
team,
flags=data['flags'] if 'flags' in data else None,
id=data['_id'] if '_id' in data else data['id'],
archived='archived' in data['flags'] if 'flags' in data else None,
trashed='trashed' in data['flags'] if 'flags' in data else None,
favorite='favorite' in data['flags'] if 'flags' in data else None,
downloads=data['downloads'] if 'downloads' in data else None,
views=data['views'] if 'views' in data else None,
user=int(data['user']) if 'user' in data else int(data['author']),
name=data['name'],
size=data['size'],
type=data['type'],
created_at=parse_iso(data['createdAt'])
if 'createdAt' in data else parse_iso(data['uploadedAt']),
updated_at=parse_iso(data['updatedAt']),
)
async def to_user(
client: asynczury.Client,
data: dict,
) -> asynczury.User:
"""A function to convert the user's data to a :class:`User` object.
Parameters
----------
client: Client
        The :class:`Client` used to initialize the :class:`User`.
data: Dict[str, Union[str, list]]
The user's data.
Returns
-------
User
The converted :class:`User` object.
"""
return asynczury.User(
client,
avatar=data['avatar'],
flags=data['flags'],
connections=data['connections'],
access=data['access'],
id=int(data['_id']),
ip=data['ip'],
token=data['token'],
created_at=parse_iso(data['createdAt']),
updated_at=parse_iso(data['updatedAt']),
username=data['username'],
)
async def to_team(
client: asynczury.Client,
data: Dict[str, Union[str, list]],
) -> asynczury.Team:
"""A function to convert the teams's data to a :class:`Team` object.
Parameters
----------
client: Client
        The :class:`Client` used to initialize the :class:`Team`.
    data: Dict[str, Union[str, list]]
        The team's data.
Returns
-------
Team
The converted :class:`Team` object.
"""
return asynczury.Team(
client,
members=[int(user) for user in data['members']],
icon=data['icon'],
flags=data['flags'],
id=data['_id'],
name=data['name'],
owner=int(data['owner']),
created_at=parse_iso(data['createdAt']),
updated_at=parse_iso(data['updatedAt']),
)
|
"""
Pipeline for text processing implementation
"""
from pathlib import Path
import re
import pymorphy2
from pymystem3 import Mystem
from constants import ASSETS_PATH
from core_utils.article import Article, ArtifactType
class EmptyDirectoryError(Exception):
"""
No data to process
"""
class InconsistentDatasetError(Exception):
"""
Corrupt data:
- numeration is expected to start from 1 and to be continuous
- a number of text files must be equal to the number of meta files
- text files must not be empty
"""
class MorphologicalToken:
"""
Stores language params for each processed token
"""
def __init__(self, original_word):
self.original_word = original_word
self.normalized_form = ''
self.tags_mystem = ''
self.tags_pymorphy = ''
def get_cleaned(self):
"""
Returns lowercased original form of a token
"""
return self.original_word.lower()
def get_single_tagged(self):
"""
Returns normalized lemma with MyStem tags
"""
return f'{self.normalized_form}<{self.tags_mystem}>'
def get_multiple_tagged(self):
"""
Returns normalized lemma with PyMorphy tags
"""
return f'{self.normalized_form}<{self.tags_mystem}>({self.tags_pymorphy})'
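# Illustrative sketch of the token API above (all values are assumed, not taken
# from the original pipeline):
#   token = MorphologicalToken('Дома')
#   token.normalized_form, token.tags_mystem, token.tags_pymorphy = 'дом', 'S,муж', 'NOUN'
#   token.get_cleaned()          -> 'дома'
#   token.get_single_tagged()    -> 'дом<S,муж>'
#   token.get_multiple_tagged()  -> 'дом<S,муж>(NOUN)'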
class CorpusManager:
"""
Works with articles and stores them
"""
def __init__(self, path_to_raw_txt_data: str):
self.path = Path(path_to_raw_txt_data)
self._storage = {}
self._scan_dataset()
def _scan_dataset(self):
"""
Register each dataset entry
"""
files = self.path.glob('*_raw.txt')
pattern = re.compile(r'(\d+)')
for file in files:
if re.match(pattern, file.name) is not None:
article_id = int(re.match(pattern, file.name).group(0))
self._storage[article_id] = Article(url=None, article_id=article_id)
else:
print("Unsuccessful article id extraction")
def get_articles(self):
"""
Returns storage params
"""
return self._storage
class TextProcessingPipeline:
"""
Process articles from corpus manager
"""
def __init__(self, corpus_manager: CorpusManager):
self.corpus_manager = corpus_manager
def run(self):
"""
Runs pipeline process scenario
"""
articles = self.corpus_manager.get_articles().values()
for article in articles:
raw_text = article.get_raw_text()
processed_tokens = self._process(raw_text)
cleaned_tokens = []
single_tagged_tokens = []
multiple_tagged_tokens = []
for processed_token in processed_tokens:
cleaned_tokens.append(processed_token.get_cleaned())
single_tagged_tokens.append(processed_token.get_single_tagged())
multiple_tagged_tokens.append(processed_token.get_multiple_tagged())
article.save_as(' '.join(cleaned_tokens), ArtifactType.cleaned)
article.save_as(' '.join(single_tagged_tokens), ArtifactType.single_tagged)
article.save_as(' '.join(multiple_tagged_tokens), ArtifactType.multiple_tagged)
def _process(self, raw_text: str):
"""
Processes each token and creates MorphToken class instance
"""
        # text extracted from PDF contains words hyphenated across line breaks,
        # e.g. "след-ующий"; this replace rejoins them and flattens newlines
text = raw_text.replace('-\n', '').replace('\n', ' ')
result = Mystem().analyze(text)
# launching morph_tokens list which then is appended with MorphologicalToken class instances
morph_tokens = []
# pymorphy analyzer which will be used for filling pymorphy tags
morph = pymorphy2.MorphAnalyzer()
for token in result:
            # prerequisites for the token to be usable
if "analysis" not in token:
continue
if not token.get('analysis'):
continue
if not (token['analysis'][0].get("gr") or token['analysis'][0].get("lex")):
continue
original_word = token["text"]
morph_token = MorphologicalToken(original_word=original_word)
# mystem tags
morph_token.normalized_form = token['analysis'][0]['lex']
morph_token.tags_mystem = token['analysis'][0]['gr']
# pymorphy tags
one_word = morph.parse(original_word)[0]
morph_token.tags_pymorphy = one_word.tag
morph_tokens.append(morph_token)
return morph_tokens
def validate_dataset(path_to_validate):
"""
Validates folder with assets
"""
path = Path(path_to_validate)
if not path.exists():
raise FileNotFoundError
if not path.is_dir():
raise NotADirectoryError
if not any(path.iterdir()):
raise EmptyDirectoryError
file_formats = [".json", ".txt", ".pdf", ".png"]
checker = {}
# creating a dictionary of file indexes
# and checking the formats
pattern = re.compile(r'\d+')
for file in path.iterdir():
match_to = re.match(pattern, file.name)
if not match_to:
raise InconsistentDatasetError("There is a file with incorrect name pattern.")
if file.stat().st_size == 0:
raise InconsistentDatasetError("File is empty.")
file_index = file.name.split("_")[0]
if file_index not in checker.keys():
checker[file_index] = 1
else:
checker[file_index] += 1
if file.suffix not in file_formats:
raise FileNotFoundError("File with incorrect format.")
# checking that there are necessary files with said index
if not all(value >= 2 for value in checker.values()):
raise InconsistentDatasetError("There are files missing.")
# checking whether keys are consistent from 1 to N (max in files indices)
current_i = list(int(x) for x in checker)
ideal_i = range(1, max(current_i) + 1)
if not set(current_i) & set(ideal_i) == set(ideal_i):
raise InconsistentDatasetError("The numbering is inconsistent.")
def main():
validate_dataset(ASSETS_PATH)
corpus_manager = CorpusManager(ASSETS_PATH)
pipeline = TextProcessingPipeline(corpus_manager)
pipeline.run()
if __name__ == "__main__":
main()
|
import os
from setuptools import (
find_packages,
setup
)
__version__ = open("VERSION", 'r').read().strip()
REQUIREMENTS_FOLDER = os.getenv('REQUIREMENTS_PATH', '')
requirements = [line.strip() for line in open(os.path.join(REQUIREMENTS_FOLDER, "requirements.txt"), 'r')]
setup(
name='ninjin',
version=__version__,
keywords="ninjin",
packages=find_packages(exclude=['tests']),
install_requires=requirements,
extras_require={
'dev': [
'mock',
'async-generator==1.10',
'faker',
'flake8',
'flake8-builtins',
'flake8-coding',
'flake8-commas',
'flake8-comprehensions',
'flake8-debugger',
'flake8-docstrings',
'flake8-pep3101',
'flake8-quotes',
'flake8-string-format',
'flake8-super-call',
'flake8-eradicate',
'flake8-print',
'flake8-isort',
'pytest',
'pytest-factoryboy',
'pytest-pep8',
'pytest-mock==3.1.0',
'pytest-asyncio==0.11.0',
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
]
)
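# Illustrative install sketch (assumes this setup.py sits at the package root
# with the VERSION and requirements.txt files it reads above):
#   pip install -e .            # runtime dependencies only
#   pip install -e ".[dev]"     # plus the lint/test extras declared in extras_require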
|
"""
**************
SparseGraph 6
**************
Read graphs in graph6 and sparse6 format.
Format
------
"graph6 and sparse6 are formats for storing undirected graphs in a
compact manner, using only printable ASCII characters. Files in these
formats have text type and contain one line per graph."
http://cs.anu.edu.au/~bdm/data/formats.html
See http://cs.anu.edu.au/~bdm/data/formats.txt for details.
"""
# Original author: D. Eppstein, UC Irvine, August 12, 2003.
# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['read_graph6', 'parse_graph6', 'read_graph6_list',
'read_sparse6', 'parse_sparse6', 'read_sparse6_list']
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file
# graph6
def read_graph6(path):
"""Read simple undirected graphs in graph6 format from path.
Returns a single Graph.
"""
return read_graph6_list(path)[0]
def parse_graph6(str):
"""Read a simple undirected graph in graph6 format from string.
Returns a single Graph.
"""
def bits():
"""Return sequence of individual bits from 6-bit-per-value
list of data values."""
for d in data:
for i in [5,4,3,2,1,0]:
yield (d>>i)&1
if str.startswith('>>graph6<<'):
str = str[10:]
data = graph6data(str)
n, data = graph6n(data)
nd = (n*(n-1)//2 + 5) // 6
if len(data) != nd:
raise NetworkXError(\
'Expected %d bits but got %d in graph6' % (n*(n-1)//2, len(data)*6))
G=nx.Graph()
G.add_nodes_from(range(n))
for (i,j),b in zip([(i,j) for j in range(1,n) for i in range(j)], bits()):
if b: G.add_edge(i,j)
return G
@open_file(0,mode='rt')
def read_graph6_list(path):
"""Read simple undirected graphs in graph6 format from path.
Returns a list of Graphs, one for each line in file.
"""
glist=[]
for line in path:
line = line.strip()
if not len(line): continue
glist.append(parse_graph6(line))
return glist
# sparse6
def read_sparse6(path):
"""Read simple undirected graphs in sparse6 format from path.
Returns a single MultiGraph."""
return read_sparse6_list(path)[0]
@open_file(0,mode='rt')
def read_sparse6_list(path):
"""Read undirected graphs in sparse6 format from path.
Returns a list of MultiGraphs, one for each line in file."""
glist=[]
for line in path:
line = line.strip()
if not len(line): continue
glist.append(parse_sparse6(line))
return glist
def parse_sparse6(string):
"""Read undirected graph in sparse6 format from string.
Returns a MultiGraph.
"""
if string.startswith('>>sparse6<<'):
        string = string[11:]
if not string.startswith(':'):
raise NetworkXError('Expected colon in sparse6')
n, data = graph6n(graph6data(string[1:]))
k = 1
while 1<<k < n:
k += 1
def parseData():
"""Return stream of pairs b[i], x[i] for sparse6 format."""
chunks = iter(data)
d = None # partial data word
dLen = 0 # how many unparsed bits are left in d
while 1:
if dLen < 1:
d = next(chunks)
dLen = 6
dLen -= 1
b = (d>>dLen) & 1 # grab top remaining bit
x = d & ((1<<dLen)-1) # partially built up value of x
xLen = dLen # how many bits included so far in x
while xLen < k: # now grab full chunks until we have enough
d = next(chunks)
dLen = 6
x = (x<<6) + d
xLen += 6
x = (x >> (xLen - k)) # shift back the extra bits
dLen = xLen - k
yield b,x
v = 0
G=nx.MultiGraph()
G.add_nodes_from(range(n))
for b,x in parseData():
if b: v += 1
if x >= n: break # padding with ones can cause overlarge number here
elif x > v: v = x
else:
G.add_edge(x,v)
return G
# helper functions
def graph6data(str):
"""Convert graph6 character sequence to 6-bit integers."""
v = [ord(c)-63 for c in str]
if min(v) < 0 or max(v) > 63:
return None
return v
def graph6n(data):
"""Read initial one or four-unit value from graph6 sequence.
Return value, rest of seq."""
if data[0] <= 62:
return data[0], data[1:]
return (data[1]<<12) + (data[2]<<6) + data[3], data[4:]
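if __name__ == '__main__':
    # Quick illustrative check (not part of the original module): 'Bw' should
    # be the graph6 encoding of the triangle K3 -- n=3 ('B') followed by the
    # three upper-triangle adjacency bits all set ('w').
    K3 = parse_graph6('Bw')
    print(K3.number_of_nodes(), K3.number_of_edges())  # expected: 3 3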
|
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from ...serialize.core import TupleField, ValueType, Int8Field
from ...operands import Fetch, FetchShuffle
from ...utils import on_serialize_shape, on_deserialize_shape
from ..operands import DataFrameOperandMixin, ObjectType
class DataFrameFetchMixin(DataFrameOperandMixin):
def check_inputs(self, inputs):
# no inputs
if inputs and len(inputs) > 0:
raise ValueError("%s has no inputs" % type(self).__name__)
@classmethod
def tile(cls, op):
raise NotImplementedError('Fetch tile cannot be handled by operand itself')
@classmethod
def execute(cls, ctx, op):
# fetch op need to do nothing
pass
class DataFrameFetch(Fetch, DataFrameFetchMixin):
# required fields
_shape = TupleField('shape', ValueType.int64,
on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
_object_type = Int8Field('object_type', on_serialize=operator.attrgetter('value'),
on_deserialize=ObjectType)
def __init__(self, to_fetch_key=None, sparse=False, object_type=None, **kw):
super(DataFrameFetch, self).__init__(
_to_fetch_key=to_fetch_key, _sparse=sparse, _object_type=object_type, **kw)
@property
def object_type(self):
return self._object_type
def _new_chunks(self, inputs, kws=None, **kw):
if '_key' in kw and self._to_fetch_key is None:
self._to_fetch_key = kw['_key']
if '_shape' in kw and self._shape is None:
self._shape = kw['_shape']
return super(DataFrameFetch, self)._new_chunks(inputs, kws=kws, **kw)
def _new_tileables(self, inputs, kws=None, **kw):
if '_key' in kw and self._to_fetch_key is None:
self._to_fetch_key = kw['_key']
return super(DataFrameFetch, self)._new_tileables(inputs, kws=kws, **kw)
class DataFrameFetchShuffle(FetchShuffle, DataFrameFetchMixin):
# required fields
_shape = TupleField('shape', ValueType.int64,
on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
_object_type = Int8Field('object_type', on_serialize=operator.attrgetter('value'),
on_deserialize=ObjectType)
def __init__(self, to_fetch_keys=None, to_fetch_idxes=None, object_type=None, **kw):
super(DataFrameFetchShuffle, self).__init__(
_to_fetch_keys=to_fetch_keys, _to_fetch_idxes=to_fetch_idxes,
_object_type=object_type, **kw)
@property
def object_type(self):
return self._object_type
|
'''define the config file for cocostuff and resnet101os8'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'cocostuff',
'rootdir': os.path.join(os.getcwd(), 'COCO'),
})
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 30
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify segmentor config
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 182,
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'memorynet_resnet101os8_cocostuff_train',
'logfilepath': 'memorynet_resnet101os8_cocostuff_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'memorynet_resnet101os8_cocostuff_test',
'logfilepath': 'memorynet_resnet101os8_cocostuff_test/test.log',
'resultsavepath': 'memorynet_resnet101os8_cocostuff_test/memorynet_resnet101os8_cocostuff_results.pkl'
}
)
|
from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import pkg_resources
import logging
from collections import namedtuple
logger = logging.getLogger(__name__)
CONTROL_QUEST = '/source/dir/simglucose/params/Quest.csv'
PATIENT_PARA_FILE = '/source/dir/simglucose/params/vpatient_params.csv'
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
class BBController(Controller):
def __init__(self, target=140):
self.quest = pd.read_csv(CONTROL_QUEST)
self.patient_params = pd.read_csv(
PATIENT_PARA_FILE)
self.target = target
def policy(self, observation, reward, done, **kwargs):
sample_time = kwargs.get('sample_time', 1)
pname = kwargs.get('patient_name')
meal = kwargs.get('meal')
action = self._bb_policy(
pname,
meal,
observation.CGM,
sample_time)
return action
def _bb_policy(self, name, meal, glucose, env_sample_time):
if any(self.quest.Name.str.match(name)):
q = self.quest[self.quest.Name.str.match(name)]
params = self.patient_params[self.patient_params.Name.str.match(
name)]
u2ss = np.asscalar(params.u2ss.values)
BW = np.asscalar(params.BW.values)
else:
q = pd.DataFrame([['Average', 13.5, 23.52, 50, 30]],
columns=['Name', 'CR', 'CF', 'TDI', 'Age'])
u2ss = 1.43
BW = 57.0
basal = u2ss * BW / 6000
if meal > 0:
logger.info('Calculating bolus ...')
logger.debug('glucose = {}'.format(glucose))
bolus = np.asscalar(meal / q.CR.values + (glucose > 150)
* (glucose - self.target) / q.CF.values)
else:
bolus = 0
bolus = bolus / env_sample_time
action = Action(basal=basal, bolus=bolus)
return action
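    # Worked example of the bolus rule above (numbers are assumed for
    # illustration): with a 30 g meal, CR=10, CF=40, glucose=190 and
    # target=140, bolus = 30/10 + (190 - 140)/40 = 4.25 U before dividing by
    # the environment sample time.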
def reset(self):
pass
class ManualBBController(Controller):
def __init__(self, target, cr, cf, basal, sample_rate=5, use_cf=True, use_bol=True, cooldown=0,
corrected=True, use_low_lim=False, low_lim=70):
super().__init__(self)
self.target = target
self.orig_cr = self.cr = cr
self.orig_cf = self.cf = cf
self.orig_basal = self.basal = basal
self.sample_rate = sample_rate
self.use_cf = use_cf
self.use_bol = use_bol
self.cooldown = cooldown
self.last_cf = np.inf
self.corrected = corrected
        self.use_low_lim = use_low_lim
self.low_lim = low_lim
def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):
self.cr += cr_incr
self.cf += cf_incr
self.basal += basal_incr
def policy(self, observation, reward, done, **kwargs):
carbs = kwargs.get('carbs')
glucose = kwargs.get('glucose')
action = self.manual_bb_policy(carbs, glucose)
return action
def manual_bb_policy(self, carbs, glucose, log=False):
if carbs > 0:
if self.corrected:
carb_correct = carbs / self.cr
else:
# assuming carbs are already multiplied by sampling rate
carb_correct = (carbs/self.sample_rate) / self.cr # TODO: not sure about this
hyper_correct = (glucose > self.target) * (glucose - self.target) / self.cf
hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf
bolus = 0
if self.use_low_lim:
bolus -= hypo_correct
if self.use_cf:
if self.last_cf > self.cooldown and hyper_correct > 0:
bolus += hyper_correct
self.last_cf = 0
if self.use_bol:
bolus += carb_correct
bolus = bolus / self.sample_rate
else:
bolus = 0
carb_correct = 0
hyper_correct = 0
hypo_correct = 0
self.last_cf += self.sample_rate
if log:
return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct
else:
return Action(basal=self.basal, bolus=bolus)
def get_params(self):
return ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)
def adjust(self, basal_adj, cr_adj):
self.basal += self.orig_basal * basal_adj
self.cr += self.orig_cr * cr_adj
def reset(self):
self.cr = self.orig_cr
self.cf = self.orig_cf
self.basal = self.orig_basal
self.last_cf = np.inf
class MyController(Controller):
def __init__(self, init_state):
self.init_state = init_state
self.state = init_state
def policy(self, observation, reward, done, **info):
'''
Every controller must have this implementation!
----
Inputs:
observation - a namedtuple defined in simglucose.simulation.env. For
now, it only has one entry: blood glucose level measured
by CGM sensor.
reward - current reward returned by environment
done - True, game over. False, game continues
info - additional information as key word arguments,
simglucose.simulation.env.T1DSimEnv returns patient_name
and sample_time
----
Output:
        action - a namedtuple imported at the top of this file. The
controller action contains two entries: basal, bolus
'''
self.state = observation
action = Action(basal=0, bolus=0)
return action
def reset(self):
'''
Reset the controller state to inital state, must be implemented
'''
self.state = self.init_state
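# Illustrative usage sketch (the environment wiring is assumed, following the
# simglucose T1DSimEnv interface referenced in the docstring above; it is not
# part of this file):
#   ctrl = MyController(init_state=None)
#   action = ctrl.policy(observation, reward=0, done=False,
#                        patient_name='adolescent#001', sample_time=3)
#   # action.basal == 0 and action.bolus == 0 for this do-nothing controller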
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared implementation of connections to API servers."""
import json
from pkg_resources import get_distribution
import six
from six.moves.urllib.parse import urlencode # pylint: disable=F0401
import httplib2
from gcloud.credentials import get_credentials
from gcloud.exceptions import make_exception
API_BASE_URL = 'https://www.googleapis.com'
"""The base of the API call URL."""
class Connection(object):
"""A generic connection to Google Cloud Platform.
Subclasses should understand only the basic types in method arguments,
however they should be capable of returning advanced types.
If no value is passed in for ``http``, a :class:`httplib2.Http` object
will be created and authorized with the ``credentials``. If not, the
``credentials`` and ``http`` need not be related.
Subclasses may seek to use the private key from ``credentials`` to sign
data.
A custom (non-``httplib2``) HTTP object must have a ``request`` method
which accepts the following arguments:
* ``uri``
* ``method``
* ``body``
* ``headers``
In addition, ``redirections`` and ``connection_type`` may be used.
Without the use of ``credentials.authorize(http)``, a custom ``http``
object will also need to be able to add a bearer token to API
requests and handle token refresh on 401 errors.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for this connection.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests.
"""
USER_AGENT = "gcloud-python/{0}".format(get_distribution('gcloud').version)
"""The user agent for gcloud-python requests."""
def __init__(self, credentials=None, http=None):
self._http = http
self._credentials = credentials
@property
def credentials(self):
"""Getter for current credentials.
:rtype: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:returns: The credentials object associated with this connection.
"""
return self._credentials
@property
def http(self):
"""A getter for the HTTP transport used in talking to the API.
:rtype: :class:`httplib2.Http`
:returns: A Http object used to transport data.
"""
if self._http is None:
self._http = httplib2.Http()
if self._credentials:
self._http = self._credentials.authorize(self._http)
return self._http
class JSONConnection(Connection):
"""A connection to a Google JSON-based API.
These APIs are discovery based. For reference:
https://developers.google.com/discovery/
This defines :meth:`Connection.api_request` for making a generic JSON
API request and API requests are created elsewhere.
The class constants
* ``API_BASE_URL``
* ``API_VERSION``
* ``API_URL_TEMPLATE``
must be updated by subclasses.
"""
API_BASE_URL = None
"""The base of the API call URL."""
API_VERSION = None
"""The version of the API, used in building the API call's URL."""
API_URL_TEMPLATE = None
"""A template for the URL of a particular API call."""
@classmethod
def build_api_url(cls, path, query_params=None,
api_base_url=None, api_version=None):
"""Construct an API url given a few components, some optional.
Typically, you shouldn't need to use this method.
:type path: string
:param path: The path to the resource (ie, ``'/b/bucket-name'``).
:type query_params: dict
:param query_params: A dictionary of keys and values to insert into
the query string of the URL.
:type api_base_url: string
:param api_base_url: The base URL for the API endpoint.
Typically you won't have to provide this.
:type api_version: string
:param api_version: The version of the API to call.
Typically you shouldn't provide this and instead
use the default for the library.
:rtype: string
:returns: The URL assembled from the pieces provided.
"""
        api_base_url = api_base_url or cls.API_BASE_URL
        api_version = api_version or cls.API_VERSION
        url = cls.API_URL_TEMPLATE.format(
            api_base_url=api_base_url,
            api_version=api_version,
            path=path)
query_params = query_params or {}
if query_params:
url += '?' + urlencode(query_params)
return url
def _make_request(self, method, url, data=None, content_type=None,
headers=None):
"""A low level method to send a request to the API.
Typically, you shouldn't need to use this method.
:type method: string
:param method: The HTTP method to use in the request.
:type url: string
:param url: The URL to send the request to.
:type data: string
:param data: The data to send as the body of the request.
:type content_type: string
:param content_type: The proper MIME type of the data provided.
:type headers: dict
:param headers: A dictionary of HTTP headers to send with the request.
:rtype: tuple of ``response`` (a dictionary of sorts)
and ``content`` (a string).
:returns: The HTTP response object and the content of the response,
returned by :meth:`_do_request`.
"""
headers = headers or {}
headers['Accept-Encoding'] = 'gzip'
if data:
content_length = len(str(data))
else:
content_length = 0
headers['Content-Length'] = content_length
if content_type:
headers['Content-Type'] = content_type
headers['User-Agent'] = self.USER_AGENT
return self._do_request(method, url, headers, data)
def _do_request(self, method, url, headers, data):
"""Low-level helper: perform the actual API request over HTTP.
Allows batch context managers to override and defer a request.
:type method: string
:param method: The HTTP method to use in the request.
:type url: string
:param url: The URL to send the request to.
:type headers: dict
:param headers: A dictionary of HTTP headers to send with the request.
:type data: string
:param data: The data to send as the body of the request.
:rtype: tuple of ``response`` (a dictionary of sorts)
and ``content`` (a string).
:returns: The HTTP response object and the content of the response.
"""
return self.http.request(uri=url, method=method, headers=headers,
body=data)
def api_request(self, method, path, query_params=None,
data=None, content_type=None,
api_base_url=None, api_version=None,
expect_json=True):
"""Make a request over the HTTP transport to the API.
You shouldn't need to use this method, but if you plan to
interact with the API using these primitives, this is the
correct one to use.
:type method: string
:param method: The HTTP method name (ie, ``GET``, ``POST``, etc).
Required.
:type path: string
:param path: The path to the resource (ie, ``'/b/bucket-name'``).
Required.
:type query_params: dict
:param query_params: A dictionary of keys and values to insert into
the query string of the URL. Default is
empty dict.
:type data: string
:param data: The data to send as the body of the request. Default is
the empty string.
:type content_type: string
:param content_type: The proper MIME type of the data provided. Default
is None.
:type api_base_url: string
:param api_base_url: The base URL for the API endpoint.
Typically you won't have to provide this.
Default is the standard API base URL.
:type api_version: string
:param api_version: The version of the API to call. Typically
you shouldn't provide this and instead use
the default for the library. Default is the
latest API version supported by
gcloud-python.
:type expect_json: boolean
:param expect_json: If True, this method will try to parse the
response as JSON and raise an exception if
that cannot be done. Default is True.
        :raises: Exception if the response code is not in the 2xx success range.
"""
url = self.build_api_url(path=path, query_params=query_params,
api_base_url=api_base_url,
api_version=api_version)
# Making the executive decision that any dictionary
# data will be sent properly as JSON.
if data and isinstance(data, dict):
data = json.dumps(data)
content_type = 'application/json'
response, content = self._make_request(
method=method, url=url, data=data, content_type=content_type)
if not 200 <= response.status < 300:
raise make_exception(response, content)
if content and expect_json:
content_type = response.get('content-type', '')
if not content_type.startswith('application/json'):
raise TypeError('Expected JSON, got %s' % content_type)
if isinstance(content, six.binary_type):
content = content.decode('utf-8')
return json.loads(content)
return content
def get_scoped_connection(klass, scopes):
"""Create a scoped connection to GCloud.
:type klass: subclass of :class:`gcloud.connection.Connection`
:param klass: the specific ``Connection`` class to instantiate.
:type scopes: list of URLs
:param scopes: the effective service auth scopes for the connection.
:rtype: instance of ``klass``
:returns: A connection defined with the proper credentials.
"""
implicit_credentials = get_credentials()
scoped_credentials = implicit_credentials.create_scoped(scopes)
return klass(credentials=scoped_credentials)
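# Illustrative sketch of binding a concrete API to JSONConnection (the class
# name, version, URL template and scope below are assumed for demonstration,
# not taken from gcloud):
#
#   class ExampleConnection(JSONConnection):
#       API_BASE_URL = 'https://www.googleapis.com'
#       API_VERSION = 'v1'
#       API_URL_TEMPLATE = '{api_base_url}/example/{api_version}{path}'
#
#   conn = get_scoped_connection(
#       ExampleConnection, ['https://www.googleapis.com/auth/example.scope'])
#   items = conn.api_request(method='GET', path='/items',
#                            query_params={'maxResults': 10})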
|
from __future__ import division
from thorpy.elements.element import Element
from thorpy.miscgui.constants import STATE_NORMAL
class OneLineText(Element):
def __init__(self, text="", elements=None, normal_params=None):
Element.__init__(self, text, elements, normal_params)
def finish(self):
self.set_style("text")
Element.finish(self)
class MultilineText(Element):
def __init__(self, text="", size=None, elements=None, normal_params=None):
Element.__init__(self, text, elements, normal_params)
self._size = size
self.visible = False
def finish(self):
Element.finish(self)
if not self._size:
self._size = self.get_fus_rect()
self.set_size(self._size)
for line in self.get_lines(STATE_NORMAL):
e = OneLineText(line)
e.finish()
e.set_writer(self.current_state.fusionner.title._writer)
self.add_elements([e])
self.format_txt()
def build_elements(self):
for e in self._elements:
e.father = None
self._elements = []
self._blit_before = []
self._blit_after = []
self.set_size(self._size)
for line in self.get_lines(STATE_NORMAL):
e = OneLineText(line)
e.finish()
e.set_writer(self.current_state.fusionner.title._writer)
self.add_elements([e])
self.format_txt()
def format_txt(self):
title = self._states[STATE_NORMAL].fusionner.title
(x, y) = title._pos
r = title.get_rect()
for i in self._elements:
(w, h) = i.get_fus_size()
            if title._align == "left":
                x = title._pos[0]
            elif title._align == "center":
                x = (r.width - w) // 2
            elif title._align == "right":
                x = r.width - w
i.set_topleft((x, y))
y += title._space + h
def set_font_color(self, color, state=None, center_title=True):
"""set font color for a given state"""
Element.set_font_color(self, color, state, center_title)
self.build_elements()
        # restore the proper color, etc.
def set_font_size(self, size, state=None, center_title=True):
"""set font color for a given state"""
Element.set_font_size(self, size, state, center_title)
self.build_elements()
def set_font_effects(self, biu, state=None, center=True, preserve=False):
"""biu = tuple : (bold, italic, underline)"""
Element.set_font_effects(self, biu, state, center, preserve)
self.build_elements()
|
# -*- coding: utf-8 -*-
"""
@FileName: __init__.py
@Time: 2020/2/7 20:11
@Author: zhaojm
Module Description
"""
|
from django.db import models
from djangae import patches
class CounterShard(models.Model):
count = models.PositiveIntegerField()
label = models.CharField(max_length=500)
class Meta:
app_label = "djangae"
|
# Generated by Django 3.2.5 on 2021-11-11 05:59
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('galeria', '0005_auto_20211111_0052'),
]
operations = [
migrations.AlterField(
model_name='post',
name='published',
field=models.DateTimeField(default=datetime.datetime(2021, 11, 11, 5, 59, 15, 363915), verbose_name='Fecha de publicación'),
),
]
|
from scrapy import Spider
class AuthorSpider(Spider):
name = 'author'
start_urls = [
'http://quotes.toscrape.com/',
]
def parse(self, response):
#follow links to author pages
for href in response.css('.author + a::attr(href)'):
yield response.follow(href, callback=self.parse_author)
#follow pagination links
for href in response.css('li.next a::attr(href)'):
yield response.follow(href, callback=self.parse)
def parse_author(self, response):
def extract_with_css(query):
return response.css(query).extract_first().strip()
        yield {
'name': extract_with_css('h3.author-title::text'),
'birthdate': extract_with_css('.author-born-date::text'),
'bio': extract_with_css('.author-description::text')
}
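# Illustrative run (the spider file name and output path are assumed):
#   scrapy runspider author_spider.py -o authors.json
# This crawls quotes.toscrape.com, follows each author link plus the
# pagination links, and emits one item per author with name, birthdate and bio.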
|
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialogButtonBox
from . import get_main_window, close_application
NO_OF_ENVIRONMENTS = 5
NO_OF_ENVIRONMENTS_TO_DELETE = 3
NO_OF_ENVIRONMENTS_TO_RE_ADD = 1
def get_toolbar_environments_combo(window):
return window.environment_list_view.get_environment_list_combo()
def show_window(qtbot, clear_environments=True):
window = get_main_window()
qtbot.addWidget(window)
if clear_environments:
window.world.environment_store.clear_environments()
window.environment_view.show_dialog()
return window
def add_environments(qtbot, window, number):
for i in range(number):
qtbot.mouseClick(window.environment_view.btn_add_environment, QtCore.Qt.LeftButton)
def remove_environments(qtbot, window, number):
for i in range(number):
qtbot.mouseClick(window.environment_view.btn_remove_environment, QtCore.Qt.LeftButton)
def close_and_save_environments(qtbot, window):
ok_button = window.environment_view.btn_dialog_close.button(QDialogButtonBox.Ok)
qtbot.mouseClick(ok_button, QtCore.Qt.LeftButton)
def close_and_discard_changes(qtbot, window):
cancel_button = window.environment_view.btn_dialog_close.button(QDialogButtonBox.Cancel)
qtbot.mouseClick(cancel_button, QtCore.Qt.LeftButton)
def test_adding_removing_env(qtbot):
# given
window = show_window(qtbot)
# when
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# then
assert window.environment_view.lst_environments.count() == NO_OF_ENVIRONMENTS
# remove
remove_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# and close dialog
close_and_save_environments(qtbot, window)
# and re-open
window.environment_view.show_dialog()
# check environments in toolbar
assert get_toolbar_environments_combo(window).count() == 0
# then
assert window.environment_view.lst_environments.count() == 0
def test_renaming_environment(qtbot):
# given a window
window = show_window(qtbot)
# add a few environments
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# select an environment from list
window.environment_view.lst_environments.setCurrentRow(2)
currently_selected = window.environment_view.lst_environments.currentItem()
# edit list item
new_environment_name = "Development"
currently_selected.setText(new_environment_name)
# save and close application
close_and_save_environments(qtbot, window)
# get environments from controller
environments = [e.name for e in window.environment_list_view.world.environment_store.get_environments()]
assert new_environment_name in environments
def test_saving_envs(qtbot):
# given
window = show_window(qtbot)
# and (adding a few environments)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# when
close_and_save_environments(qtbot, window)
# then
environments = window.world.environment_store.get_environments()
assert len(environments) == NO_OF_ENVIRONMENTS, "Environments not being saved in database"
# and (re-opening the dialog box after close)
window.environment_view.show_dialog()
# then
assert window.environment_view.lst_environments.count() == NO_OF_ENVIRONMENTS, \
"Seems like the dialog box is reloading environments"
def test_loading_envs(qtbot):
# given
window = show_window(qtbot)
# and (adding a few environments)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# and (save)
close_and_save_environments(qtbot, window)
# and (close app)
close_application(window)
# when
window = show_window(qtbot, clear_environments=False)
# then
env_list_combo = get_toolbar_environments_combo(window)
assert env_list_combo.count() == NO_OF_ENVIRONMENTS, \
"Environments not loaded in toolbar on fresh re-start"
# and
assert window.environment_view.lst_environments.count() == NO_OF_ENVIRONMENTS, \
"Environments not being loaded from database on a fresh re-start"
def test_discard_envs_changes_on_cancel(qtbot):
# given
window = show_window(qtbot)
# when
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# then
close_and_discard_changes(qtbot, window)
# then
environments = window.world.environment_store.get_environments()
assert len(environments) == 0
def test_discard_envs_changes_on_esc(qtbot):
# given
window = show_window(qtbot)
# when
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# then
qtbot.keyClick(window.environment_view.lst_environments, Qt.Key_Escape)
# then
environments = window.world.environment_store.get_environments()
assert len(environments) == 0
def test_refresh_toolbar_after_adding_deleting_envs(qtbot):
# given
window = show_window(qtbot)
# and (adding a few environments)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# when (click ok to save environments)
close_and_save_environments(qtbot, window)
# then (check toolbar environments)
assert get_toolbar_environments_combo(window).count() == NO_OF_ENVIRONMENTS, \
"Environments not loaded in toolbar on after Environments Dialog close"
# and (re-opening the dialog box after close)
window.environment_view.show_dialog()
# and (delete 3 and add 1 environment(s))
remove_environments(qtbot, window, NO_OF_ENVIRONMENTS_TO_DELETE)
add_environments(qtbot, window, NO_OF_ENVIRONMENTS_TO_RE_ADD)
# and (click ok to save environments)
close_and_save_environments(qtbot, window)
# then (check toolbar environments)
remaining_environments = NO_OF_ENVIRONMENTS - NO_OF_ENVIRONMENTS_TO_DELETE + NO_OF_ENVIRONMENTS_TO_RE_ADD
assert get_toolbar_environments_combo(window).count() == remaining_environments, \
"Environments not loaded in toolbar on (deleting/re-adding) after Environments Dialog close"
def test_update_currently_selected_environment(qtbot):
# given (a window with few environments)
window = show_window(qtbot)
# and
add_environments(qtbot, window, NO_OF_ENVIRONMENTS)
# and
close_and_save_environments(qtbot, window)
# when (a new environment is selected from toolbar)
toolbar_environments = get_toolbar_environments_combo(window)
toolbar_environments.setCurrentIndex(3)
selected_environment = toolbar_environments.currentText()
# and application is closed
window.toolbar_controller.trigger_quit_application()
# and window is re-opened
window = show_window(qtbot)
# then the selected environment should be same as before
toolbar_environments = get_toolbar_environments_combo(window)
selected_environment_after_restart = toolbar_environments.currentText()
assert selected_environment == selected_environment_after_restart
|
"""
Integration tests for __main__.py
"""
# pragma pylint: disable=redefined-outer-name
from click.testing import CliRunner
import pytest
from traveling_salesperson import __main__ as main
def test_main_runs(mocker, filename_fixture):
"""Ensures that main() runs smoothly over a test file."""
mock_etl = mocker.spy(main, 'etl')
mock_distance = mocker.spy(main, 'distance_matrix')
mock_path = mocker.spy(main, 'determine_path')
mock_plot = mocker.spy(main, 'plot_path')
# Test cli interface
runner = CliRunner()
result = runner.invoke(main.main, ['-f', filename_fixture])
assert result.exit_code == 0
mock_etl.assert_called_once_with(filename_fixture)
mock_distance.assert_called_once()
mock_path.assert_called_once()
mock_plot.assert_called_once()
@pytest.mark.parametrize('arg_list,error_code',
[(['-x', 'bad_arg'], 2), # Command line error
(['-m', 'de-sitter'], 2), # Command line error
(['-f', 'bad_file'], 1)]) # File not found error
def test_main_fails_with_bad_argument(arg_list, error_code):
"""Ensures that main() has an error (code -1) when run with unsupported arguments."""
runner = CliRunner()
result = runner.invoke(main.main, arg_list)
assert result.exit_code == error_code
|
#!_PYTHONLOC
#
# (C) COPYRIGHT 2020 Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision: 418 $
# Date: $Date: 2019-05-15 10:10:07 -0400 (Wed, 15 May 2019) $
import cgi
import sys
import os
import string
import MySQLdb
from xml.dom import minidom
from localdefs import *
from library import *
def Date_or_None(s):
return s
def IsfdbConvSetup():
import MySQLdb.converters
IsfdbConv = MySQLdb.converters.conversions
IsfdbConv[10] = Date_or_None
return(IsfdbConv)
if __name__ == '__main__':
db = MySQLdb.connect(DBASEHOST, USERNAME, PASSWORD, conv=IsfdbConvSetup())
db.select_db(DBASE)
query = """select sub_id, sub_data from submissions
where sub_type=%d
and sub_state='I'
and affected_record_id is null""" % MOD_TITLE_MKVARIANT
db.query(query)
result = db.store_result()
record = result.fetch_row()
while record:
sub_id = record[0][0]
sub_data = record[0][1]
doc = minidom.parseString(XMLunescape2(sub_data))
merge = doc.getElementsByTagName('MakeVariant')
record_id = GetElementValue(merge, 'Record')
print sub_id, record_id
update = "update submissions set affected_record_id = %d where sub_id = %d" % (int(record_id), int(sub_id))
db.query(update)
record = result.fetch_row()
print "Total processed: %d" % int(result.num_rows())
|
""" TODO Module docstring
"""
# Threshold value under which a float will be treated as zero
MAX_ZERO_THRESHOLD_VALUE = 1.0e-14
# Minimum integration step size, in seconds
MINIMUM_STEP_SIZE_IN_SECONDS = 1.0e-9
# Number of whole nanoseconds per second
NANOSECONDS_PER_SECOND = int(1e9)
# Number of seconds per mean solar day
SECONDS_PER_SOLAR_DAY = 86400.0
# Number of seconds per minute
SECONDS_PER_MINUTE = 60.0
# Number of seconds per hour
SECONDS_PER_HOUR = 3600.0
# Earth gravitational constant, km^3 / s^2
EARTH_MU = 3.986004418e5
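# Worked illustration (figures assumed, shown only to make the units concrete):
# 1.5 mean solar days = 1.5 * SECONDS_PER_SOLAR_DAY = 129600.0 seconds,
# i.e. 129600.0 * NANOSECONDS_PER_SECOND = 129_600_000_000_000 ns.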
|
'''data_load module is for loading individual genedocs from various data sources.'''
from __future__ import print_function
import sys
import copy
import types
import time
import datetime
import importlib
from biothings.utils.mongo import get_src_conn, get_src_dump, get_data_folder
from biothings.utils.common import get_timestamp, get_random_string, timesofar, dump2gridfs, iter_n
from config import DATA_SRC_DATABASE, DATA_SRC_MASTER_COLLECTION
__sources_dict__ = {
'entrez': [
'entrez.entrez_gene',
'entrez.entrez_homologene',
'entrez.entrez_genesummary',
'entrez.entrez_accession',
'entrez.entrez_refseq',
'entrez.entrez_unigene',
'entrez.entrez_go',
'entrez.entrez_ec',
'entrez.entrez_retired',
'entrez.entrez_generif',
'entrez.entrez_genomic_pos',
],
'ensembl': [
'ensembl.ensembl_gene',
'ensembl.ensembl_acc',
'ensembl.ensembl_genomic_pos',
'ensembl.ensembl_prosite',
'ensembl.ensembl_interpro',
'ensembl.ensembl_pfam'
],
'uniprot': [
'uniprot',
'uniprot.uniprot_pdb',
# 'uniprot.uniprot_ipi', # IPI is now discontinued, last update is still in the db, but won't be updated.
'uniprot.uniprot_pir'
],
'pharmgkb': ['pharmgkb'],
'reporter': ['reporter'],
'ucsc': ['ucsc.ucsc_exons'],
'exac': ['exac.broadinstitute_exac'],
'cpdb': ['cpdb'],
'reagent': ['reagent'],
}
__sources__ = None # should be a list defined at runtime
conn = get_src_conn()
doc_register = {}
class GeneDocSourceMaster(dict):
'''A class to manage various genedoc data sources.'''
__collection__ = DATA_SRC_MASTER_COLLECTION
__database__ = DATA_SRC_DATABASE
use_dot_notation = True
use_schemaless = True
structure = {
'name': str,
'timestamp': datetime.datetime,
}
class GeneDocSource(dict):
'''A base class for all source data.'''
__collection__ = None # should be specified individually
__database__ = DATA_SRC_DATABASE
use_dot_notation = True
use_schemaless = True
DEFAULT_FIELDTYPE = str
temp_collection = None # temp collection is for dataloading
def make_temp_collection(self):
'''Create a temp collection for dataloading, e.g., entrez_geneinfo_INEMO.'''
new_collection = None
while 1:
new_collection = self.__collection__ + '_temp_' + get_random_string()
if new_collection not in self.db.collection_names():
break
self.temp_collection = self.db[new_collection]
return new_collection
def doc_iterator(self, genedoc_d, batch=True, step=10000):
if isinstance(genedoc_d, types.GeneratorType) and batch:
for doc_li in iter_n(genedoc_d, n=step):
yield doc_li
else:
if batch:
doc_li = []
i = 0
for _id, doc in genedoc_d.items():
doc['_id'] = _id
_doc = copy.copy(self)
_doc.clear()
_doc.update(doc)
#if validate:
# _doc.validate()
if batch:
doc_li.append(_doc)
i += 1
if i % step == 0:
yield doc_li
doc_li = []
else:
yield _doc
if batch:
yield doc_li
def load(self, genedoc_d=None, update_data=True, update_master=True, test=False, step=10000):
if not self.temp_collection:
self.make_temp_collection()
self.temp_collection.drop() # drop all existing records just in case.
if update_data:
genedoc_d = genedoc_d or self.load_genedoc()
print("genedoc_d mem: %s" % sys.getsizeof(genedoc_d))
print("Uploading to the DB...", end='')
t0 = time.time()
# for doc in self.doc_iterator(genedoc_d, batch=False):
# if not test:
# doc.save()
for doc_li in self.doc_iterator(genedoc_d, batch=True, step=step):
if not test:
self.temp_collection.insert(doc_li, manipulate=False, check_keys=False)
print('Done[%s]' % timesofar(t0))
self.switch_collection()
if getattr(self, 'ENTREZ_GENEDOC_ROOT', False):
print('Uploading "geneid_d" to GridFS...', end='')
t0 = time.time()
geneid_d = self.get_geneid_d()
dump2gridfs(geneid_d, self.__collection__ + '__geneid_d.pyobj', self.db)
print('Done[%s]' % timesofar(t0))
if getattr(self, 'ENSEMBL_GENEDOC_ROOT', False):
print('Uploading "mapping2entrezgene" to GridFS...', end='')
t0 = time.time()
x2entrezgene_list = self.get_mapping_to_entrez()
dump2gridfs(x2entrezgene_list, self.__collection__ + '__2entrezgene_list.pyobj', self.db)
print('Done[%s]' % timesofar(t0))
if update_master:
# update src_master collection
if not test:
_doc = {"_id": str(self.__collection__),
"name": str(self.__collection__),
"timestamp": datetime.datetime.now()}
for attr in ['ENTREZ_GENEDOC_ROOT', 'ENSEMBL_GENEDOC_ROOT', 'id_type']:
if hasattr(self, attr):
_doc[attr] = getattr(self, attr)
if hasattr(self, 'get_mapping'):
_doc['mapping'] = getattr(self, 'get_mapping')()
coll = conn[GeneDocSourceMaster.__database__][GeneDocSourceMaster.__collection__]
dkey = {"_id": _doc["_id"]}
prev = coll.find_one(dkey)
if prev:
coll.replace_one(dkey, _doc)
else:
coll.insert_one(_doc)
def switch_collection(self):
'''after a successful loading, rename temp_collection to regular collection name,
and renaming existing collection to a temp name for archiving purpose.
'''
if self.temp_collection and self.temp_collection.count() > 0:
if self.collection.count() > 0:
# renaming existing collections
new_name = '_'.join([self.__collection__, 'archive', get_timestamp(), get_random_string()])
self.collection.rename(new_name, dropTarget=True)
self.temp_collection.rename(self.__collection__)
else:
print("Error: load data first.")
@property
def collection(self):
return self.db[self.__collection__]
#def validate_all(self, genedoc_d=None):
# """validate all genedoc_d."""
# genedoc_d = genedoc_d or self.load_genedoc()
# for doc in self.doc_iterator(genedoc_d, batch=False, validate=True):
# pass
def register_sources():
for src in __sources__:
src_m = importlib.import_module('dataload.sources.' + src)
metadata = src_m.__metadata__
name = src + '_doc'
metadata['load_genedoc'] = src_m.load_genedoc
metadata['get_mapping'] = src_m.get_mapping
if metadata.get('ENTREZ_GENEDOC_ROOT', False):
metadata['get_geneid_d'] = src_m.get_geneid_d
if metadata.get('ENSEMBL_GENEDOC_ROOT', False):
metadata['get_mapping_to_entrez'] = src_m.get_mapping_to_entrez
src_cls = type(name, (GeneDocSource,), metadata)
# manually propagate db attr
src_cls.db = conn[src_cls.__database__]
doc_register[name] = src_cls
conn.register(src_cls)
# register_sources()
def get_src(src):
_src = conn[src + '_doc']()
return _src
def load_src(src, **kwargs):
_src = doc_register[src + '_doc']()
_src.load(**kwargs)
def update_mapping(src):
_src = conn[src + '_doc']()
_src.load(update_data=False, update_master=True)
def load_all(**kwargs):
for src in __sources__:
load_src(src, **kwargs)
def get_mapping():
mapping = {}
properties = {}
for src in __sources__:
print("Loading mapping from %s..." % src)
_src = conn[src + '_doc']()
_field_properties = _src.get_mapping()
properties.update(_field_properties)
mapping["properties"] = properties
# enable _source compression
mapping["_source"] = {"enabled": True,
"compress": True,
"compression_threshold": "1kb"}
return mapping
def update_mapping():
for src in __sources__:
colname = src.split(".")[-1]
col = conn[colname]
regdoc = doc_register[src + '_doc']
mastercol = conn[GeneDocSourceMaster.__database__][GeneDocSourceMaster.__collection__]
_doc = {"_id": str(colname),
"name": str(colname),
"timestamp": datetime.datetime.now(),
"mapping" : regdoc.get_mapping(regdoc)}
print("Updating mapping for source: %s" % repr(colname))
dkey = {"_id": _doc["_id"]}
prev = mastercol.find_one(dkey)
if prev:
mastercol.replace_one(dkey, _doc)
else:
mastercol.insert_one(_doc)
def main():
'''
Example:
python -m dataload ensembl.ensembl_gene ensembl.ensembl_acc ensembl.ensembl_genomic_pos ensembl.ensembl_prosite ensembl.ensembl_interpro
python -m dataload/__init__ entrez.entrez_gene entrez.entrez_homologene entrez.entrez_genesummary
entrez.entrez_accession entrez.entrez_refseq entrez.entrez_unigene entrez.entrez_go
entrez.entrez_ec entrez.entrez_retired
'''
global __sources__
__sources__ = sys.argv[1:]
register_sources()
load_all()
if __name__ == '__main__':
main()
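# Usage sketch (illustrative, not part of the original module): mirroring what
# main() does for the sources given on the command line, a single source can be
# registered and loaded programmatically; 'entrez.entrez_gene' below is just a
# hypothetical example taken from the docstring above.
#
#     __sources__ = ['entrez.entrez_gene']
#     register_sources()
#     load_src('entrez.entrez_gene', update_data=True, update_master=True)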
|
"""Rendering Related Tasks"""
from celery import shared_task
import newrelic.agent
from rendering.render_email import compose_email
from mailer.mailserver import deliver
@shared_task
def sample_email(to_address, user_id, email_id, election_id, district_ids):
"""Sample an email to an end user"""
result = compose_email(
user_id,
email_id,
election_id,
district_ids)
newrelic.agent.add_custom_parameter(
'organization_id', result['organization_id'])
newrelic.agent.add_custom_parameter(
'email_id', result['email_id'])
final_subject = u'[sample] {}'.format(result['subject'])
deliver(
to_address=to_address,
from_address=result['from_address'],
subject=final_subject,
html=result['body'])
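# Usage sketch (illustrative, not part of the original module): the task is
# queued through Celery like any other shared task; all identifiers below are
# hypothetical.
#
#     sample_email.delay('someone@example.com', user_id=1, email_id=2,
#                        election_id=3, district_ids=[4, 5])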
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: idcrack_unit_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='idcrack_unit_info.proto',
package='cmdb_extend',
syntax='proto3',
serialized_options=_b('ZEgo.easyops.local/contracts/protorepo-models/easyops/model/cmdb_extend'),
serialized_pb=_b('\n\x17idcrack_unit_info.proto\x12\x0b\x63mdb_extend\x1a\x1cgoogle/protobuf/struct.proto\"m\n\x0fIdcrackUnitInfo\x12\x13\n\x0binstance_id\x18\x01 \x01(\t\x12\x0c\n\x04unum\x18\x02 \x01(\x05\x12\x0c\n\x04name\x18\x03 \x01(\t\x12)\n\x08unitInfo\x18\x04 \x01(\x0b\x32\x17.google.protobuf.StructBGZEgo.easyops.local/contracts/protorepo-models/easyops/model/cmdb_extendb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_IDCRACKUNITINFO = _descriptor.Descriptor(
name='IdcrackUnitInfo',
full_name='cmdb_extend.IdcrackUnitInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instance_id', full_name='cmdb_extend.IdcrackUnitInfo.instance_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unum', full_name='cmdb_extend.IdcrackUnitInfo.unum', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='cmdb_extend.IdcrackUnitInfo.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unitInfo', full_name='cmdb_extend.IdcrackUnitInfo.unitInfo', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=179,
)
_IDCRACKUNITINFO.fields_by_name['unitInfo'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['IdcrackUnitInfo'] = _IDCRACKUNITINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IdcrackUnitInfo = _reflection.GeneratedProtocolMessageType('IdcrackUnitInfo', (_message.Message,), {
'DESCRIPTOR' : _IDCRACKUNITINFO,
'__module__' : 'idcrack_unit_info_pb2'
# @@protoc_insertion_point(class_scope:cmdb_extend.IdcrackUnitInfo)
})
_sym_db.RegisterMessage(IdcrackUnitInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
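# Usage sketch (illustrative, not part of the generated module): constructing
# and serializing an IdcrackUnitInfo message; the field values are hypothetical
# and the snippet assumes a protobuf runtime compatible with this generated code.
if __name__ == '__main__':
    info = IdcrackUnitInfo(instance_id='idc-001', unum=3, name='unit-3')
    info.unitInfo.update({'rack': 'A-01', 'capacity': 42})  # google.protobuf.Struct behaves like a dict
    payload = info.SerializeToString()
    print(IdcrackUnitInfo.FromString(payload))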
|
from Jumpscale import j
class Package(j.baseclasses.threebot_package):
def prepare(self):
"""
is called at install time
:return:
"""
pass
def start(self):
"""
called when the 3bot starts
:return:
"""
## TODO: BAD
# self.db.models_add(path=self.package_root + "/models")
# self.gedis_server.actors_add(j.sal.fs.joinPaths(self.package_root, "actors"))
server = self.openresty
website = server.get_from_port(443)
locations = website.locations.get("threebotapp_locations")
website_location = locations.locations_spa.new()
website_location.name = "capacity"
website_location.path_url = "/capacity"
# website_location.use_jumpscale_weblibs = False
fullpath = j.sal.fs.joinPaths(self.package_root, "html/")
website_location.path_location = fullpath
locations.configure()
website.configure()
def stop(self):
"""
called when the 3bot stops
:return:
"""
pass
def uninstall(self):
"""
called when the package is no longer needed and will be removed from the threebot
:return:
"""
# TODO: clean up bcdb ?
pass
|
from django.apps import AppConfig
class Classworkapp1Config(AppConfig):
name = 'classworkApp1'
|
#!/usr/bin/python
import sys
def to_claim(line):
cid, _, location, dimensions = line.split()
cid = int(cid[1:])
x, y = map(int, location[:-1].split(','))
w, h = map(int, dimensions.split('x'))
return cid, x, y, w, h
claims = map(to_claim, sys.stdin)
# build bitmap
bitmap = [None] * (1000 * 1000)
for cid, x0, y0, w, h in claims:
for x in range(x0, x0 + w):
for y in range(y0, y0 + h):
i = y * 1000 + x
bitmap[i] = cid if bitmap[i] is None else 'X'
# counts
counts = {}
for v in bitmap:
count = counts.setdefault(v, 0)
counts[v] = count + 1
# part 1
print counts['X']
# part 2
for cid, _, _, w, h in claims:
if counts.get(cid, 0) == w * h:
print cid
break
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from .util import load_module
class TermParserFactory(object):
@staticmethod
def build_from_conf(conf):
args = {k: conf[k] for k in ['default_fields', 'aliases', 'integer_as_string'] if k in conf}
return TermParser(**args) if not 'class' in conf else load_module(conf['class'])(**args)
@staticmethod
def build_default():
return TermParser()
class TermParser(object):
"""
Parse and build a term from the grammar matches. A Term represents a query component that can have a specific field
to look for, or a default one, a field type, the value required for that field and the type of value.
TermParser defines methods to be used in combination with :class:Grammar as the callbacks for the pyparsing
setParseAction method.
Callback parameters are always:
- matched string from query string
- position of the match
- pyparsing token list
"""
def __init__(self, default_fields=['default'], aliases=None, integer_as_string=False):
self._default_fields = default_fields
self._field_name_aliases = aliases if aliases else {}
self._integers_as_string = integer_as_string
def _build_field_data(self, field_values, field_type):
return {Term.FIELD: field_values, Term.FIELD_TYPE: field_type}
def _build_value_data(self, value, value_type):
return {Term.VAL: value, Term.VAL_TYPE: value_type}
def _build_term_with_default_fields(self, value_dict):
default_fields = self._default_fields[0] if len(self._default_fields) == 1 else self._default_fields
r = self._build_field_data(default_fields, Term.DEFAULT)
r.update(value_dict)
return r
@property
def aliases(self):
return self._field_name_aliases
def term_parse(self, string, location, tokens):
"""
Term parse receives a list with the components of a query term, the fields to look for and the desired value.
Those components are expanded by field_parse and integer_parse (or whichever value parser matched) into a dictionary
specifying the field_type and field value as well as the value_type and value. Thus, tokens[0] contains one element
for the field data and another for the value data. If there is only one item, it means no field was specified, only
a value, and so we treat it as a default field, which can be configured to be expanded to several fields.
If tokens[0] has 2 elements:
> tokens[0][0]: field dict
> tokens[0][1]: value dict
If tokens[0] has 1 element:
> tokens[0][0]: value dict
"""
if tokens:
if len(tokens[0]) == 1: # If there was no field specified, use the default
r = self._build_term_with_default_fields(tokens[0][0])
else:
r = tokens[0][0]
r.update(tokens[0][1])
return Term(**r)
def keyword_parse(self, string=None, location=None, tokens=None):
"""
Keywords are defined externally, so values are restricted to the ones accepted/defined. They are always treated as
strings, and the parsing method therefore receives a token list with <keyword>, <separator>, <value>
> e.g.: has:notification => token list would be ['has', ':', 'notification']
"""
if tokens:
fields = [f for f in "".join(tokens).split(":") if f]
output = self._build_field_data(fields[0], Term.KEYWORD)
output.update(self._build_value_data(fields[1], Term.KEYWORD_VALUE))
return output
def field_parse(self, string, location, tokens):
"""
Fields are whatever comes before a separator and they are usually used for attribute/property matching. The value
of a field is parsed separately from the field name, and it depends on the definition of the grammar and the
accepted/supported values. Thus this method receives a token list with <field name> <separator>.
If combined or nested fields are allowed, the pattern would be:
<field name> <separator> <field name> <separator> ...
> e.g.: address:zip:ABC1234 => token list would be ['address', ':', 'zip']
"""
if tokens:
fields = [f for f in "".join(tokens).split(":") if f]
t = fields if len(fields) > 1 else fields[0]
field_value = self._field_name_aliases.get(t, t)
return self._build_field_data(field_value, Term.ATTRIBUTE)
def integer_parse(self, string, location, tokens):
if tokens:
r = self._build_value_data(int(tokens[0]), Term.INT)
if self._integers_as_string:
r[Term.VAL_TYPE] = Term.PARTIAL_STRING
r[Term.VAL] = str(r[Term.VAL])
return r
def integer_comparison_parse(self, string, location, tokens):
if tokens:
val = int(tokens[1]) if not self._integers_as_string else tokens[1]
for symbol, value_type in [('<', Term.LOWER_THAN), ('<=', Term.LOWER_EQUAL_THAN),
('>', Term.GREATER_THAN), ('>=', Term.GREATER_EQUAL_THAN)]:
if tokens[0] == symbol:
return self._build_value_data(val, value_type)
raise Exception("Invalid comparison symbol!") # should never get here since pyparsing would fail before
def quoted_string_parse(self, string, location, tokens):
if tokens:
return self._build_value_data(tokens[0], Term.EXACT_STRING if '*' not in tokens[0] else Term.PARTIAL_STRING)
def partial_string_parse(self, string, location, tokens):
if tokens:
return self._build_value_data(tokens[0], Term.PARTIAL_STRING)
def range_parse(self, string, location, tokens):
if tokens:
return self._build_value_data([tokens[0][Term.VAL], tokens[2][Term.VAL]],
Term.RANGE % tokens[0][Term.VAL_TYPE])
class Term(dict):
# value types
RANGE = "%s_range"
INT = 'int'
EXACT_STRING = 'exact_string'
PARTIAL_STRING = 'partial_string'
KEYWORD_VALUE = 'keyword_value'
GREATER_THAN = 'greater_than'
GREATER_EQUAL_THAN = 'greater_equal_than'
LOWER_THAN = 'lower_than'
LOWER_EQUAL_THAN = 'lower_equal_than'
# field types
KEYWORD = 'keyword'
DEFAULT = 'default'
ATTRIBUTE = 'attribute'
# term keys
FIELD = 'field'
FIELD_TYPE = 'field_type'
VAL = 'val'
VAL_TYPE = 'val_type'
def __getattr__(self, key):
if key in self:
return self[key]
else:
raise AttributeError("Term doesn't have attribute '%s'" % key)
@property
def field(self):
return self[self.FIELD] if self.FIELD in self else None
@property
def field_type(self):
return self[self.FIELD_TYPE] if self.FIELD_TYPE in self else None
@property
def value(self):
return self[self.VAL] if self.VAL in self else None
@property
def value_type(self):
return self[self.VAL_TYPE] if self.VAL_TYPE in self else None
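# Usage sketch (illustrative, not part of the original module): a Term is just a
# dict with convenience accessors, so the output of the parser callbacks can be
# emulated directly; the field and value below are hypothetical.
if __name__ == '__main__':
    term = Term(field='name', field_type=Term.ATTRIBUTE,
                val='john*', val_type=Term.PARTIAL_STRING)
    print(term.field, term.field_type, term.value, term.value_type)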
|
from django.contrib.postgres.fields import JSONField
from django.db import models
from django_pgviews import view as pgviews
from cove.input.models import SuppliedData
from .bluetail_models import Flag
class OCDSPackageDataJSON(models.Model):
"""
Model to store OCDS JSON package data.
"""
package_data = JSONField(null=True)
supplied_data = models.ForeignKey(SuppliedData, on_delete=None, null=True)
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_package_data_json'
class OCDSPackageData(pgviews.View):
"""
Database view exposing fields extracted from the stored OCDS JSON package data.
"""
package_data = JSONField()
supplied_data = models.ForeignKey(SuppliedData, on_delete=None)
uri = models.TextField()
publishedDate = models.DateTimeField()
publisher = JSONField()
publisher_uid = models.TextField()
publisher_uri = models.TextField()
publisher_name = models.TextField()
publisher_scheme = models.TextField()
extensions = JSONField()
sql = """
SELECT
package.id,
package.supplied_data_id,
package.package_data ->> 'uri' as uri,
package.package_data ->> 'license' as license,
package.package_data ->> 'version' as version,
package.package_data ->> 'publishedDate' as publishedDate,
package.package_data ->> 'publicationPolicy' as publicationPolicy,
package.package_data -> 'packages' as packages,
package.package_data -> 'publisher' as publisher,
package.package_data -> 'publisher' ->> 'uid' as publisher_uid,
package.package_data -> 'publisher' ->> 'uri' as publisher_uri,
package.package_data -> 'publisher' ->> 'name' as publisher_name,
package.package_data -> 'publisher' ->> 'scheme' as publisher_scheme,
package.package_data -> 'extensions' as extensions
FROM bluetail_ocds_package_data_json package
"""
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_package_data_view'
managed = False
class OCDSRecordJSON(models.Model):
"""
Model to store OCDS JSON records.
"""
ocid = models.TextField(primary_key=True)
record_json = JSONField()
package_data = models.ForeignKey(OCDSPackageDataJSON, on_delete=None, null=True)
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_record_json'
verbose_name_plural = 'OCDS JSON Records'
class OCDSReleaseJSON(pgviews.View):
"""
Database view exposing the compiled release extracted from each stored OCDS record.
OCID must be unique so multiple releases for a single OCID should be compiled before insertion.
"""
ocid = models.TextField(primary_key=True)
release_id = models.TextField()
release_json = JSONField()
package_data = models.ForeignKey(OCDSPackageDataJSON, on_delete=None, null=True)
sql = """
SELECT
ocds.ocid,
ocds.record_json -> 'compiledRelease' ->> 'id' as release_id,
ocds.record_json -> 'compiledRelease' as release_json,
ocds.package_data_id
FROM bluetail_ocds_record_json ocds
"""
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_release_json_view'
managed = False
class OCDSTender(pgviews.View):
"""
django-pg-views for extracting Tender details from an OCDSReleaseJSON object
Tender as from an OCDS version 1.1 release
https://standard.open-contracting.org/latest/en/schema/reference/#tender
"""
# projection = ['bluetail.OCDSReleaseJSON.*', ]
# dependencies = ['bluetail.OtherView',]
ocid = models.TextField(primary_key=True)
release_id = models.TextField()
release_json = JSONField()
package_data_id = models.TextField()
title = models.TextField()
description = models.TextField()
value = models.FloatField()
currency = models.TextField()
release_date = models.DateTimeField()
tender_startdate = models.DateTimeField()
tender_enddate = models.DateTimeField()
buyer = models.TextField()
buyer_id = models.TextField()
sql = """
SELECT
ocds.ocid,
ocds.release_id,
ocds.release_json,
ocds.package_data_id,
ocds.release_json -> 'tag' as release_tag,
ocds.release_json ->> 'language' AS language,
ocds.release_json -> 'tender' ->> 'title' AS title,
ocds.release_json -> 'tender' ->> 'description' AS description,
ocds.release_json -> 'tender' -> 'value' ->> 'amount' AS value,
ocds.release_json -> 'tender' -> 'value' ->> 'currency' AS currency,
cast(NULLIF(ocds.release_json ->> 'date', '') AS TIMESTAMPTZ) AS release_date,
cast(NULLIF(ocds.release_json -> 'tender' -> 'tenderPeriod' ->> 'startDate', '') AS TIMESTAMPTZ) AS tender_startdate,
cast(NULLIF(ocds.release_json -> 'tender' -> 'tenderPeriod' ->> 'endDate', '') AS TIMESTAMPTZ) AS tender_enddate,
ocds.release_json -> 'buyer' ->> 'name' AS buyer,
ocds.release_json -> 'buyer' ->> 'id' AS buyer_id
FROM bluetail_ocds_release_json_view ocds
"""
@property
def flags(self):
return Flag.objects.filter(flagattachment__ocid=self.ocid)
@property
def total_warnings(self):
return self.flags.filter(flag_type="warning").count()
@property
def total_errors(self):
return self.flags.filter(flag_type="error").count()
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_tender_view'
managed = False
class OCDSTenderer(pgviews.View):
"""
View for extracting Party details from an OCDSReleaseJSON object
Parties as from an OCDS version 1.1 release in
https://standard.open-contracting.org/latest/en/schema/reference/#parties
"""
# dependencies = ['bluetail.OtherView',]
# projection = ['bluetail.OCDSReleaseJSON.ocid', ]
ocid = models.TextField(primary_key=True)
release_json = JSONField()
party_json = JSONField()
party_id = models.TextField()
party_role = models.TextField()
party_identifier_scheme = models.TextField()
party_identifier_id = models.TextField()
party_legalname = models.TextField()
party_name = models.TextField()
party_countryname = models.TextField()
contact_name = models.TextField()
sql = """
SELECT
ocds.ocid,
ocds.release_id,
ocds.release_json,
party as party_json,
role AS party_role,
party ->> 'id' as party_id,
party -> 'identifier' ->> 'scheme' as party_identifier_scheme,
party -> 'identifier' ->> 'id' as party_identifier_id,
party -> 'identifier' ->> 'legalName' as party_legalname,
party -> 'address' ->> 'countryName' as party_countryname,
party ->> 'name' party_name,
party -> 'contactPoint' ->> 'name' as contact_name
FROM
bluetail_ocds_release_json_view ocds,
LATERAL jsonb_array_elements(ocds.release_json -> 'parties') party,
LATERAL jsonb_array_elements_text(party -> 'roles') role
WHERE role = 'tenderer'
"""
class Meta:
app_label = 'bluetail'
db_table = 'bluetail_ocds_tenderers_view'
managed = False
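# Usage sketch (illustrative, not part of the original models): once the
# database views have been created (e.g. via django-pgviews' sync command), the
# view models can be queried like ordinary read-only Django models; the OCID
# below is hypothetical.
#
#     tender = OCDSTender.objects.get(ocid='ocds-213czf-000-00001')
#     print(tender.title, tender.total_warnings, tender.total_errors)
#     tenderers = OCDSTenderer.objects.filter(ocid=tender.ocid)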
|
#
#
# Copyright (C) 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common functions for tool scripts.
"""
import logging
import os
import time
from io import StringIO
import OpenSSL
from ganeti import constants
from ganeti import errors
from ganeti import pathutils
from ganeti import utils
from ganeti import serializer
from ganeti import ssconf
from ganeti import ssh
def VerifyOptions(parser, opts, args):
"""Verifies options and arguments for correctness.
"""
if args:
parser.error("No arguments are expected")
return opts
def _VerifyCertificateStrong(cert_pem, error_fn,
_check_fn=utils.CheckNodeCertificate):
"""Verifies a certificate against the local node daemon certificate.
Includes elaborate tests of encodings etc., and returns formatted
certificate.
@type cert_pem: string
@param cert_pem: Certificate and key in PEM format
@type error_fn: callable
@param error_fn: function to call in case of an error
@rtype: string
@return: Formatted key and certificate
"""
try:
cert = \
OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except Exception as err:
raise error_fn("(stdin) Unable to load certificate: %s" % err)
try:
key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except OpenSSL.crypto.Error as err:
raise error_fn("(stdin) Unable to load private key: %s" % err)
# Check certificate with given key; this detects cases where the key given on
# stdin doesn't match the certificate also given on stdin
try:
utils.X509CertKeyCheck(cert, key)
except OpenSSL.SSL.Error:
raise error_fn("(stdin) Certificate is not signed with given key")
# Standard checks, including check against an existing local certificate
# (no-op if that doesn't exist)
_check_fn(cert)
key_encoded = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
cert_encoded = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
cert)
complete_cert_encoded = key_encoded + cert_encoded
if not cert_pem == complete_cert_encoded.decode('ascii'):
logging.error("The certificate differs after being reencoded. Please"
" renew the certificates cluster-wide to prevent future"
" inconsistencies.")
# Format for storing on disk
buf = StringIO()
buf.write(cert_pem)
return buf.getvalue()
def _VerifyCertificateSoft(cert_pem, error_fn,
_check_fn=utils.CheckNodeCertificate):
"""Verifies a certificate against the local node daemon certificate.
@type cert_pem: string
@param cert_pem: Certificate in PEM format (no key)
"""
try:
OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except OpenSSL.crypto.Error as err:
pass
else:
raise error_fn("No private key may be given")
try:
cert = \
OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
except Exception as err:
raise errors.X509CertError("(stdin)",
"Unable to load certificate: %s" % err)
_check_fn(cert)
def VerifyCertificateSoft(data, error_fn, _verify_fn=_VerifyCertificateSoft):
"""Verifies cluster certificate if existing.
@type data: dict
@type error_fn: callable
@param error_fn: function to call in case of an error
@rtype: string
@return: Formatted key and certificate
"""
cert = data.get(constants.SSHS_NODE_DAEMON_CERTIFICATE)
if cert:
_verify_fn(cert, error_fn)
def VerifyCertificateStrong(data, error_fn,
_verify_fn=_VerifyCertificateStrong):
"""Verifies cluster certificate. Throws error when not existing.
@type data: dict
@type error_fn: callable
@param error_fn: function to call in case of an error
@rtype: string
@return: Formatted key and certificate
"""
cert = data.get(constants.NDS_NODE_DAEMON_CERTIFICATE)
if not cert:
raise error_fn("Node daemon certificate must be specified")
return _verify_fn(cert, error_fn)
def VerifyClusterName(data, error_fn, cluster_name_constant,
_verify_fn=ssconf.VerifyClusterName):
"""Verifies cluster name.
@type data: dict
"""
name = data.get(cluster_name_constant)
if name:
_verify_fn(name)
else:
raise error_fn("Cluster name must be specified")
return name
def VerifyHmac(data, error_fn):
"""Verifies the presence of the hmac secret.
@type data: dict
"""
hmac = data.get(constants.NDS_HMAC)
if not hmac:
raise error_fn("Hmac key must be provided")
return hmac
def LoadData(raw, data_check):
"""Parses and verifies input data.
@rtype: dict
"""
result = None
try:
result = serializer.LoadAndVerifyJson(raw, data_check)
logging.debug("Received data: %s", serializer.DumpJson(result))
except Exception as e:
logging.warn("Received data is not valid json: %s.", str(raw))
raise e
return result
def GenerateRootSshKeys(key_type, key_bits, error_fn, _suffix="",
_homedir_fn=None):
"""Generates root's SSH keys for this node.
"""
ssh.InitSSHSetup(key_type, key_bits, error_fn=error_fn,
_homedir_fn=_homedir_fn, _suffix=_suffix)
def GenerateClientCertificate(
data, error_fn, client_cert=pathutils.NODED_CLIENT_CERT_FILE,
signing_cert=pathutils.NODED_CERT_FILE):
"""Regenerates the client certificate of the node.
@type data: string
@param data: the JSON-formatted input data
"""
if not os.path.exists(signing_cert):
raise error_fn("The signing certificate '%s' cannot be found."
% signing_cert)
# TODO: This sets the serial number to the number of seconds
# since epoch. This is technically not a correct serial number
# (in the way SSL is supposed to be used), but it serves us well
# enough for now, as we don't have any infrastructure for keeping
# track of the number of signed certificates yet.
serial_no = int(time.time())
# The hostname of the node is provided with the input data.
hostname = data.get(constants.NDS_NODE_NAME)
if not hostname:
raise error_fn("No hostname found.")
utils.GenerateSignedSslCert(client_cert, serial_no, signing_cert,
common_name=hostname)
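# Usage sketch (illustrative, not part of the original module): the Verify*
# helpers above all consume a dict decoded from JSON input, so a tool script
# would typically chain them as follows (error_fn is a callable raising the
# caller's preferred exception; cluster_name_constant is whichever constant the
# calling script uses as the cluster-name key).
#
#     data = LoadData(raw_input_string, data_check)
#     cluster_name = VerifyClusterName(data, error_fn, cluster_name_constant)
#     cert_pem = VerifyCertificateStrong(data, error_fn)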
|
# -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
from meraki.api_helper import APIHelper
from meraki.configuration import Configuration
from meraki.controllers.base_controller import BaseController
from meraki.http.auth.custom_header_auth import CustomHeaderAuth
class SAMLRolesController(BaseController):
"""A Controller to access Endpoints in the meraki API."""
def get_organization_saml_roles(self,
organization_id):
"""Does a GET request to /organizations/{organizationId}/samlRoles.
List the SAML roles for this organization
Args:
organization_id (string): TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(organization_id=organization_id)
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': organization_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def create_organization_saml_role(self,
options=dict()):
"""Does a POST request to /organizations/{organizationId}/samlRoles.
Create a SAML role
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
organization_id -- string -- TODO: type description here.
Example:
create_organization_saml_role --
CreateOrganizationSamlRoleModel -- TODO: type
description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(organization_id=options.get("organization_id"))
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('create_organization_saml_role')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def get_organization_saml_role(self,
options=dict()):
"""Does a GET request to /organizations/{organizationId}/samlRoles/{id}.
Return a SAML role
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
organization_id -- string -- TODO: type description here.
Example:
id -- string -- TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(organization_id=options.get("organization_id"),
id=options.get("id"))
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None),
'id': options.get('id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def update_organization_saml_role(self,
options=dict()):
"""Does a PUT request to /organizations/{organizationId}/samlRoles/{id}.
Update a SAML role
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
organization_id -- string -- TODO: type description here.
Example:
id -- string -- TODO: type description here. Example:
update_organization_saml_role --
UpdateOrganizationSamlRoleModel -- TODO: type
description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(organization_id=options.get("organization_id"),
id=options.get("id"))
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None),
'id': options.get('id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('update_organization_saml_role')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def delete_organization_saml_role(self,
options=dict()):
"""Does a DELETE request to /organizations/{organizationId}/samlRoles/{id}.
Remove a SAML role
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
organization_id -- string -- TODO: type description here.
Example:
id -- string -- TODO: type description here. Example:
Returns:
void: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(organization_id=options.get("organization_id"),
id=options.get("id"))
# Prepare query URL
_url_path = '/organizations/{organizationId}/samlRoles/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'organizationId': options.get('organization_id', None),
'id': options.get('id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
_request = self.http_client.delete(_query_url)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
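# Usage sketch (illustrative, not part of the generated controller): assuming a
# configured meraki client that exposes this controller (commonly as something
# like client.saml_roles), the endpoints above take either a plain organization
# id or an options dict keyed as documented in each docstring; the identifiers
# below are hypothetical.
#
#     roles = saml_roles.get_organization_saml_roles('123456')
#     role = saml_roles.get_organization_saml_role({'organization_id': '123456',
#                                                   'id': 'role_id'})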
|
import string
from random import choice
class Credential:
'''
Class that generates instances of a users credentials
'''
# Empty list of credentials
credential_list = []
def __init__(self, user_password, credential_name, credential_password):
'''
__init__ method to define the properties of a User object
Args:
credential_name : name of an account
user_password : password of the user
credential_password : password for the user account
'''
self.user_password = user_password
self.credential_name = credential_name
self.credential_password = credential_password
def save_credential(self):
'''
Method that saves a user's credentials to credential list
'''
Credential.credential_list.append(self)
@classmethod
def generate_password(cls):
'''
Method that generates a random alphanumeric password
'''
# Length of the generated password
size = 8
# Generate random alphanumeric
alphanum = string.ascii_uppercase + string.digits + string.ascii_lowercase
# Create password
password = ''.join( choice(alphanum) for num in range(size) )
return password
@classmethod
def display_credential(cls,password):
'''
Method that returns the credential list
Args:
password : the user password
'''
user_credential_list = []
for credential in cls.credential_list:
if credential.user_password == password:
user_credential_list.append(credential)
return user_credential_list
@classmethod
def credential_exist(cls, name):
'''
Method that checks if a credential exists in the credential list
Args:
name: name of the credential to search
Returns:
Boolean: true or false depending if the contact exists
'''
for credential in cls.credential_list:
if credential.credential_name == name:
return True
return False
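# Usage sketch (illustrative, not part of the original class): generating a
# password and storing/retrieving a credential; the account details below are
# hypothetical.
if __name__ == '__main__':
    password = Credential.generate_password()
    credential = Credential('user-pass', 'twitter', password)
    credential.save_credential()
    print(Credential.display_credential('user-pass'))
    print(Credential.credential_exist('twitter'))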
|
import matplotlib.pyplot as plt
import numpy as np
from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable
from brancher import inference
import brancher.functions as BF
# Probabilistic model #
T = 100
nu = LogNormalVariable(0.3, 1., 'nu')
x0 = NormalVariable(0., 1., 'x0')
b = BetaVariable(0.5, 1.5, 'b')
x = [x0]
names = ["x0"]
for t in range(1,T):
names.append("x{}".format(t))
x.append(NormalVariable(b * x[t - 1], nu, names[t]))
AR_model = ProbabilisticModel(x)
# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[xt].cpu().detach().numpy()) for xt in x]
true_b = data[b].cpu().detach().numpy()
true_nu = data[nu].cpu().detach().numpy()
print("The true coefficient is: {}".format(float(true_b)))
# Observe data #
[xt.observe(data[xt][:, 0, :]) for xt in x]
# Variational distribution #
Qnu = LogNormalVariable(0.5, 1., "nu", learnable=True)
Qb = BetaVariable(0.5, 0.5, "b", learnable=True)
variational_posterior = ProbabilisticModel([Qb, Qnu])
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=200,
number_samples=300,
optimizer='Adam',
lr=0.05)
loss_list = AR_model.diagnostics["loss curve"]
# Statistics
posterior_samples = AR_model._get_posterior_sample(2000)
nu_posterior_samples = posterior_samples[nu].cpu().detach().numpy().flatten()
b_posterior_samples = posterior_samples[b].cpu().detach().numpy().flatten()
b_mean = np.mean(b_posterior_samples)
b_sd = np.sqrt(np.var(b_posterior_samples))
print("The estimated coefficient is: {} +- {}".format(b_mean, b_sd))
# Two subplots, unpack the axes array immediately
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
ax1.plot(time_series)
ax1.set_title("Time series")
ax2.plot(np.array(loss_list))
ax2.set_title("Convergence")
ax2.set_xlabel("Iteration")
ax3.hist(b_posterior_samples, 25)
ax3.axvline(x=true_b, lw=2, c="r")
ax3.set_title("Posterior samples (b)")
ax3.set_xlim(0,1)
ax4.hist(nu_posterior_samples, 25)
ax4.axvline(x=true_nu, lw=2, c="r")
ax4.set_title("Posterior samples (nu)")
plt.show()
|
##########################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
##########################################################################################
"""Implementation of NLO beam_factorization currents. These are the PDF counterterms as well
as the integrated initial state collinear counterterms."""
import os
import math
from madgraph.core.base_objects import EpsilonExpansion
import madgraph.various.misc as misc
import commons.utils as utils
import commons.QCD_local_currents as currents
import commons.factors_and_cuts as factors_and_cuts
from commons.integrated_current_expressions import HE
pjoin = os.path.join
CurrentImplementationError = utils.CurrentImplementationError
log = math.log
pi = math.pi
# All counterterms here adopt a xi-dependent distribution of the following form:
#
# Counterterm(xi) = F_+(xi) + [F] \delta(xi-1)
# (which can also be explicitely written)
# Counterterm(xi) = F(xi) + {F(xi)} \delta(xi-1) + [F] \delta(xi-1)
#
# where 'F' can either be a PDF counterterm or an interated collinear ISR counterterm.
# Then each piece of the distribution is assigned a different value for its attribute
# 'distribution_type' as follows:
#
# F(xi) --> distribution_type = 'bulk'
# {F(xi)} --> distribution_type = 'counterterm'
# [F(xi)] --> distribution_type = 'endpoint'
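# Illustrative note (not part of the original module): in the kernels defined
# below (e.g. kernel_gg, kernel_qq), each of these three pieces is stored under
# the corresponding key, so a current instance simply selects
#
#     piece = kernel_xx[self.distribution_type]   # 'bulk', 'counterterm' or 'endpoint'
#
# with a None entry meaning that the corresponding piece vanishes for that kernel.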
#=========================================================================================
# PDF Counterterm
#=========================================================================================
class QCD_beam_factorization_F0(currents.QCDBeamFactorizationCurrent):
"""Implements the NLO QCD PDF counterterm of type F(xi)"""
distribution_types_implemented_in_this_class = ['bulk','counterterm','endpoint']
@classmethod
def does_implement_this_current(cls, current, model):
# Check the general properties common to NLO QCD collinear tree-level currents
init_vars = cls.common_does_implement_this_current(current, 2, 0)
if init_vars is None: return None
# Retrieve singular structure
ss = current.get('singular_structure').substructures[0]
# Check that it involves exactly one F structure with one leg.
if len(ss.substructures)==0:
factorization_structure = ss
elif len(ss.substructures)==1 and len(ss.substructures[0].substructures)==0:
factorization_structure = ss.substructures[0]
else:
return None
if factorization_structure.name() != 'F':
return None
if len(factorization_structure.legs) != 1:
return None
# Make sure the one leg of the F structure is initial-state
if not cls.is_initial(factorization_structure.legs[0]):
return None
# The current is valid (remember that this implements the PDF counterterm of
# all possible incoming flavors).
return init_vars
def evaluate_kernel(self, PS_point, process, xi, mu_r, mu_f, Q, normalization,
allowed_backward_evolved_flavors='ALL'):
""" Return an instance of BeamFactorizationCurrentEvaluation, whose 'values' entry
are dictionaries specifying the counterterm in flavor space, for the value of xi
specified in argument."""
if allowed_backward_evolved_flavors != 'ALL':
raise CurrentImplementationError('The current %s must always be called with '%self.__class__.__name__+
"allowed_backward_evolved_flavors='ALL', not %s"%str(allowed_backward_evolved_flavors))
# Only the order epsilon of the scales pre-factor matters here.
prefactor = EpsilonExpansion({
0 : 1.,
1 : log(mu_r**2 / mu_f**2)
})
prefactor *= EpsilonExpansion({-1:1.})*normalization
# Assign a fake xi for now if the distribution type is 'endpoint'
# TODO: this is not optimal, eventually we should put each of these three pieces in
# separate currents
if self.distribution_type == 'endpoint':
xi = 0.5
# Define the NLO QCD PDF counterterm kernels
kernel_gg = {
'bulk' : prefactor*(
2.*self.CA*( 1./ (1.-xi) + (1.-xi)/xi -1. + xi*(1-xi) )
),
'counterterm' : prefactor*( 2.*self.CA / (1.-xi) ),
'endpoint' : prefactor*( 11./6.*self.CA - 2./3.*self.NF*self.TR)
}
kernel_gq = {
'bulk' : prefactor*( self.CF*(1.+(1.-xi)**2)/xi ),
'counterterm' : None,
'endpoint' : None
}
kernel_qg = {
'bulk' : prefactor*( self.TR*(xi**2 + (1.-xi)**2) ),
'counterterm' : None,
'endpoint' : None
}
kernel_qq = {
'bulk' : prefactor*( self.CF*((1.+xi**2)/(1.-xi)) ),
'counterterm' : prefactor*( self.CF*((1.+xi**2)/(1.-xi)) ),
'endpoint' : None
}
active_quark_PDGs = tuple([pdg for pdg in range(1,7)+range(-1,-7,-1)
if pdg in self.beam_PDGs])
# Build the NLO flavor matrix
flavor_matrix = {}
for reduced_flavor in self.beam_PDGs:
# Gluon backward evolution
if reduced_flavor==21:
gluon_dict = {}
if kernel_gg[self.distribution_type] is not None:
gluon_dict[(21,)] = kernel_gg[self.distribution_type]
if active_quark_PDGs and kernel_gq[self.distribution_type] is not None:
gluon_dict[active_quark_PDGs] = kernel_gq[self.distribution_type]
if gluon_dict:
flavor_matrix[21] = gluon_dict
# Quark backward evolution
if reduced_flavor in active_quark_PDGs:
quark_dict = {}
if kernel_qg[self.distribution_type] is not None:
quark_dict[(21,)] = kernel_qg[self.distribution_type]
if kernel_qq[self.distribution_type] is not None:
quark_dict[(reduced_flavor,)] = kernel_qq[self.distribution_type]
if quark_dict:
flavor_matrix[reduced_flavor] = quark_dict
# Truncate all entries of the flavor matrix so as to remove irrelevant O(\eps) terms
for flav_in, flav_outs in flavor_matrix.items():
for flav_out, eps_expansion in flav_outs.items():
eps_expansion.truncate(max_power=0)
# Now assign the flavor matrix in the BeamFactorizationCurrentEvaluation instance
# If this is a physical contribution (i.e. not a counterterm) then we must enforce that
# the reduced kinematics is None as it will not even be read by MadNkLO.
evaluation = utils.BeamFactorizationCurrentEvaluation({
'spin_correlations' : [None,],
'color_correlations' : [None,],
'values' : { (0,0) : flavor_matrix }
})
return evaluation
#=========================================================================================
# PDF integrated initial-state single collinear counterterm
#=========================================================================================
class QCD_beam_factorization_single_collinear(currents.QCDBeamFactorizationCurrent):
"""Implements the NLO QCD initial-state single collinear integratated counterterm of type F(xi)"""
distribution_types_implemented_in_this_class = ['bulk','counterterm','endpoint']
@classmethod
def does_implement_this_current(cls, current, model):
# Check the general properties common to NLO QCD collinear tree-level currents
init_vars = cls.common_does_implement_this_current(current, 2, 0)
if init_vars is None: return None
# Retrieve singular structure
ss = current.get('singular_structure').substructures[0]
# Check that it involves exactly one collinear structure with two legs.
if len(ss.substructures)!=1:
return None
collinear_structure = ss.substructures[0]
if collinear_structure.name() != 'C':
return None
if len(collinear_structure.legs) != 2:
return None
# Make sure that one of the two legs of the C structure is initial-state
if not any(cls.is_initial(leg) for leg in collinear_structure.legs):
return None
# The current is valid (remember that this implements the integrated
# initial-state collinear counterterm of all possible incoming flavors).
return init_vars
def evaluate_kernel(self, PS_point, process, xi, mu_r, mu_f, Q, normalization,
allowed_backward_evolved_flavors='ALL'):
""" Return an instance of BeamFactorizationCurrentEvaluation, whose 'values' entry
are dictionaries specifying the counterterm in flavor space, for the value of xi
specified in argument."""
# Obtain Q_square.
Q_square = Q.square()
# Only up to the order epsilon^2 of the scales prefactor matters here.
logMuQ = log(mu_r**2/Q_square)
prefactor = EpsilonExpansion({ 0 : 1., 1 : logMuQ, 2 : 0.5*logMuQ**2 })
prefactor *= normalization
# The additional 1/x part of the prefactor is included later during the PDF
# convolution of the event (using its 'Bjorken rescaling' attribute) because
# we must make sure that the plus distribution hits on it.
# Also, the same 1/x appears in the PDF counterterms as a result of the change
# of variable necessary to bring them in the form where the plus distribution
# only acts on the PDF. So it makes sense to keep it completely factorised.
# Input variables
y_0 = factors_and_cuts.y_0_prime
logy0 = log(y_0)
# Assign a fake x for now if the distribution type is 'endpoint'
# TODO: this is not optimal, eventually we should put each of these three pieces in
# separate currents
if self.distribution_type == 'endpoint':
x = 0.5
else:
x = xi
# In MadNkLO, we use the change of variable xb' = xb*xi so that the factor
# (Q^2)^\eps in Eq. 5.21 of https://arxiv.org/pdf/0903.1218.pdf actually reads
# (Q^2/(xi1*xi2))^\eps and the '+' distributions also act on it, which we realize
# by simply multiplying the Q^2 provided by the xi factor that must be set to one.
logMuQ_plus = log(mu_r**2/(Q_square*x))
prefactor_plus = EpsilonExpansion({ 0 : 1., 1 : logMuQ_plus, 2 : 0.5*logMuQ_plus**2 })
prefactor_plus *= normalization
log1mx = log(1.-x)
# Heaviside
theta_x_1my0 = 1. if (x-(1-y_0)) >= 0. else 0.
theta_1my0_x = 1. if ((1-y_0)-x) >= 0. else 0.
# Define the NLO QCD integrated initial-state single collinear counterterm kernels
color_factor = self.CA
kernel_gg = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -2.*( 1./(1.-x) + (1.-x)/x - 1 + x*(1-x) ),
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) + (2.*logy0/(1.-x))*theta_1my0_x
+ 2.*( ((1.-x)/x) -1. + x*(1.-x) )*( log1mx*(1.+theta_x_1my0) + logy0*theta_1my0_x )
})),
'counterterm' : prefactor_plus*color_factor*(EpsilonExpansion({
-1 : -2.* ( 1./(1.-x) ) ,
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) ,
})),
'endpoint' : prefactor*color_factor*(EpsilonExpansion({
-2 : 1. ,
-1 : 0. ,
0 : -(math.pi**2/6.) + logy0**2
}))
}
color_factor = self.CA
kernel_gq = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -(self.CF/self.CA)*(1.+(1.-x)**2) / x ,
0 : (self.CF/self.CA)*( ((1.+(1.-x)**2)/x)*( log1mx*(1.+theta_x_1my0) + logy0*theta_1my0_x ) + x )
})),
'counterterm' : None,
'endpoint' : None
}
color_factor = self.CF
kernel_qg = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -(self.TR/self.CF)*(x**2+(1.-x)**2) ,
0 : (self.TR/self.CF)*( (x**2 + (1.-x)**2)*( log1mx*(1.+theta_x_1my0) + logy0*theta_1my0_x ) + 2.*x*(1.-x) )
})),
'counterterm' : None,
'endpoint' : None
}
color_factor = self.CF
kernel_qq = {
'bulk' : prefactor*color_factor*(EpsilonExpansion({
-1 : -((1.+x**2)/(1.-x)) ,
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) + (2.*logy0/(1.-x))*theta_1my0_x
- ( (1.+x)*( log1mx*(1.+theta_x_1my0)+logy0*theta_1my0_x ) -1.+x )
})),
'counterterm' : prefactor_plus*color_factor*(EpsilonExpansion({
-1 : -((1.+x**2)/(1.-x)) ,
0 : (2.*log1mx / (1.-x))*(1.+theta_x_1my0) ,
})),
'endpoint' : prefactor*color_factor*(EpsilonExpansion({
-2 : 1. ,
-1 : 3./2. ,
0 : -(math.pi**2/6.) + logy0**2
}))
}
active_quark_PDGs = tuple([pdg for pdg in range(1,7)+range(-1,-7,-1)
if pdg in self.beam_PDGs])
# Build the NLO flavor matrix
flavor_matrix = {}
for reduced_flavor in self.beam_PDGs:
# Gluon backward evolution
if reduced_flavor==21:
gluon_dict = {}
if kernel_gg[self.distribution_type] is not None:
gluon_dict[(21,)] = kernel_gg[self.distribution_type]
if active_quark_PDGs and kernel_gq[self.distribution_type] is not None:
gluon_dict[active_quark_PDGs] = kernel_gq[self.distribution_type]
if gluon_dict:
flavor_matrix[21] = gluon_dict
# Quark backward evolution
if reduced_flavor in active_quark_PDGs:
quark_dict = {}
if kernel_qg[self.distribution_type] is not None:
quark_dict[(21,)] = kernel_qg[self.distribution_type]
if kernel_qq[self.distribution_type] is not None:
quark_dict[(reduced_flavor,)] = kernel_qq[self.distribution_type]
if quark_dict:
flavor_matrix[reduced_flavor] = quark_dict
# Truncate all entries of the flavor matrix so as to remove irrelevant O(\eps) terms
for flav_in, flav_outs in flavor_matrix.items():
for flav_out, eps_expansion in flav_outs.items():
eps_expansion.truncate(max_power=0)
# Now apply the mask 'allowed_backward_evolved_flavors' if not set to 'ALL'
filtered_flavor_matrix = self.apply_flavor_mask(flavor_matrix,allowed_backward_evolved_flavors)
# Now assign the flavor matrix in the BeamFactorizationCurrentEvaluation instance
evaluation = utils.BeamFactorizationCurrentEvaluation({
'spin_correlations' : [None,],
'color_correlations' : [None,],
'values' : { (0,0) : filtered_flavor_matrix }
})
return evaluation
#=========================================================================================
# PDF integrated initial-state single soft-collinear counterterm
#=========================================================================================
class QCD_beam_factorization_single_softcollinear(currents.QCDBeamFactorizationCurrent):
"""Implements the NLO QCD initial-state single soft-collinear integgratated counterterm
of type F(xi). These are zero here since they have already been accounted for
in the soft counterterms."""
distribution_types_implemented_in_this_class = ['bulk','counterterm','endpoint']
# These integrated contributions are not really directly related to the physical
# properties of beam factorization (for instance they don't act on the flavor space) and
# therefore apply independently of it.
beam_types_implemented_in_this_class = 'ALL'
beam_PDGs_implemented_in_this_class = 'ALL'
# The soft-collinear integrated counterterm has been accounted for completely in the
# soft integrated counterterm
is_zero = True
def __init__(self, *args, **opts):
# Make sure it is initialized with the proper set of options and remove them
# before calling the mother constructor
if 'color_charge' not in opts:
raise CurrentImplementationError(
"The current '%s' must be instantiated with "%self.__class__.__name__+
" a 'color_charge' option specified.")
color_charge = opts.pop('color_charge')
super(QCD_beam_factorization_single_softcollinear, self).__init__(*args, **opts)
self.supports_helicity_assignment = False
# At this stage color_charge is the string name of the group factor to retrieve ('CA' or 'CF')
# And now that the mother constructor is called, the group factors have been initialized
# and we can retrieve them.
self.color_charge = getattr(self, color_charge)
@classmethod
def does_implement_this_current(cls, current, model):
# Check the general properties common to NLO QCD collinear tree-level currents
init_vars = cls.common_does_implement_this_current(current, 2, 0)
if init_vars is None:
return None
# If this is a BF current it will not have substructures
ss = current.get('singular_structure')
if len(ss.substructures)==0:
return None
# Retrieve singular structure
ss = current.get('singular_structure').substructures[0]
# Check that it involves exactly one collinear structure with two legs.
if len(ss.substructures)!=1:
return None
# Finally check that the singular structure and PDG matches
singular_structure = ss.substructures[0]
# Its main structure should be of collinear type
if singular_structure.name()!='C':
return None
# It should have only one leg left, the other one being in the nested soft structure
# It must be an initial-state leg.
if len(singular_structure.legs)!=1:
return None
# The non-soft leg must be a quark or a gluon
if not abs(singular_structure.legs[0].pdg) in [21,]+range(1,7):
return None
# It should have exactly one nested structure
if len(singular_structure.substructures)!=1:
return None
sub_singular_structure = singular_structure.substructures[0]
# Make sure this substructure is soft
if sub_singular_structure.name()!='S':
return None
# Make sure it contains a single soft leg
if len(sub_singular_structure.legs)!=1:
return None
soft_leg = sub_singular_structure.legs[0]
# Make sure the soft leg is massless final and a gluon
if model.get_particle(soft_leg.pdg).get('mass').upper()!='ZERO':
return None
if soft_leg.pdg != 21:
return None
# We now know that this current is implemented here. We return
# the specific color charge to instantiate this kernel with,
# in the form of a the name of the group factor to retrieve upon
# initialization.
if singular_structure.legs[0].pdg == 21:
# This is a 'g > g g' soft-collinear splitting
init_vars['color_charge'] = 'CA'
else:
# This is a 'q > g q' soft-collinear splitting
init_vars['color_charge'] = 'CF'
return init_vars
|
#
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_authorizationpolicy_binding(base_resource) :
""" Binding class showing the authorizationpolicy that can be bound to lbvserver.
"""
def __init__(self) :
self._policyname = None
self._priority = None
self._sc = None
self._gotopriorityexpression = None
self._bindpoint = None
self._invoke = None
self._labeltype = None
self._labelname = None
self._name = None
self.___count = None
@property
def priority(self) :
r"""Priority.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
r"""Priority.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
r"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE, MQTT_JUMBO_REQ.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
r"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE, MQTT_JUMBO_REQ
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
r"""Name of the policy bound to the LB vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
r"""Name of the policy bound to the LB vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
r"""Name of the label invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
r"""Name of the label invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
r"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
r"""Invoke policies bound to a virtual server or policy label.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
r"""Invoke policies bound to a virtual server or policy label.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
r"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
r"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def sc(self) :
r"""Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_authorizationpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_authorizationpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
r""" Use this function to create a resource with only add operation specific parameters.
"""
addresource = lbvserver_authorizationpolicy_binding()
addresource.name = resource.name
addresource.policyname = resource.policyname
addresource.priority = resource.priority
addresource.gotopriorityexpression = resource.gotopriorityexpression
addresource.bindpoint = resource.bindpoint
addresource.invoke = resource.invoke
addresource.labeltype = resource.labeltype
addresource.labelname = resource.labelname
return addresource
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = cls.filter_add_parameters(resource)
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_authorizationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_add_parameters(resource[i])
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
r""" Use this function to create a resource with only delete operation specific parameters.
"""
deleteresource = lbvserver_authorizationpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_authorizationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource[i])
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch lbvserver_authorizationpolicy_binding resources.
"""
try :
if not name :
obj = lbvserver_authorizationpolicy_binding()
response = obj.get_resources(service, option_)
else :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of lbvserver_authorizationpolicy_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count lbvserver_authorizationpolicy_binding resources configued on NetScaler.
"""
try :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of lbvserver_authorizationpolicy_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
ON = "ON"
OFF = "OFF"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
MQTT_JUMBO_REQ = "MQTT_JUMBO_REQ"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_authorizationpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_authorizationpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_authorizationpolicy_binding = [lbvserver_authorizationpolicy_binding() for _ in range(length)]
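# Hedged usage sketch (not part of the generated SDK): the classmethods above
# follow the standard NITRO binding pattern, so fetching and adding a binding
# would typically look like the commented code below. The management address,
# credentials, vserver name "lb_vs1" and policy name "authz_pol1" are
# placeholder assumptions for illustration only.
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "nsroot")
#   bindings = lbvserver_authorizationpolicy_binding.get(client, name="lb_vs1")
#   binding = lbvserver_authorizationpolicy_binding()
#   binding.name = "lb_vs1"
#   binding.policyname = "authz_pol1"
#   binding.priority = 100
#   binding.bindpoint = lbvserver_authorizationpolicy_binding.Bindpoint.REQUEST
#   lbvserver_authorizationpolicy_binding.add(client, binding)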
|
import functools
import inspect
import warnings
string_types = (type(b''), type(u''))
def warn_deprecation(text):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
text,
category=DeprecationWarning,
stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
def deprecated(reason):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
if isinstance(reason, string_types):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "Call to deprecated class {name} ({reason})."
else:
fmt1 = "Call to deprecated function {name} ({reason})."
@functools.wraps(func1)
def new_func1(*args, **kwargs):
warn_deprecation(
fmt1.format(name=func1.__name__, reason=reason),
)
return func1(*args, **kwargs)
return new_func1
return decorator
elif inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func2 = reason
if inspect.isclass(func2):
fmt2 = "Call to deprecated class {name}."
else:
fmt2 = "Call to deprecated function {name}."
@functools.wraps(func2)
def new_func2(*args, **kwargs):
warn_deprecation(
fmt2.format(name=func2.__name__),
)
return func2(*args, **kwargs)
return new_func2
else:
raise TypeError(repr(type(reason)))
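# Hedged, self-contained demo (not part of the original module): it exercises
# both supported call styles of the @deprecated decorator defined above. The
# function names below are invented purely for this example.
if __name__ == "__main__":

    @deprecated("use new_api() instead")
    def legacy_api():
        return 42

    @deprecated
    def older_api():
        return 0

    legacy_api()  # warns: Call to deprecated function legacy_api (use new_api() instead).
    older_api()   # warns: Call to deprecated function older_api.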
|
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to rank modules to use in a downstream classification task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import pandas as pd
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_hub.tools.module_search import utils
FLAGS = flags.FLAGS
flags.DEFINE_string("dataset", None,
"Specification of a dataset. E.g. use `cifar10#1000` to "
"perform search using 1000 examples from tfds `cifar10` "
"dataset.")
flags.DEFINE_multi_string("module", None, "Module to consider in the search")
flags.DEFINE_string("module_list", None,
"Path to text file with a module per line to be considered in the search."
"Empty lines and lines starting with # are ignored")
def load_data(data_spec):
return utils.load_data(**data_spec)
def load_raw_features(data_spec):
data = load_data(data_spec=data_spec)
return data.map(lambda x: tf.image.resize(x["image"], (224, 224)))
def load_labels(data_spec):
data = load_data(data_spec=data_spec)
return np.array([x for x in data.map(lambda x: x["label"])])
def compute_embeddings(module_spec, data_spec):
raw_features = load_raw_features(data_spec=data_spec)
embedding_fn = utils.load_embedding_fn(
module=module_spec)
outputs = []
for batch in raw_features.batch(10):
outputs.extend(embedding_fn(batch))
return np.array(outputs)
def compute_score(module_spec, data_spec):
embeddings = compute_embeddings(module_spec=module_spec,
data_spec=data_spec)
distances = utils.compute_distance_matrix_loo(embeddings)
labels = load_labels(data_spec=data_spec)
error_rate = utils.knn_errorrate_loo(distances, labels, k=1)
return np.array(error_rate)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if not FLAGS.dataset:
raise app.UsageError("--dataset is a required argument.")
module_list = []
if FLAGS.module:
module_list.extend(FLAGS.module)
if FLAGS.module_list:
with tf.io.gfile.GFile(FLAGS.module_list) as f:
lines = f.read().split("\n")
module_list.extend([l for l in lines if l != "" and not l.startswith("#")])
ds_sections = FLAGS.dataset.split("#")
dataset = ds_sections[0]
train_examples = int(ds_sections[1]) if len(ds_sections) > 1 else None
data_spec = {
"dataset": dataset,
"split": "train",
"num_examples": train_examples,
}
results = []
for module in module_list:
results.append((
module, data_spec,
compute_score(module_spec=module, data_spec=data_spec)))
df = pd.DataFrame(results, columns=["module", "data", "1nn"])
df = df.filter(["module", "1nn"])
df = df.sort_values(["1nn"])
df = df.reset_index(drop=True)
df = df.set_index("module")
with pd.option_context(
"display.max_rows", None,
"display.max_columns", None,
"display.precision", 3,
"max_colwidth", -1, # Don't truncate columns (e.g. module name).
"display.expand_frame_repr", False, # Don't wrap output.
):
print("# Module ranking for %s" % data_spec)
print(df)
if __name__ == '__main__':
app.run(main)
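# Hedged usage note (not part of the original tool): assuming this file is the
# module_search entry point, a typical invocation might look like the command
# below; the exact script path and module handle are illustrative assumptions.
#
#   python search.py \
#       --dataset=cifar10#1000 \
#       --module=https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4
#
# Each candidate module is ranked by its leave-one-out 1-nearest-neighbour
# error rate on embeddings of the sampled dataset (lower is better).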
|
import os
import sys
def check_ret(ret):
if(ret != 0):
os.system('git checkout -B develop remotes/origin/develop')
os.system('git clean -xdf -f')
sys.exit(1)
branches = ['develop', 'master']
for item in branches:
os.system('git clean -xdf -f')
os.system('git checkout -B ' + item + ' remotes/origin/' + item)
os.system('git clean -xdf -f')
ret = os.system('git pull origin')
check_ret(ret)
ret = os.system('git submodule update --init --force')
check_ret(ret)
#back to develop
os.system('git checkout -B develop remotes/origin/develop')
os.system('git clean -xdf -f')
|
"""
Name: Tkinter Exercise - a simple calculator
Description: iOS calculator simulator
Date: 2/21/2018
Author: Haowei Wu
"""
import tkinter
class Calculator:
# Params
app_title = "A simple calculator"
disp_font = ("Helvetica", 25, "bold")
btn_font = ("Helvetica", 20, "bold")
def __init__(self, root):
self.root = root
self.initialize()
def initialize(self):
# Variables
self.ans = "0"
self.operator = None
self.user_input = ""
self.last_user_input = ""
self.is_result = False
self.ever_equals = False
self.true_equal = False
# GUI
self.set_title()
self.set_display()
self.set_buttons()
# Clear
self.clear()
def set_title(self):
self.root.title(self.app_title)
def set_display(self):
self.display = tkinter.Entry(self.root, font=self.disp_font, justify=tkinter.RIGHT)
self.display.grid(row=0, column=0, columnspan=4, sticky="news", ipady=10)
def set_buttons(self):
# row 1
self.btn_clear = tkinter.Button(self.root, text="C", font=self.btn_font, command=lambda: self.btn_press("C"))
self.btn_clear.grid(row=1, column=0, sticky="news")
self.btn_negative = tkinter.Button(self.root, text="+/-", font=self.btn_font, command=lambda: self.btn_press("+/-"))
self.btn_negative.grid(row=1, column=1, sticky="news")
self.btn_percent = tkinter.Button(self.root, text="%", font=self.btn_font, command=lambda: self.btn_press("%"))
self.btn_percent.grid(row=1, column=2, sticky="news")
self.btn_divide = tkinter.Button(self.root, text="÷", font=self.btn_font, command=lambda: self.btn_press("/"))
self.btn_divide.grid(row=1, column=3, sticky="news")
# row 2
self.btn_7 = tkinter.Button(self.root, text="7", font=self.btn_font, command=lambda: self.btn_press("7"))
self.btn_7.grid(row=2, column=0, sticky="news")
self.btn_8 = tkinter.Button(self.root, text="8", font=self.btn_font, command=lambda: self.btn_press("8"))
self.btn_8.grid(row=2, column=1, sticky="news")
self.btn_9 = tkinter.Button(self.root, text="9", font=self.btn_font, command=lambda: self.btn_press("9"))
self.btn_9.grid(row=2, column=2, sticky="news")
self.btn_multiply = tkinter.Button(self.root, text="x", font=self.btn_font, command=lambda: self.btn_press("*"))
self.btn_multiply.grid(row=2, column=3, sticky="news")
# row 3
self.btn_4 = tkinter.Button(self.root, text="4", font=self.btn_font, command=lambda: self.btn_press("4"))
self.btn_4.grid(row=3, column=0, sticky="news")
self.btn_5 = tkinter.Button(self.root, text="5", font=self.btn_font, command=lambda: self.btn_press("5"))
self.btn_5.grid(row=3, column=1, sticky="news")
self.btn_6 = tkinter.Button(self.root, text="6", font=self.btn_font, command=lambda: self.btn_press("6"))
self.btn_6.grid(row=3, column=2, sticky="news")
self.btn_minus = tkinter.Button(self.root, text="-", font=self.btn_font, command=lambda: self.btn_press("-"))
self.btn_minus.grid(row=3, column=3, sticky="news")
# row 4
self.btn_1 = tkinter.Button(self.root, text="1", font=self.btn_font, command=lambda: self.btn_press("1"))
self.btn_1.grid(row=4, column=0, sticky="news")
self.btn_2 = tkinter.Button(self.root, text="2", font=self.btn_font, command=lambda: self.btn_press("2"))
self.btn_2.grid(row=4, column=1, sticky="news")
self.btn_3 = tkinter.Button(self.root, text="3", font=self.btn_font, command=lambda: self.btn_press("3"))
self.btn_3.grid(row=4, column=2, sticky="news")
self.btn_plus = tkinter.Button(self.root, text="+", font=self.btn_font, command=lambda: self.btn_press("+"))
self.btn_plus.grid(row=4, column=3, sticky="news")
# row 5
self.btn_0 = tkinter.Button(self.root, text="0", font=self.btn_font, command=lambda: self.btn_press("0"))
self.btn_0.grid(row=5, column=0, columnspan=2, sticky="news")
self.btn_dot = tkinter.Button(self.root, text=".", font=self.btn_font, command=lambda: self.btn_press("."))
self.btn_dot.grid(row=5, column=2, sticky="news")
self.btn_equal = tkinter.Button(self.root, text="=", font=self.btn_font, command=lambda: self.btn_press("="))
self.btn_equal.grid(row=5, column=3, sticky="news")
def clear(self):
self.ans = "0"
self.operator = None
self.user_input = ""
self.last_user_input = ""
self.ever_equals = False
self.is_result = False
self.update_display("0")
self.true_equal = False
def update_display(self, content):
self.display.delete(0, tkinter.END)
self.display.insert(0, content)
def calculation(self, ans, user_input, operator):
ans = float(ans)
user_input = float(user_input)
if operator != None:
if operator == "+":
ans = ans + user_input
if operator == "-":
ans = ans - user_input
if operator == "*":
ans = ans * user_input
if operator == "/":
ans = ans / user_input
return(str(ans))
else:
return(str(user_input))
def btn_press(self, press):
digits = [str(i) for i in range(10)]
operators = ["+","-","*","/"]
if press == "C":
self.clear()
if self.display.get() == "Error":
pass
else:
if press in digits:
if self.true_equal:
self.clear()
self.user_input += press
self.update_display(self.user_input)
self.is_result = False
if press in operators:
if not self.ever_equals and (not self.operator):
if self.user_input=="":
self.user_input = "0"
self.ans = self.user_input
self.user_input = ""
if self.operator and self.user_input !="":
self.btn_press("=")
self.operator = press
self.true_equal = False
if press == ".":
if "." not in self.user_input:
if self.user_input == "":
self.user_input = "0."
else:
self.user_input = self.user_input + "."
self.update_display(self.user_input)
self.is_result = False
if press == "+/-":
if self.is_result:
self.ans = str(-float(self.ans))
self.update_display(self.ans)
else:
if self.user_input == "":
self.user_input = "0"
self.user_input = str(-float(self.user_input))
self.update_display(self.user_input)
if press == "%":
if self.is_result:
self.ans = str(float(self.ans)/100)
self.update_display(self.ans)
else:
if self.user_input == "":
self.user_input = "0"
self.user_input = str(float(self.user_input)/100)
self.update_display(self.user_input)
if press == "=":
if self.user_input == "":
self.user_input = self.last_user_input
if self.user_input == "":
self.user_input = self.ans
try:
self.ans = self.calculation(self.ans, self.user_input, self.operator)
self.last_user_input = self.user_input
self.user_input = ""
self.update_display(self.ans)
self.ever_equals = True
self.is_result = True
self.true_equal = True
except:
self.update_display("Error")
if __name__ == "__main__":
root = tkinter.Tk()
Calculator(root)
root.mainloop()
|
import unittest
import pandas as pd
import os
from kmall_player import *
class KmallPlayerTest(unittest.TestCase):
def setUp(self) -> None:
file_name = "data/MRZ_LARGE_SIZE.kmall"
self.f = open(file_name, "rb")
self.file_size = os.fstat(self.f.fileno()).st_size
self.player = KmallPlayer()
k = KMALL.kmall(file_name)
k.index_file()
# Panda DataFrame type
self.index: pd.DataFrame = k.Index
self.mrz_pack = self.index.iloc[0]
def tearDown(self) -> None:
self.f.close()
def test_packet(self):
self.assertEqual(self.index.shape[0], 1)
self.assertTrue(self.mrz_pack['MessageSize'] > self.player.MAX_DATAGRAM_SIZE)
self.assertTrue('#MRZ' in self.mrz_pack['MessageType'])
def test_raw_header_reading(self):
header_dict = self.player.read_header_raw(self.f.read(self.player.HEADER_STRUCT_SIZE))
# Our test file contains only one packet
self.assertEqual(header_dict['numBytesDgm'], self.file_size)
self.assertTrue('#MRZ' in str(header_dict['dgmType']))
def test_partitioning(self):
msgs = self.player.partition_msg(self.f.read(self.mrz_pack['MessageSize']))
# Expecting 2 partitions
self.assertEqual(len(msgs), 2)
# Let's check the newly generated header content for our splits :
# First split should be of maximum size
self.assertEqual(self.player.read_header_raw(msgs[0])['numBytesDgm'], self.player.MAX_DATAGRAM_SIZE)
# Second and last split should take up the rest
last_packet_content_size = (self.file_size - self.player.HEADER_AND_PART_SIZE - 4)\
% self.player.MAX_DATA_SIZE
last_packet_size = last_packet_content_size + self.player.HEADER_AND_PART_SIZE + 4
self.assertEqual(self.player.read_header_raw(msgs[1])['numBytesDgm'], last_packet_size)
# Run tests when executed as a script
if __name__ == '__main__':
    unittest.main()
|
from flask import Blueprint
fs_api = Blueprint('fs_api', __name__, template_folder='templates')
from .views import configuration, dialplan, directory, vars, update_cdr
|
#!/usr/bin/env python3
import threading
import typing
import warnings
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import torch
from captum._utils.common import (
_reduce_list,
_run_forward,
_sort_key_list,
_verify_select_neuron,
)
from captum._utils.typing import (
Literal,
ModuleOrModuleList,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from torch import Tensor, device
from torch.nn import Module
def apply_gradient_requirements(
inputs: Tuple[Tensor, ...], warn: bool = True
) -> List[bool]:
"""
Iterates through tuple on input tensors and sets requires_grad to be true on
each Tensor, and ensures all grads are set to zero. To ensure that the input
is returned to its initial state, a list of flags representing whether or not
a tensor originally required grad is returned.
"""
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients"
grad_required = []
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
grad_required.append(input.requires_grad)
inputs_dtype = input.dtype
# Note: torch 1.2 doesn't support is_complex for dtype, which is why we check
# for the existence of the is_complex method.
if not inputs_dtype.is_floating_point and not (
hasattr(inputs_dtype, "is_complex") and inputs_dtype.is_complex
):
if warn:
warnings.warn(
"""Input Tensor %d has a dtype of %s.
Gradients cannot be activated
for these data types."""
% (index, str(inputs_dtype))
)
elif not input.requires_grad:
if warn:
warnings.warn(
"Input Tensor %d did not already require gradients, "
"required_grads has been set automatically." % index
)
input.requires_grad_()
return grad_required
def undo_gradient_requirements(
inputs: Tuple[Tensor, ...], grad_required: List[bool]
) -> None:
"""
Iterates through list of tensors, zeros each gradient, and sets required
grad to false if the corresponding index in grad_required is False.
This method is used to undo the effects of prepare_gradient_inputs, making
grads not required for any input tensor that did not initially require
gradients.
"""
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients."
assert len(inputs) == len(
grad_required
), "Input tuple length should match gradient mask."
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
if not grad_required[index]:
input.requires_grad_(False)
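def _example_gradient_requirements_roundtrip():
    # Hedged sketch (not part of the original API): shows the intended pairing
    # of apply_gradient_requirements and undo_gradient_requirements. The
    # tensors below are placeholders invented for this example.
    x = torch.rand(2, 3)                      # does not require grad initially
    y = torch.rand(2, 3, requires_grad=True)  # already requires grad
    inputs = (x, y)
    flags = apply_gradient_requirements(inputs, warn=False)  # flags == [False, True]
    # ... a forward/backward pass would go here ...
    undo_gradient_requirements(inputs, flags)
    assert not x.requires_grad and y.requires_grad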
def compute_gradients(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
r"""
Computes gradients of the output with respect to inputs for an
arbitrary forward function.
Args:
forward_fn: forward function. This can be for example model's
forward function.
inputs: Input at which gradients are evaluated,
will be passed to forward_fn.
target_ind: Index of the target class for which gradients
must be computed (classification only).
additional_forward_args: Additional input arguments that forward
function requires. It takes an empty tuple (no additional
arguments) if no additional arguments are required
"""
with torch.autograd.set_grad_enabled(True):
# runs forward pass
outputs = _run_forward(forward_fn, inputs, target_ind, additional_forward_args)
assert outputs[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
# torch.unbind(outputs) yields a tuple of scalar tensors containing
# batch_size * #steps elements
grads = torch.autograd.grad(torch.unbind(outputs), inputs, create_graph=True, retain_graph=True)  # create_graph/retain_graph enabled to allow higher-order gradients
return grads
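def _example_compute_gradients(model: Module, batch: Tensor) -> Tuple[Tensor, ...]:
    # Hedged sketch (not part of the original API): input gradients of the
    # class-0 score for a classification model. `model` and `batch` are
    # caller-supplied placeholders; any differentiable forward_fn works.
    inputs = (batch.clone().requires_grad_(),)
    return compute_gradients(model, inputs, target_ind=0)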
def _neuron_gradients(
inputs: Union[Tensor, Tuple[Tensor, ...]],
saved_layer: Dict[device, Tuple[Tensor, ...]],
key_list: List[device],
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
) -> Tuple[Tensor, ...]:
with torch.autograd.set_grad_enabled(True):
gradient_tensors = []
for key in key_list:
current_out_tensor = _verify_select_neuron(
saved_layer[key], gradient_neuron_selector
)
gradient_tensors.append(
torch.autograd.grad(
torch.unbind(current_out_tensor)
if current_out_tensor.numel() > 1
else current_out_tensor,
inputs,
)
)
_total_gradients = _reduce_list(gradient_tensors, sum)
return _total_gradients
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]:
return _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
gradient_neuron_selector=None,
grad_enabled=grad_enabled,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: Literal[False] = False,
require_layer_grads: bool = False,
) -> Dict[Module, Dict[device, Tuple[Tensor, ...]]]:
...
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
*,
forward_hook_with_return: Literal[True],
require_layer_grads: bool = False,
) -> Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor]:
...
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: bool = False,
require_layer_grads: bool = False,
) -> Union[
Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor],
Dict[Module, Dict[device, Tuple[Tensor, ...]]],
]:
r"""
A helper function that sets a hook on the model's `layer`, runs the forward
pass and returns intermediate layer results, stored in a dictionary,
and optionally also the output of the forward function. The keys in the
dictionary are the device ids and the values are corresponding intermediate layer
results, either the inputs or the outputs of the layer depending on whether we set
`attribute_to_layer_input` to True or False.
This is especially useful when we execute forward pass in a distributed setting,
using `DataParallel`s for example.
"""
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]] = defaultdict(dict)
lock = threading.Lock()
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
# Set a forward hook on specified module and run forward pass to
# get layer output tensor(s).
# For DataParallel models, each partition adds entry to dictionary
# with key as device and value as corresponding Tensor.
def hook_wrapper(original_module):
def forward_hook(module, inp, out=None):
eval_tsrs = inp if attribute_to_layer_input else out
is_eval_tuple = isinstance(eval_tsrs, tuple)
if not is_eval_tuple:
eval_tsrs = (eval_tsrs,)
if require_layer_grads:
apply_gradient_requirements(eval_tsrs, warn=False)
with lock:
nonlocal saved_layer
# Note that cloning behaviour of `eval_tsr` is different
# when `forward_hook_with_return` is set to True. This is because
# otherwise `backward()` on the last output layer won't execute.
if forward_hook_with_return:
saved_layer[original_module][eval_tsrs[0].device] = eval_tsrs
eval_tsrs_to_return = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
if not is_eval_tuple:
eval_tsrs_to_return = eval_tsrs_to_return[0]
return eval_tsrs_to_return
else:
saved_layer[original_module][eval_tsrs[0].device] = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
return forward_hook
all_hooks = []
try:
for single_layer in all_layers:
if attribute_to_layer_input:
all_hooks.append(
single_layer.register_forward_pre_hook(hook_wrapper(single_layer))
)
else:
all_hooks.append(
single_layer.register_forward_hook(hook_wrapper(single_layer))
)
output = _run_forward(
forward_fn,
inputs,
target=target_ind,
additional_forward_args=additional_forward_args,
)
finally:
for hook in all_hooks:
hook.remove()
if len(saved_layer) == 0:
raise AssertionError("Forward hook did not obtain any outputs for given layer")
if forward_hook_with_return:
return saved_layer, output
return saved_layer
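def _example_layer_distributed_eval(model: Module, layer: Module, batch: Tensor):
    # Hedged sketch (not part of the original API): capture the output of
    # `layer` during one forward pass of `model`. All arguments are
    # caller-supplied placeholders.
    saved = _forward_layer_distributed_eval(model, batch, layer)
    # `saved` maps the layer module to {device: (captured tensors, ...)}.
    return saved[layer]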
def _gather_distributed_tensors(
saved_layer: Dict[device, Tuple[Tensor, ...]],
device_ids: Union[None, List[int]] = None,
key_list: Union[None, List[device]] = None,
) -> Tuple[Tensor, ...]:
r"""
A helper function to concatenate intermediate layer results stored on
different devices in `saved_layer`. `saved_layer` is a dictionary that
contains `device_id` as a key and intermediate layer results (either
the input or the output of the layer) stored on the device corresponding to
the key.
`key_list` is a list of devices in appropriate ordering for concatenation
and if not provided, keys are sorted based on device ids.
If only one key exists (standard model), key list simply has one element.
"""
if key_list is None:
key_list = _sort_key_list(list(saved_layer.keys()), device_ids)
return _reduce_list([saved_layer[device_id] for device_id in key_list])
def _extract_device_ids(
forward_fn: Callable,
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]],
device_ids: Union[None, List[int]],
) -> Union[None, List[int]]:
r"""
A helper function to extract device_ids from `forward_function` in case it is
provided as part of a `DataParallel` model or if it is accessible from
`forward_fn`.
In case input device_ids is not None, this function returns that value.
"""
# Multiple devices / keys implies a DataParallel model, so we look for
# device IDs if given or available from forward function
# (DataParallel model object).
if (
max(len(saved_layer[single_layer]) for single_layer in saved_layer) > 1
and device_ids is None
):
if (
hasattr(forward_fn, "device_ids")
and cast(Any, forward_fn).device_ids is not None
):
device_ids = cast(Any, forward_fn).device_ids
else:
raise AssertionError(
"Layer tensors are saved on multiple devices, however unable to access"
" device ID list from the `forward_fn`. Device ID list must be"
" accessible from `forward_fn`. For example, they can be retrieved"
" if `forward_fn` is a model of type `DataParallel`. It is used"
" for identifying device batch ordering."
)
return device_ids
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tensor, ...],
List[Tuple[Tensor, ...]],
]:
"""
This method computes forward evaluation for a particular layer using a
forward hook. If a gradient_neuron_selector is provided, then gradients with
respect to that neuron in the layer output are also returned.
These functionalities are combined due to the behavior of DataParallel models
with hooks, in which hooks are executed once per device. We need to internally
combine the separated tensors from devices by concatenating based on device_ids.
Any necessary gradients must be taken with respect to each independent batched
tensor, so the gradients are computed and combined appropriately.
More information regarding the behavior of forward hooks with DataParallel models
can be found in the PyTorch data parallel documentation. We maintain the separate
evals in a dictionary protected by a lock, analogous to the gather implementation
for the core PyTorch DataParallel implementation.
"""
grad_enabled = True if gradient_neuron_selector is not None else grad_enabled
with torch.autograd.set_grad_enabled(grad_enabled):
saved_layer = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(list(next(iter(saved_layer.values())).keys()), device_ids)
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
_gather_distributed_tensors(saved_layer[layer], key_list=key_list),
inp_grads,
)
else:
if isinstance(layer, Module):
return _gather_distributed_tensors(saved_layer[layer], key_list=key_list)
else:
return [
_gather_distributed_tensors(saved_layer[curr_layer], key_list=key_list)
for curr_layer in layer
]
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: List[Module],
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: ModuleOrModuleList,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]],
]:
r"""
Computes gradients of the output with respect to a given layer as well
as the output evaluation of the layer for an arbitrary forward function
and given input.
For data parallel models, hooks are executed once per device, so we
need to internally combine the separated tensors from devices by
concatenating based on device_ids. Any necessary gradients must be taken
with respect to each independent batched tensor, so the gradients are
computed and combined appropriately.
More information regarding the behavior of forward hooks with DataParallel
models can be found in the PyTorch data parallel documentation. We maintain
the separate inputs in a dictionary protected by a lock, analogous to the
gather implementation for the core PyTorch DataParallel implementation.
NOTE: To properly handle inplace operations, a clone of the layer output
is stored. This structure inhibits execution of a backward hook on the last
module for the layer output when computing the gradient with respect to
the input, since we store an intermediate clone, as
opposed to the true module output. If backward module hooks are necessary
for the final module when computing input gradients, utilize
_forward_layer_eval_with_neuron_grads instead.
Args:
forward_fn: forward function. This can be for example model's
forward function.
layer: Layer for which gradients / output will be evaluated.
inputs: Input at which gradients are evaluated,
will be passed to forward_fn.
target_ind: Index of the target class for which gradients
must be computed (classification only).
output_fn: An optional function that is applied to the layer inputs or
outputs depending whether the `attribute_to_layer_input` is
set to `True` or `False`
args: Additional input arguments that forward function requires.
It takes an empty tuple (no additional arguments) if no
additional arguments are required
Returns:
2-element tuple of **gradients**, **evals**:
- **gradients**:
Gradients of output with respect to target layer output.
- **evals**:
Target layer output for given input.
"""
with torch.autograd.set_grad_enabled(True):
# saved_layer is a dictionary mapping device to a tuple of
# layer evaluations on that device.
saved_layer, output = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
target_ind=target_ind,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
forward_hook_with_return=True,
require_layer_grads=True,
)
assert output[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(
list(next(iter(saved_layer.values())).keys()), device_ids
)
all_outputs: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
if isinstance(layer, Module):
all_outputs = _reduce_list(
[
saved_layer[layer][device_id]
if output_fn is None
else output_fn(saved_layer[layer][device_id])
for device_id in key_list
]
)
else:
all_outputs = [
_reduce_list(
[
saved_layer[single_layer][device_id]
if output_fn is None
else output_fn(saved_layer[single_layer][device_id])
for device_id in key_list
]
)
for single_layer in layer
]
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
grad_inputs = tuple(
layer_tensor
for single_layer in all_layers
for device_id in key_list
for layer_tensor in saved_layer[single_layer][device_id]
)
saved_grads = torch.autograd.grad(torch.unbind(output), grad_inputs)
offset = 0
all_grads: List[Tuple[Tensor, ...]] = []
for single_layer in all_layers:
num_tensors = len(next(iter(saved_layer[single_layer].values())))
curr_saved_grads = [
saved_grads[i : i + num_tensors]
for i in range(
offset, offset + len(key_list) * num_tensors, num_tensors
)
]
offset += len(key_list) * num_tensors
if output_fn is not None:
curr_saved_grads = [
output_fn(curr_saved_grad) for curr_saved_grad in curr_saved_grads
]
all_grads.append(_reduce_list(curr_saved_grads))
layer_grads: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
layer_grads = all_grads
if isinstance(layer, Module):
layer_grads = all_grads[0]
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
cast(Tuple[Tensor, ...], layer_grads),
cast(Tuple[Tensor, ...], all_outputs),
inp_grads,
)
return layer_grads, all_outputs # type: ignore
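def _example_layer_gradients_and_eval(model: Module, layer: Module, batch: Tensor):
    # Hedged sketch (not part of the original API): gradients of the class-0
    # score with respect to `layer`'s output, together with that output, in a
    # single forward/backward pass. Arguments are caller-supplied placeholders.
    layer_grads, layer_evals = compute_layer_gradients_and_eval(
        model, layer, batch, target_ind=0
    )
    return layer_grads, layer_evals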
def construct_neuron_grad_fn(
layer: Module,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_neuron_input: bool = False,
) -> Callable:
def grad_fn(
forward_fn: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
_, grads = _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args,
gradient_neuron_selector=neuron_selector,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
return grads
return grad_fn
def _compute_jacobian_wrt_params(
model: Module,
inputs: Union[Tuple[Tensor], Tensor],
labels: Optional[Tensor] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
) -> Tuple[Tensor, ...]:
r"""
Computes the Jacobian of a batch of test examples given a model, and optional
loss function and target labels. This method is equivalent to calculating the
gradient for every individual example in the minibatch.
Args:
model (torch.nn.Module): The trainable model providing the forward pass
inputs (Tensor): The minibatch for which the forward pass is computed.
The dimensions of input are (N, *) where N is the batch_size.
The input must have a batch dimension, even if batch_size = 1.
labels (Tensor or None): Labels for input if computing a loss function.
loss_fn (torch.nn.Module or Callable or None): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='none'`.
Returns:
grads (Tuple of Tensor): Returns the Jacobian for the minibatch as a
tuple of gradients corresponding to the tuple of trainable parameters
returned by `model.parameters()`. Each object grads[i] refers to the
gradients for the parameters in the i-th trainable layer of the model.
Each grads[i] object is a tensor with the gradients for the `inputs`
batch. For example, grads[i][j] would reference the gradients for the
parameters of the i-th layer, for the j-th member of the minibatch.
"""
with torch.autograd.set_grad_enabled(True):
out = model(inputs)
assert out.dim() != 0, "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
if hasattr(loss_fn, "reduction"):
msg0 = "Please ensure loss_fn.reduction is set to `none`"
assert loss_fn.reduction == "none", msg0 # type: ignore
else:
msg1 = (
"Loss function is applying a reduction. Please ensure "
f"Output shape: {out.shape} and Loss shape: {loss.shape} "
"are matching."
)
assert loss.dim() != 0, msg1
assert out.shape[0] == loss.shape[0], msg1
out = loss
grads_list = [
torch.autograd.grad(
outputs=out[i],
inputs=model.parameters(), # type: ignore
grad_outputs=torch.ones_like(out[i]),
retain_graph=True,
)
for i in range(out.shape[0])
]
grads = tuple([torch.stack(x) for x in zip(*grads_list)])
return tuple(grads)
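def _example_per_sample_jacobian(model: Module, batch: Tensor, labels: Tensor):
    # Hedged sketch (not part of the original API): per-example parameter
    # gradients for a cross-entropy loss. As required by the docstring above,
    # the loss uses reduction='none' so one loss value per example is kept.
    loss_fn = torch.nn.CrossEntropyLoss(reduction="none")
    return _compute_jacobian_wrt_params(model, batch, labels, loss_fn)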
def _compute_jacobian_wrt_params_autograd_hacks(
model: Module,
inputs: Union[Tuple[Tensor], Tensor],
labels: Optional[Tensor] = None,
loss_fn: Optional[Module] = None,
reduction_type: Optional[str] = "sum",
) -> Tuple[Any, ...]:
r"""
NOT SUPPORTED FOR OPEN SOURCE. This method uses an internal 'hack' and is currently
not supported.
Computes the Jacobian of a batch of test examples given a model, and optional
loss function and target labels. This method uses autograd_hacks to fully vectorize
the Jacobian calculation. Currently, only linear and conv2d layers are supported.
User must `add_hooks(model)` before calling this function.
Args:
model (torch.nn.Module): The trainable model providing the forward pass
inputs (Tensor): The minibatch for which the forward pass is computed.
The dimensions of input are (N, *) where N is the batch_size.
The input must have a batch dimension, even if batch_size = 1.
labels (Tensor or None): Labels for input if computing a loss function.
loss_fn (torch.nn.Module or Callable or None): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='sum'` or
`reduction='mean'`.
reduction_type (str): The type of reduction applied. If a loss_fn is passed,
this should match `loss_fn.reduction`. Else if gradients are being
computed on direct model outputs (scores), then 'sum' should be used.
Defaults to 'sum'.
Returns:
grads (Tuple of Tensor): Returns the Jacobian for the minibatch as a
tuple of gradients corresponding to the tuple of trainable parameters
returned by `model.parameters()`. Each object grads[i] refers to the
gradients for the parameters in the i-th trainable layer of the model.
Each grads[i] object is a tensor with the gradients for the `inputs`
batch. For example, grads[i][j] would reference the gradients for the
parameters of the i-th layer, for the j-th member of the minibatch.
"""
from captum._utils.fb import autograd_hacks
with torch.autograd.set_grad_enabled(True):
autograd_hacks.add_hooks(model)
out = model(inputs)
assert out.dim() != 0, "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
if hasattr(loss_fn, "reduction"):
msg0 = "Please ensure loss_fn.reduction is set to `sum` or `mean`"
assert loss_fn.reduction != "none", msg0
msg1 = (
f"loss_fn.reduction ({loss_fn.reduction}) does not match reduction "
f"type ({reduction_type}). Please ensure they are matching."
)
assert loss_fn.reduction == reduction_type, msg1
msg2 = (
"Please ensure custom loss function is applying either a "
"sum or mean reduction."
)
assert out.shape != loss.shape, msg2
if reduction_type != "sum" and reduction_type != "mean":
raise ValueError(
f"{reduction_type} is not a valid value for reduction_type. "
"Must be either 'sum' or 'mean'."
)
out = loss
model.zero_grad()
out.backward(gradient=torch.ones_like(out))
autograd_hacks.compute_grad1(model, loss_type=reduction_type)
grads = tuple(
param.grad1 # type: ignore
for param in model.parameters()
if hasattr(param, "grad1")
)
autograd_hacks.clear_backprops(model)
autograd_hacks.remove_hooks(model)
return grads
|
#https://finance.yahoo.com/screener/6039bb71-c189-4b62-ab6d-6dbd659495bb?count=200
import requests
from bs4 import BeautifulSoup
# import json
my_screener = requests.get('https://finance.yahoo.com/screener/6039bb71-c189-4b62-ab6d-6dbd659495bb?count=200')
#print(my_screener)
with open('code/reit-data/reits-screener.html','r') as ticker_report:
ticker_table_string = ticker_report.read()
soup = BeautifulSoup(ticker_table_string, "html.parser")
tables = soup.find_all("table")
#print(tables[0])
tickers = tables[0].find_all("a")
for ticker in tickers:
print(ticker.text)
|
from collections import defaultdict
import logging
import random
from faker import Faker
import requests
logger = logging.getLogger(__file__)
def test_create_user():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
assert response.status_code == 201
response_json = response.json()
assert len(response_json) == 1
user_id = response_json.get('user_id')
assert user_id
assert isinstance(user_id, int)
def test_get_token():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
assert response.status_code == 200
response_json = response.json()
assert len(response_json) == 1
token = response_json.get('token')
assert token
def test_get_user():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
user_id = response.json()['user_id']
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
token = response.json()['token']
headers = {'Authorization': f'Bearer {token}'}
response = requests.get(
f'http://127.0.0.1:5000/api/v1/users/{user_id}',
headers=headers
)
logger.info(f'Receive response: {response.text}')
expected_user = {
'id': user_id,
'username': user_info['username'],
'common_name': user_info['common_name'],
'email': user_info['email']
}
assert response.status_code == 200
assert response.json() == expected_user
def test_update_user():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
user_id = response.json()['user_id']
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
token = response.json()['token']
headers = {'Authorization': f'Bearer {token}'}
fields_to_update = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
}
response = requests.put(
f'http://127.0.0.1:5000/api/v1/users/{user_id}',
headers=headers,
json=fields_to_update
)
logger.info(f'Receive response: {response.text}')
expected_user = {
'id': user_id,
'username': fields_to_update['username'],
'common_name': fields_to_update['common_name'],
'email': fields_to_update['email']
}
assert response.status_code == 200
assert response.json() == expected_user
def test_get_user_posts():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
user_id = response.json()['user_id']
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
token = response.json()['token']
forum_info = {
'name': fake.company() + str(random.randint(1, 1000)),
'short_name': fake.company_suffix() + str(random.randint(1, 1000))
}
headers = {'Authorization': f'Bearer {token}'}
response = requests.post(
        'http://127.0.0.1:5000/api/v1/forums/create',
headers=headers,
json=forum_info
)
logger.info(f'Receive response: {response.text}')
assert response.status_code == 201
forum_id = response.json()['forum_id']
thread_info = {
'name': fake.company() + str(random.randint(1, 1000)),
'short_name': fake.company_suffix() + str(random.randint(1, 1000)),
'text': fake.text(),
'forum_id': forum_id
}
response = requests.post(
'http://127.0.0.1:5000/api/v1/threads/create',
json=thread_info,
headers=headers
)
logger.info(f'Receive response: {response.text}')
thread_id = response.json()['thread_id']
headers = {'Authorization': f'Bearer {token}'}
expected_posts = defaultdict(dict)
for _ in range(3):
post_text = fake.text()
response = requests.post(
'http://127.0.0.1:5000/api/v1/posts/create',
json={'text': post_text, 'thread_id': thread_id},
headers=headers
)
assert response.status_code == 201
expected_posts[response.json()['post_id']] = post_text
response = requests.get(
f'http://127.0.0.1:5000/api/v1/users/{user_id}/posts',
headers=headers
)
logger.info(f'Get user posts response: {response.text}')
assert response.status_code == 200
response_json = response.json()
returned_posts = response_json.get('user_posts')
assert returned_posts is not None
assert len(returned_posts) == len(expected_posts)
for post in returned_posts:
post_id = post.get('id')
assert post_id in expected_posts
expected_text = expected_posts[post_id]
assert post.get('text') == expected_text
assert post.get('user_id') == user_id
assert post.get('creation_timestamp')
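# The create-user / get-token preamble above is repeated verbatim in every test.
# A possible consolidation (editor's sketch, not part of the original module) is a
# small helper built only from the calls already used in these tests:
def _create_user_with_token(fake):
    """Create a random user and return (user_info, user_id, bearer-auth headers)."""
    user_info = {
        'username': fake.first_name() + str(random.randint(1, 1000)),
        'common_name': fake.name(),
        'email': fake.email(),
        'password': 'pass'
    }
    response = requests.post('http://127.0.0.1:5000/api/v1/users/create', json=user_info)
    user_id = response.json()['user_id']
    response = requests.post(
        'http://127.0.0.1:5000/api/v1/tokens',
        auth=(user_info['username'], user_info['password'])
    )
    token = response.json()['token']
    return user_info, user_id, {'Authorization': f'Bearer {token}'}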
|
from abc import ABC, abstractmethod
import logging
import os
from typing import Optional, Union, Iterable, Dict
import h5py
import numpy as np
import torch
from PIL import Image
from tqdm import tqdm
from brainio.stimuli import StimulusSet
from model_tools.activations import ActivationsModel
from model_tools.activations.core import flatten, change_dict
from model_tools.utils import fullname, s3
from model_tools.utils.pca import IncrementalPCAPytorch, PCAPytorch
from result_caching import store_dict
Stimuli = Union[Iterable[str], StimulusSet, Iterable[os.PathLike]]
BasePCA = Union[IncrementalPCAPytorch, PCAPytorch]
class LayerHookBase(ABC):
def __init__(self, activations_extractor: ActivationsModel, identifier: Optional[str] = None):
self._extractor = activations_extractor
self.identifier = identifier
self.handle = None
def __call__(self, batch_activations: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
self.setup(batch_activations)
return change_dict(batch_activations, self.layer_apply, keep_name=True,
multithread=os.getenv('MT_MULTITHREAD', '1') == '1')
@classmethod
def hook(cls, activations_extractor: ActivationsModel, identifier: Optional[str] = None, **kwargs):
hook = cls(activations_extractor=activations_extractor, identifier=identifier, **kwargs)
assert not cls.is_hooked(activations_extractor), f"{cls.__name__} is already hooked"
handle = activations_extractor.register_batch_activations_hook(hook)
hook.handle = handle
return handle
@classmethod
def is_hooked(cls, activations_extractor: ActivationsModel) -> bool:
return any(isinstance(hook, cls) for hook in
activations_extractor._extractor._batch_activations_hooks.values())
def setup(self, batch_activations: Dict[str, np.ndarray]) -> None:
pass
@abstractmethod
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
pass
class LayerGlobalMaxPool2d(LayerHookBase):
def __init__(self, *args, identifier: Optional[str] = None, **kwargs):
if identifier is None:
identifier = 'maxpool'
super(LayerGlobalMaxPool2d, self).__init__(*args, **kwargs, identifier=identifier)
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
if activations.ndim != 4:
return activations
return np.max(activations, axis=(2, 3))
class LayerRandomProjection(LayerHookBase):
def __init__(self, *args,
n_components: int = 1000,
force: bool = False,
identifier: Optional[str] = None,
**kwargs):
if identifier is None:
identifier = f'randproj_ncomponents={n_components}_force={force}'
super(LayerRandomProjection, self).__init__(*args, **kwargs, identifier=identifier)
self._n_components = n_components
self._force = force
self._layer_ws = {}
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
activations = flatten(activations)
if activations.shape[1] <= self._n_components and not self._force:
return activations
if layer not in self._layer_ws:
w = np.random.normal(size=(activations.shape[-1], self._n_components)) / np.sqrt(self._n_components)
self._layer_ws[layer] = w
else:
w = self._layer_ws[layer]
activations = activations @ w
return activations
class LayerPCA(LayerHookBase):
def __init__(self, *args,
n_components: int = 1000,
force: bool = False,
stimuli: Optional[Stimuli] = None,
stimuli_identifier: Optional[str] = None,
identifier: Optional[str] = None,
batch_size: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
**kwargs):
if stimuli is None:
# Default to ImageNet validation with 1 image per class
stimuli = _get_imagenet_val(n_components)
stimuli_identifier = 'brainscore-imagenetval'
if isinstance(stimuli, StimulusSet) and stimuli_identifier is None and hasattr(stimuli, 'identifier'):
stimuli_identifier = stimuli.identifier
if stimuli_identifier is None:
raise ValueError('If passing a list of paths for stimuli '
'or a StimulusSet without an identifier attribute, '
'you must provide a stimuli_identifier')
if identifier is None:
identifier = f'pca_ncomponents={n_components}_force={force}_stimuli_identifier={stimuli_identifier}'
super(LayerPCA, self).__init__(*args, **kwargs, identifier=identifier)
self._n_components = n_components
self._force = force
self._stimuli_identifier = stimuli_identifier
self._stimuli = stimuli
self._batch_size = batch_size
self._device = device
self._logger = logging.getLogger(fullname(self))
self._layer_pcas = {}
def setup(self, batch_activations) -> None:
layers = batch_activations.keys()
missing_layers = [layer for layer in layers if layer not in self._layer_pcas]
if len(missing_layers) == 0:
return
layer_pcas = self._pcas(identifier=self._extractor.identifier,
layers=missing_layers,
n_components=self._n_components,
force=self._force,
stimuli_identifier=self._stimuli_identifier)
self._layer_pcas = {**self._layer_pcas, **layer_pcas}
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
pca = self._layer_pcas[layer]
activations = flatten(activations)
if pca is None:
return activations
return pca.transform(torch.from_numpy(activations).to(self._device))
@store_dict(dict_key='layers', identifier_ignore=['layers'])
def _pcas(self, identifier, layers, n_components, force, stimuli_identifier) -> Dict[str, BasePCA]:
self._logger.debug(f'Retrieving {stimuli_identifier} activations')
self.handle.disable()
activations = self._extractor(self._stimuli, layers=layers, stimuli_identifier=False)
activations = {layer: activations.sel(layer=layer).values
for layer in np.unique(activations['layer'])}
assert len(set(layer_activations.shape[0] for layer_activations in activations.values())) == 1, "stimuli differ"
self.handle.enable()
self._logger.debug(f'Computing {stimuli_identifier} principal components')
progress = tqdm(total=len(activations), desc="layer principal components", leave=False)
def init_and_progress(layer, activations):
activations = flatten(activations)
if activations.shape[1] <= n_components and not force:
self._logger.debug(f"Not computing principal components for {layer} "
f"activations {activations.shape} as shape is small enough already")
progress.update(1)
return None
n_components_ = n_components if activations.shape[1] > n_components else activations.shape[1]
if self._batch_size is None:
pca = PCAPytorch(n_components_, device=self._device)
pca.fit(torch.from_numpy(activations).to(self._device))
else:
pca = IncrementalPCAPytorch(n_components_, device=self._device)
for i in range(0, activations.shape[0], self._batch_size):
activations_batch = torch.from_numpy(activations[i:i + self._batch_size]).to(self._device)
pca.fit_partial(activations_batch)
return pca
layer_pcas = change_dict(activations, init_and_progress, keep_name=True,
multithread=os.getenv('MT_MULTITHREAD', '1') == '1')
progress.close()
return layer_pcas
def _get_imagenet_val(num_images):
_logger = logging.getLogger(fullname(_get_imagenet_val))
num_classes = 1000
num_images_per_class = (num_images - 1) // num_classes
base_indices = np.arange(num_images_per_class).astype(int)
indices = []
for i in range(num_classes):
indices.extend(50 * i + base_indices)
for i in range((num_images - 1) % num_classes + 1):
indices.extend(50 * i + np.array([num_images_per_class]).astype(int))
framework_home = os.path.expanduser(os.getenv('MT_HOME', '~/.model-tools'))
imagenet_filepath = os.getenv('MT_IMAGENET_PATH', os.path.join(framework_home, 'imagenet2012.hdf5'))
imagenet_dir = f"{imagenet_filepath}-files"
os.makedirs(imagenet_dir, exist_ok=True)
if not os.path.isfile(imagenet_filepath):
os.makedirs(os.path.dirname(imagenet_filepath), exist_ok=True)
_logger.debug(f"Downloading ImageNet validation to {imagenet_filepath}")
s3.download_file("imagenet2012-val.hdf5", imagenet_filepath)
filepaths = []
with h5py.File(imagenet_filepath, 'r') as f:
for index in indices:
imagepath = os.path.join(imagenet_dir, f"{index}.png")
if not os.path.isfile(imagepath):
image = np.array(f['val/images'][index])
Image.fromarray(image).save(imagepath)
filepaths.append(imagepath)
return filepaths
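# Usage sketch (editor's addition, not part of the original module). The hooks above are
# attached via their `hook` classmethods; `extractor` stands for an already constructed
# ActivationsModel and is assumed here, everything else mirrors the classes in this file.
#
#   handle = LayerGlobalMaxPool2d.hook(extractor)
#   # or, to project activations onto components fitted on held-out stimuli:
#   # handle = LayerPCA.hook(extractor, n_components=1000,
#   #                        stimuli=stimulus_paths, stimuli_identifier='my-stimuli')
#
# The returned handle supports disable()/enable(), as used inside LayerPCA._pcas to avoid
# re-hooking the activations that the PCA itself is being fitted on.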
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 18:03:39 2020
@author: akanksha
"""
import pandas as pd
import numpy as np
import joblib
from itertools import combinations
import sklearn
from functools import reduce
import argparse
import os
parser = argparse.ArgumentParser(description = 'Prediction from combined models for the reads.')
parser.add_argument('--methodsfile','-i', type = str, required = True,
help = 'TSV file containing name and path of the method output tsv file. The output tsv file from the method should be in the format [ID,Pos,Strand,Score]. Can be compressed in gz.')
parser.add_argument('--model','-m', choices = ["default","optimized"], required = True, type = str,
help = 'which model to select from default RF or optimized RF with max_depth 3 and n_estimator 10')
parser.add_argument('--output', '-o',type = str, required = True,
help = 'Where to store the outputs')
options = parser.parse_args()
def mod_file(data_file_path):
data_file=pd.read_csv(data_file_path, header=0, sep="\t")
    name=os.path.basename(data_file_path).split(".")[0]
data_file.drop_duplicates(subset=['Chr',"ID","Pos","Strand"],inplace=True) # add chr
data_file.reset_index(inplace=True,drop=True)
mask=data_file.index[data_file.Strand=="-"].tolist()
data_file["Pos"][mask]=data_file["Pos"][mask]-1
data_file.drop(["Strand"], axis=1, inplace=True)
data_file.rename(columns={"Score":name}, inplace=True)
data_file.reset_index(inplace=True, drop=True)
return(data_file)
def main(mp,combine_file):
loaded_model = joblib.load(open(mp, 'rb'))
X=combine_file[combine_file.columns[3:]] #2:
X=sklearn.preprocessing.MinMaxScaler().fit_transform(X)
prediction=pd.DataFrame(loaded_model.predict(X)) ##
prediction_prob=pd.DataFrame(loaded_model.predict_proba(X))
prediction.rename(columns={0:"Prediction"}, inplace=True)
prediction_prob=prediction_prob[[1]]
prediction_prob.rename(columns={1:"Prob_methylation"}, inplace=True)
    final_output=pd.concat([combine_file[combine_file.columns[:3]],prediction,prediction_prob], axis=1) #:2
    out_dir = "combined_model_results"
    os.makedirs(out_dir, exist_ok=True)
    final_output.to_csv(out_dir + '/' + options.output, header=True, index=None, sep='\t')
if __name__ == '__main__':
df_file=pd.read_csv(options.methodsfile, header=None, sep='\t')
if options.model=="default":
fillval="default"
else:
fillval="max_depth_3_n_estimator_10"
modelname='_'.join(df_file[0])
mp='saved_models/rf_model_'+fillval+'_'+modelname+'.model'
dfs=[]
for i in df_file[1]:
dfs.append(mod_file(i))
combine_file=reduce(lambda left,right: pd.merge(left, right, how='inner',on=["ID","Chr","Pos"]), dfs) # add chr
combine_file.drop_duplicates(subset=["ID","Chr","Pos"],inplace=True) # add chr
combine_file.reset_index(inplace=True, drop=True)
main(mp,combine_file) ##
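# Input sketch (editor's note; file and script names below are placeholders, not from the original).
# The --methodsfile TSV has no header and two columns: <method name> <path to that method's
# per-read output>. Each referenced file is itself a TSV with columns ID, Pos, Strand, Score
# plus a Chr column (see the "# add chr" notes above), as read and merged by mod_file().
#
#   nanopolish    /data/nanopolish_per_read.tsv
#   deepsignal    /data/deepsignal_per_read.tsv
#
# Example invocation (placeholder names):
#   python combine_predict.py -i methods.tsv -m default -o predictions.tsv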
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import random
import re
from ..compat import compat_str
from ..utils import (ExtractorError, determine_ext, int_or_none,
parse_duration, str_or_none, try_get, url_or_none,
urljoin)
from .common import InfoExtractor
class NRKBaseIE(InfoExtractor):
_GEO_COUNTRIES = ["NO"]
_CDN_REPL_REGEX = r"""(?x)://
(?:
nrkod\d{1,2}-httpcache0-47115-cacheod0\.dna\.ip-only\.net/47115-cacheod0|
nrk-od-no\.telenorcdn\.net|
minicdn-od\.nrk\.no/od/nrkhd-osl-rr\.netwerk\.no/no
)/"""
def _extract_nrk_formats(self, asset_url, video_id):
if re.match(r"https?://[^/]+\.akamaihd\.net/i/", asset_url):
return self._extract_akamai_formats(asset_url, video_id)
asset_url = re.sub(r"(?:bw_(?:low|high)=\d+|no_audio_only)&?", "", asset_url)
formats = self._extract_m3u8_formats(
asset_url, video_id, "mp4", "m3u8_native", fatal=False
)
if not formats and re.search(self._CDN_REPL_REGEX, asset_url):
formats = self._extract_m3u8_formats(
re.sub(
self._CDN_REPL_REGEX,
"://nrk-od-%02d.akamaized.net/no/" % random.randint(0, 99),
asset_url,
),
video_id,
"mp4",
"m3u8_native",
fatal=False,
)
return formats
def _raise_error(self, data):
MESSAGES = {
"ProgramRightsAreNotReady": "Du kan dessverre ikke se eller høre programmet",
"ProgramRightsHasExpired": "Programmet har gått ut",
"NoProgramRights": "Ikke tilgjengelig",
"ProgramIsGeoBlocked": "NRK har ikke rettigheter til å vise dette programmet utenfor Norge",
}
message_type = data.get("messageType", "")
# Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
if (
"IsGeoBlocked" in message_type
or try_get(data, lambda x: x["usageRights"]["isGeoBlocked"]) is True
):
self.raise_geo_restricted(
msg=MESSAGES.get("ProgramIsGeoBlocked"), countries=self._GEO_COUNTRIES
)
message = data.get("endUserMessage") or MESSAGES.get(message_type, message_type)
raise ExtractorError("%s said: %s" % (self.IE_NAME, message), expected=True)
def _call_api(self, path, video_id, item=None, note=None, fatal=True, query=None):
return self._download_json(
urljoin("https://psapi.nrk.no/", path),
video_id,
note or "Downloading %s JSON" % item,
fatal=fatal,
query=query,
headers={"Accept-Encoding": "gzip, deflate, br"},
)
class NRKIE(NRKBaseIE):
_VALID_URL = r"""(?x)
(?:
nrk:|
https?://
(?:
(?:www\.)?nrk\.no/video/(?:PS\*|[^_]+_)|
v8[-.]psapi\.nrk\.no/mediaelement/
)
)
(?P<id>[^?\#&]+)
"""
_TESTS = [
{
# video
"url": "http://www.nrk.no/video/PS*150533",
"md5": "f46be075326e23ad0e524edfcb06aeb6",
"info_dict": {
"id": "150533",
"ext": "mp4",
"title": "Dompap og andre fugler i Piip-Show",
"description": "md5:d9261ba34c43b61c812cb6b0269a5c8f",
"duration": 262,
},
},
{
# audio
"url": "http://www.nrk.no/video/PS*154915",
# MD5 is unstable
"info_dict": {
"id": "154915",
"ext": "mp4",
"title": "Slik høres internett ut når du er blind",
"description": "md5:a621f5cc1bd75c8d5104cb048c6b8568",
"duration": 20,
},
},
{
"url": "nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9",
"only_matching": True,
},
{
"url": "nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70",
"only_matching": True,
},
{
"url": "https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9",
"only_matching": True,
},
{
"url": "https://www.nrk.no/video/dompap-og-andre-fugler-i-piip-show_150533",
"only_matching": True,
},
{
"url": "https://www.nrk.no/video/humor/kommentatorboksen-reiser-til-sjos_d1fda11f-a4ad-437a-a374-0398bc84e999",
"only_matching": True,
},
{
# podcast
"url": "nrk:l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "nrk:podcast/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
# clip
"url": "nrk:150533",
"only_matching": True,
},
{
"url": "nrk:clip/150533",
"only_matching": True,
},
{
# program
"url": "nrk:MDDP12000117",
"only_matching": True,
},
{
"url": "nrk:program/ENRK10100318",
"only_matching": True,
},
{
# direkte
"url": "nrk:nrk1",
"only_matching": True,
},
{
"url": "nrk:channel/nrk1",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url).split("/")[-1]
path_templ = "playback/%s/" + video_id
def call_playback_api(item, query=None):
return self._call_api(path_templ % item, video_id, item, query=query)
# known values for preferredCdn: akamai, iponly, minicdn and telenor
manifest = call_playback_api("manifest", {"preferredCdn": "akamai"})
video_id = try_get(manifest, lambda x: x["id"], compat_str) or video_id
if manifest.get("playability") == "nonPlayable":
self._raise_error(manifest["nonPlayable"])
playable = manifest["playable"]
formats = []
for asset in playable["assets"]:
if not isinstance(asset, dict):
continue
if asset.get("encrypted"):
continue
format_url = url_or_none(asset.get("url"))
if not format_url:
continue
asset_format = (asset.get("format") or "").lower()
if asset_format == "hls" or determine_ext(format_url) == "m3u8":
formats.extend(self._extract_nrk_formats(format_url, video_id))
elif asset_format == "mp3":
formats.append(
{
"url": format_url,
"format_id": asset_format,
"vcodec": "none",
}
)
self._sort_formats(formats)
data = call_playback_api("metadata")
preplay = data["preplay"]
titles = preplay["titles"]
title = titles["title"]
alt_title = titles.get("subtitle")
description = preplay.get("description")
duration = parse_duration(playable.get("duration")) or parse_duration(
data.get("duration")
)
thumbnails = []
for image in try_get(preplay, lambda x: x["poster"]["images"], list) or []:
if not isinstance(image, dict):
continue
image_url = url_or_none(image.get("url"))
if not image_url:
continue
thumbnails.append(
{
"url": image_url,
"width": int_or_none(image.get("pixelWidth")),
"height": int_or_none(image.get("pixelHeight")),
}
)
subtitles = {}
for sub in try_get(playable, lambda x: x["subtitles"], list) or []:
if not isinstance(sub, dict):
continue
sub_url = url_or_none(sub.get("webVtt"))
if not sub_url:
continue
sub_key = str_or_none(sub.get("language")) or "nb"
sub_type = str_or_none(sub.get("type"))
if sub_type:
sub_key += "-%s" % sub_type
subtitles.setdefault(sub_key, []).append(
{
"url": sub_url,
}
)
legal_age = try_get(
data, lambda x: x["legalAge"]["body"]["rating"]["code"], compat_str
)
# https://en.wikipedia.org/wiki/Norwegian_Media_Authority
age_limit = None
if legal_age:
if legal_age == "A":
age_limit = 0
elif legal_age.isdigit():
age_limit = int_or_none(legal_age)
is_series = try_get(data, lambda x: x["_links"]["series"]["name"]) == "series"
info = {
"id": video_id,
"title": title,
"alt_title": alt_title,
"description": description,
"duration": duration,
"thumbnails": thumbnails,
"age_limit": age_limit,
"formats": formats,
"subtitles": subtitles,
}
if is_series:
series = season_id = season_number = episode = episode_number = None
programs = self._call_api(
"programs/%s" % video_id, video_id, "programs", fatal=False
)
if programs and isinstance(programs, dict):
series = str_or_none(programs.get("seriesTitle"))
season_id = str_or_none(programs.get("seasonId"))
season_number = int_or_none(programs.get("seasonNumber"))
episode = str_or_none(programs.get("episodeTitle"))
episode_number = int_or_none(programs.get("episodeNumber"))
if not series:
series = title
if alt_title:
title += " - %s" % alt_title
if not season_number:
season_number = int_or_none(
self._search_regex(
r"Sesong\s+(\d+)",
description or "",
"season number",
default=None,
)
)
if not episode:
episode = alt_title if is_series else None
if not episode_number:
episode_number = int_or_none(
self._search_regex(
r"^(\d+)\.", episode or "", "episode number", default=None
)
)
if not episode_number:
episode_number = int_or_none(
self._search_regex(
r"\((\d+)\s*:\s*\d+\)",
description or "",
"episode number",
default=None,
)
)
info.update(
{
"title": title,
"series": series,
"season_id": season_id,
"season_number": season_number,
"episode": episode,
"episode_number": episode_number,
}
)
return info
class NRKTVIE(InfoExtractor):
IE_DESC = "NRK TV and NRK Radio"
_EPISODE_RE = r"(?P<id>[a-zA-Z]{4}\d{8})"
_VALID_URL = r"https?://(?:tv|radio)\.nrk(?:super)?\.no/(?:[^/]+/)*%s" % _EPISODE_RE
_TESTS = [
{
"url": "https://tv.nrk.no/program/MDDP12000117",
"md5": "c4a5960f1b00b40d47db65c1064e0ab1",
"info_dict": {
"id": "MDDP12000117",
"ext": "mp4",
"title": "Alarm Trolltunga",
"description": "md5:46923a6e6510eefcce23d5ef2a58f2ce",
"duration": 2223.44,
"age_limit": 6,
"subtitles": {
"nb-nor": [
{
"ext": "vtt",
}
],
"nb-ttv": [
{
"ext": "vtt",
}
],
},
},
},
{
"url": "https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014",
"md5": "8d40dab61cea8ab0114e090b029a0565",
"info_dict": {
"id": "MUHH48000314",
"ext": "mp4",
"title": "20 spørsmål - 23. mai 2014",
"alt_title": "23. mai 2014",
"description": "md5:bdea103bc35494c143c6a9acdd84887a",
"duration": 1741,
"series": "20 spørsmål",
"episode": "23. mai 2014",
"age_limit": 0,
},
},
{
"url": "https://tv.nrk.no/program/mdfp15000514",
"info_dict": {
"id": "MDFP15000514",
"ext": "mp4",
"title": "Kunnskapskanalen - Grunnlovsjubiléet - Stor ståhei for ingenting",
"description": "md5:89290c5ccde1b3a24bb8050ab67fe1db",
"duration": 4605.08,
"series": "Kunnskapskanalen",
"episode": "Grunnlovsjubiléet - Stor ståhei for ingenting",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
},
{
# single playlist video
"url": "https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2",
"info_dict": {
"id": "MSPO40010515",
"ext": "mp4",
"title": "Sprint fri teknikk, kvinner og menn 06.01.2015",
"description": "md5:c03aba1e917561eface5214020551b7a",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
"expected_warnings": ["Failed to download m3u8 information"],
"skip": "particular part is not supported currently",
},
{
"url": "https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015",
"info_dict": {
"id": "MSPO40010515",
"ext": "mp4",
"title": "Sprint fri teknikk, kvinner og menn 06.01.2015",
"description": "md5:c03aba1e917561eface5214020551b7a",
"age_limit": 0,
},
"expected_warnings": ["Failed to download m3u8 information"],
"skip": "Ikke tilgjengelig utenfor Norge",
},
{
"url": "https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13",
"info_dict": {
"id": "KMTE50001317",
"ext": "mp4",
"title": "Anno - 13. episode",
"description": "md5:11d9613661a8dbe6f9bef54e3a4cbbfa",
"duration": 2340,
"series": "Anno",
"episode": "13. episode",
"season_number": 3,
"episode_number": 13,
"age_limit": 0,
},
"params": {
"skip_download": True,
},
},
{
"url": "https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017",
"info_dict": {
"id": "MUHH46000317",
"ext": "mp4",
"title": "Nytt på Nytt 27.01.2017",
"description": "md5:5358d6388fba0ea6f0b6d11c48b9eb4b",
"duration": 1796,
"series": "Nytt på nytt",
"episode": "27.01.2017",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
"skip": "ProgramRightsHasExpired",
},
{
"url": "https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/serie/dagsnytt/sesong/201507/NPUB21019315",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result(
"nrk:%s" % video_id, ie=NRKIE.ie_key(), video_id=video_id
)
class NRKTVEpisodeIE(InfoExtractor):
_VALID_URL = r"https?://tv\.nrk\.no/serie/(?P<id>[^/]+/sesong/(?P<season_number>\d+)/episode/(?P<episode_number>\d+))"
_TESTS = [
{
"url": "https://tv.nrk.no/serie/hellums-kro/sesong/1/episode/2",
"info_dict": {
"id": "MUHH36005220",
"ext": "mp4",
"title": "Hellums kro - 2. Kro, krig og kjærlighet",
"description": "md5:ad92ddffc04cea8ce14b415deef81787",
"duration": 1563.92,
"series": "Hellums kro",
"season_number": 1,
"episode_number": 2,
"episode": "2. Kro, krig og kjærlighet",
"age_limit": 6,
},
"params": {
"skip_download": True,
},
},
{
"url": "https://tv.nrk.no/serie/backstage/sesong/1/episode/8",
"info_dict": {
"id": "MSUI14000816",
"ext": "mp4",
"title": "Backstage - 8. episode",
"description": "md5:de6ca5d5a2d56849e4021f2bf2850df4",
"duration": 1320,
"series": "Backstage",
"season_number": 1,
"episode_number": 8,
"episode": "8. episode",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
"skip": "ProgramRightsHasExpired",
},
]
def _real_extract(self, url):
display_id, season_number, episode_number = re.match(
self._VALID_URL, url
).groups()
webpage = self._download_webpage(url, display_id)
info = self._search_json_ld(webpage, display_id, default={})
nrk_id = (
info.get("@id")
or self._html_search_meta("nrk:program-id", webpage, default=None)
or self._search_regex(
r'data-program-id=["\'](%s)' % NRKTVIE._EPISODE_RE, webpage, "nrk id"
)
)
assert re.match(NRKTVIE._EPISODE_RE, nrk_id)
info.update(
{
"_type": "url",
"id": nrk_id,
"url": "nrk:%s" % nrk_id,
"ie_key": NRKIE.ie_key(),
"season_number": int(season_number),
"episode_number": int(episode_number),
}
)
return info
class NRKTVSerieBaseIE(NRKBaseIE):
def _extract_entries(self, entry_list):
if not isinstance(entry_list, list):
return []
entries = []
for episode in entry_list:
nrk_id = episode.get("prfId") or episode.get("episodeId")
if not nrk_id or not isinstance(nrk_id, compat_str):
continue
entries.append(
self.url_result("nrk:%s" % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id)
)
return entries
_ASSETS_KEYS = (
"episodes",
"instalments",
)
def _extract_assets_key(self, embedded):
for asset_key in self._ASSETS_KEYS:
if embedded.get(asset_key):
return asset_key
@staticmethod
def _catalog_name(serie_kind):
return "podcast" if serie_kind in ("podcast", "podkast") else "series"
def _entries(self, data, display_id):
for page_num in itertools.count(1):
embedded = data.get("_embedded") or data
if not isinstance(embedded, dict):
break
assets_key = self._extract_assets_key(embedded)
if not assets_key:
break
# Extract entries
entries = try_get(
embedded,
(
lambda x: x[assets_key]["_embedded"][assets_key],
lambda x: x[assets_key],
),
list,
)
for e in self._extract_entries(entries):
yield e
# Find next URL
next_url_path = try_get(
data,
(
lambda x: x["_links"]["next"]["href"],
lambda x: x["_embedded"][assets_key]["_links"]["next"]["href"],
),
compat_str,
)
if not next_url_path:
break
data = self._call_api(
next_url_path,
display_id,
note="Downloading %s JSON page %d" % (assets_key, page_num),
fatal=False,
)
if not data:
break
class NRKTVSeasonIE(NRKTVSerieBaseIE):
_VALID_URL = r"""(?x)
https?://
(?P<domain>tv|radio)\.nrk\.no/
(?P<serie_kind>serie|pod[ck]ast)/
(?P<serie>[^/]+)/
(?:
(?:sesong/)?(?P<id>\d+)|
sesong/(?P<id_2>[^/?#&]+)
)
"""
_TESTS = [
{
"url": "https://tv.nrk.no/serie/backstage/sesong/1",
"info_dict": {
"id": "backstage/1",
"title": "Sesong 1",
},
"playlist_mincount": 30,
},
{
# no /sesong/ in path
"url": "https://tv.nrk.no/serie/lindmo/2016",
"info_dict": {
"id": "lindmo/2016",
"title": "2016",
},
"playlist_mincount": 29,
},
{
# weird nested _embedded in catalog JSON response
"url": "https://radio.nrk.no/serie/dickie-dick-dickens/sesong/1",
"info_dict": {
"id": "dickie-dick-dickens/1",
"title": "Sesong 1",
},
"playlist_mincount": 11,
},
{
# 841 entries, multi page
"url": "https://radio.nrk.no/serie/dagsnytt/sesong/201509",
"info_dict": {
"id": "dagsnytt/201509",
"title": "September 2015",
},
"playlist_mincount": 841,
},
{
# 180 entries, single page
"url": "https://tv.nrk.no/serie/spangas/sesong/1",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/hele_historien/sesong/diagnose-kverulant",
"info_dict": {
"id": "hele_historien/diagnose-kverulant",
"title": "Diagnose kverulant",
},
"playlist_mincount": 3,
},
{
"url": "https://radio.nrk.no/podkast/loerdagsraadet/sesong/202101",
"only_matching": True,
},
]
@classmethod
def suitable(cls, url):
return (
False
if NRKTVIE.suitable(url)
or NRKTVEpisodeIE.suitable(url)
or NRKRadioPodkastIE.suitable(url)
else super(NRKTVSeasonIE, cls).suitable(url)
)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
domain = mobj.group("domain")
serie_kind = mobj.group("serie_kind")
serie = mobj.group("serie")
season_id = mobj.group("id") or mobj.group("id_2")
display_id = "%s/%s" % (serie, season_id)
data = self._call_api(
"%s/catalog/%s/%s/seasons/%s"
% (domain, self._catalog_name(serie_kind), serie, season_id),
display_id,
"season",
query={"pageSize": 50},
)
title = try_get(data, lambda x: x["titles"]["title"], compat_str) or display_id
return self.playlist_result(self._entries(data, display_id), display_id, title)
class NRKTVSeriesIE(NRKTVSerieBaseIE):
_VALID_URL = r"https?://(?P<domain>(?:tv|radio)\.nrk|(?:tv\.)?nrksuper)\.no/(?P<serie_kind>serie|pod[ck]ast)/(?P<id>[^/]+)"
_TESTS = [
{
# new layout, instalments
"url": "https://tv.nrk.no/serie/groenn-glede",
"info_dict": {
"id": "groenn-glede",
"title": "Grønn glede",
"description": "md5:7576e92ae7f65da6993cf90ee29e4608",
},
"playlist_mincount": 90,
},
{
# new layout, instalments, more entries
"url": "https://tv.nrk.no/serie/lindmo",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/blank",
"info_dict": {
"id": "blank",
"title": "Blank",
"description": "md5:7664b4e7e77dc6810cd3bca367c25b6e",
},
"playlist_mincount": 30,
},
{
# new layout, seasons
"url": "https://tv.nrk.no/serie/backstage",
"info_dict": {
"id": "backstage",
"title": "Backstage",
"description": "md5:63692ceb96813d9a207e9910483d948b",
},
"playlist_mincount": 60,
},
{
# old layout
"url": "https://tv.nrksuper.no/serie/labyrint",
"info_dict": {
"id": "labyrint",
"title": "Labyrint",
"description": "I Daidalos sin undersjøiske Labyrint venter spennende oppgaver, skumle robotskapninger og slim.",
},
"playlist_mincount": 3,
},
{
"url": "https://tv.nrk.no/serie/broedrene-dal-og-spektralsteinene",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/saving-the-human-race",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/postmann-pat",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/serie/dickie-dick-dickens",
"info_dict": {
"id": "dickie-dick-dickens",
"title": "Dickie Dick Dickens",
"description": "md5:19e67411ffe57f7dce08a943d7a0b91f",
},
"playlist_mincount": 8,
},
{
"url": "https://nrksuper.no/serie/labyrint",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers",
"info_dict": {
"id": "ulrikkes_univers",
},
"playlist_mincount": 10,
},
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers/nrkno-poddkast-26588-134079-05042018030000",
"only_matching": True,
},
]
@classmethod
def suitable(cls, url):
return (
False
if any(
ie.suitable(url)
for ie in (NRKTVIE, NRKTVEpisodeIE, NRKRadioPodkastIE, NRKTVSeasonIE)
)
else super(NRKTVSeriesIE, cls).suitable(url)
)
def _real_extract(self, url):
site, serie_kind, series_id = re.match(self._VALID_URL, url).groups()
is_radio = site == "radio.nrk"
domain = "radio" if is_radio else "tv"
size_prefix = "p" if is_radio else "embeddedInstalmentsP"
series = self._call_api(
"%s/catalog/%s/%s" % (domain, self._catalog_name(serie_kind), series_id),
series_id,
"serie",
query={size_prefix + "ageSize": 50},
)
titles = (
try_get(
series,
[
lambda x: x["titles"],
lambda x: x[x["type"]]["titles"],
lambda x: x[x["seriesType"]]["titles"],
],
)
or {}
)
entries = []
entries.extend(self._entries(series, series_id))
embedded = series.get("_embedded") or {}
linked_seasons = try_get(series, lambda x: x["_links"]["seasons"]) or []
embedded_seasons = embedded.get("seasons") or []
if len(linked_seasons) > len(embedded_seasons):
for season in linked_seasons:
season_url = urljoin(url, season.get("href"))
if not season_url:
season_name = season.get("name")
if season_name and isinstance(season_name, compat_str):
season_url = "https://%s.nrk.no/serie/%s/sesong/%s" % (
domain,
series_id,
season_name,
)
if season_url:
entries.append(
self.url_result(
season_url,
ie=NRKTVSeasonIE.ie_key(),
video_title=season.get("title"),
)
)
else:
for season in embedded_seasons:
entries.extend(self._entries(season, series_id))
entries.extend(self._entries(embedded.get("extraMaterial") or {}, series_id))
return self.playlist_result(
entries, series_id, titles.get("title"), titles.get("subtitle")
)
class NRKTVDirekteIE(NRKTVIE):
IE_DESC = "NRK TV Direkte and NRK Radio Direkte"
_VALID_URL = r"https?://(?:tv|radio)\.nrk\.no/direkte/(?P<id>[^/?#&]+)"
_TESTS = [
{
"url": "https://tv.nrk.no/direkte/nrk1",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/direkte/p1_oslo_akershus",
"only_matching": True,
},
]
class NRKRadioPodkastIE(InfoExtractor):
_VALID_URL = r"https?://radio\.nrk\.no/pod[ck]ast/(?:[^/]+/)+(?P<id>l_[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})"
_TESTS = [
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"md5": "8d40dab61cea8ab0114e090b029a0565",
"info_dict": {
"id": "MUHH48000314AA",
"ext": "mp4",
"title": "20 spørsmål 23.05.2014",
"description": "md5:bdea103bc35494c143c6a9acdd84887a",
"duration": 1741,
"series": "20 spørsmål",
"episode": "23.05.2014",
},
},
{
"url": "https://radio.nrk.no/podcast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers/sesong/1/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/hele_historien/sesong/bortfoert-i-bergen/l_774d1a2c-7aa7-4965-8d1a-2c7aa7d9652c",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result(
"nrk:%s" % video_id, ie=NRKIE.ie_key(), video_id=video_id
)
class NRKPlaylistBaseIE(InfoExtractor):
def _extract_description(self, webpage):
pass
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result("nrk:%s" % video_id, NRKIE.ie_key())
for video_id in re.findall(self._ITEM_RE, webpage)
]
playlist_title = self._extract_title(webpage)
playlist_description = self._extract_description(webpage)
return self.playlist_result(
entries, playlist_id, playlist_title, playlist_description
)
class NRKPlaylistIE(NRKPlaylistBaseIE):
_VALID_URL = r"https?://(?:www\.)?nrk\.no/(?!video|skole)(?:[^/]+/)+(?P<id>[^/]+)"
_ITEM_RE = r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"'
_TESTS = [
{
"url": "http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763",
"info_dict": {
"id": "gjenopplev-den-historiske-solformorkelsen-1.12270763",
"title": "Gjenopplev den historiske solformørkelsen",
"description": "md5:c2df8ea3bac5654a26fc2834a542feed",
},
"playlist_count": 2,
},
{
"url": "http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449",
"info_dict": {
"id": "rivertonprisen-til-karin-fossum-1.12266449",
"title": "Rivertonprisen til Karin Fossum",
"description": "Første kvinne på 15 år til å vinne krimlitteraturprisen.",
},
"playlist_count": 2,
},
]
def _extract_title(self, webpage):
return self._og_search_title(webpage, fatal=False)
def _extract_description(self, webpage):
return self._og_search_description(webpage)
class NRKTVEpisodesIE(NRKPlaylistBaseIE):
_VALID_URL = r"https?://tv\.nrk\.no/program/[Ee]pisodes/[^/]+/(?P<id>\d+)"
_ITEM_RE = r'data-episode=["\']%s' % NRKTVIE._EPISODE_RE
_TESTS = [
{
"url": "https://tv.nrk.no/program/episodes/nytt-paa-nytt/69031",
"info_dict": {
"id": "69031",
"title": "Nytt på nytt, sesong: 201210",
},
"playlist_count": 4,
}
]
def _extract_title(self, webpage):
return self._html_search_regex(
r"<h1>([^<]+)</h1>", webpage, "title", fatal=False
)
class NRKSkoleIE(InfoExtractor):
IE_DESC = "NRK Skole"
_VALID_URL = r"https?://(?:www\.)?nrk\.no/skole/?\?.*\bmediaId=(?P<id>\d+)"
_TESTS = [
{
"url": "https://www.nrk.no/skole/?page=search&q=&mediaId=14099",
"md5": "18c12c3d071953c3bf8d54ef6b2587b7",
"info_dict": {
"id": "6021",
"ext": "mp4",
"title": "Genetikk og eneggede tvillinger",
"description": "md5:3aca25dcf38ec30f0363428d2b265f8d",
"duration": 399,
},
},
{
"url": "https://www.nrk.no/skole/?page=objectives&subject=naturfag&objective=K15114&mediaId=19355",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
nrk_id = self._download_json(
"https://nrkno-skole-prod.kube.nrk.no/skole/api/media/%s" % video_id,
video_id,
)["psId"]
return self.url_result("nrk:%s" % nrk_id)
|
#!/usr/bin/env python
"""
Configure folder for sCMOS testing.
Hazen 09/17
"""
import numpy
import os
import storm_analysis
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.simulator.emitters_on_grid as emittersOnGrid
import storm_analysis.simulator.emitters_uniform_random as emittersUniformRandom
import storm_analysis.diagnostics.sCMOS.settings as settings
def testingParameters(cal_file):
"""
Create a sCMOS parameters object.
"""
params = parameters.ParametersSCMOS()
params.setAttr("max_frame", "int", -1)
params.setAttr("start_frame", "int", -1)
params.setAttr("background_sigma", "float", 8.0)
params.setAttr("camera_calibration", "filename", cal_file)
params.setAttr("find_max_radius", "int", 5)
params.setAttr("fit_error_model", "string", settings.fit_error_model)
params.setAttr("foreground_sigma", "float", 1.5)
params.setAttr("iterations", "int", settings.iterations)
params.setAttr("model", "string", settings.model)
params.setAttr("pixel_size", "float", settings.pixel_size)
params.setAttr("roi_size", "int", settings.roi_size)
params.setAttr("sigma", "float", 1.5)
params.setAttr("threshold", "float", settings.threshold)
# Don't do tracking.
params.setAttr("descriptor", "string", "1")
params.setAttr("radius", "float", "0.0")
# Don't do drift-correction.
params.setAttr("d_scale", "int", 2)
params.setAttr("drift_correction", "int", 0)
params.setAttr("frame_step", "int", 500)
params.setAttr("z_correction", "int", 0)
# Z fitting.
#
    # These are nonsense values. We test either '2D' or '3D' mode
# and check how well we do at fitting the localization widths.
#
params.setAttr("do_zfit", "int", 0)
params.setAttr("cutoff", "float", 0.0)
params.setAttr("max_z", "float", 0.5)
params.setAttr("min_z", "float", -0.5)
params.setAttr("z_value", "float", 0.0)
params.setAttr("z_step", "float", 1.0)
params.setAttr("wx_wo", "float", 1.0)
params.setAttr("wx_c", "float", 1.0)
params.setAttr("wx_d", "float", 1.0)
params.setAttr("wxA", "float", 0.0)
params.setAttr("wxB", "float", 0.0)
params.setAttr("wxC", "float", 0.0)
params.setAttr("wxD", "float", 0.0)
params.setAttr("wy_wo", "float", 1.0)
params.setAttr("wy_c", "float", 1.0)
params.setAttr("wy_d", "float", 1.0)
params.setAttr("wyA", "float", 0.0)
params.setAttr("wyB", "float", 0.0)
params.setAttr("wyC", "float", 0.0)
params.setAttr("wyD", "float", 0.0)
# 'peak_locations' testing.
if hasattr(settings, "peak_locations") and (settings.peak_locations is not None):
params.setAttr("peak_locations", "filename", settings.peak_locations)
return params
def configure(cal_file = None):
# Create sCMOS calibration file if not specified.
#
if cal_file is None:
cal_file = "calib.npy"
offset = numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset
variance = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_variance
gain = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain
rqe = numpy.ones((settings.y_size, settings.x_size))
numpy.save(cal_file, [offset, variance, gain, rqe, 2])
# Create parameters file for analysis.
#
print("Creating XML file.")
params = testingParameters(cal_file)
params.toXMLFile("scmos.xml", pretty = True)
# Create localization on a grid file.
#
print("Creating gridded localization.")
emittersOnGrid.emittersOnGrid("grid_list.hdf5",
settings.nx,
settings.ny,
1.5,
20,
0.0,
0.0)
# Create randomly located localizations file.
#
print("Creating random localization.")
emittersUniformRandom.emittersUniformRandom("random_list.hdf5",
1.0,
10,
settings.x_size,
settings.y_size,
0.0)
if (__name__ == "__main__"):
configure()
|
from unicorn import *
from unicorn.x86_const import *
from capstone import *
from importlib import import_module
from emulation.syscall import clean_stack
import argparse
import emulation.syscall as winsyscall
import pefile
import struct
import sys
import ast
import os
#TODO: Deal with SEH structure
#TODO: Randomize TEB base address
#TODO: Randomize process ID
#TODO: Randomize thread ID
#TODO: Process management
#TODO: Thread management
#TODO: Fake FileSystem
#TODO: Fake running process
API_refs = 'winapi_9k.csv'
regs = ['eax', 'ebx', 'ecx', 'edx', 'esp', 'ebp', 'edi', 'esi']
md = Cs(CS_ARCH_X86, CS_MODE_32)
full_content = ''
class Environment:
def __init__(self, args):
# Argument validation
self.breakpoint = args.breakpoint
self.trace = args.trace
self.dump = args.dump
self.silent = args.silent
self.out = args.out
self.stack = args.stack
self.registers = args.registers
self.debug = args.debug
self.handle_list = args.handle
self.show_extract = args.extract
self.imports = args.imports
self.dynamics = []
if self.trace:
self.calltrace = []
if self.stack and self.registers:
self.dump = True
if self.dump:
self.registers = True
self.stack = True
path = args.path
self.shortname = path.split('/')[-1].split('.')[0].lower()
self.drivename = 'C:\\Users\\EllenRipley\\Desktop\\' + self.shortname
self.username = 'EllenRipley'
self.computername = 'Nostromo'
self.computer_mac = '0F-0C-95-86-20-29'
self.computer_ip = '192.168.0.12'
self.path = path
self.chunks = []
self.virtual_memory = []
self.resources = {}
self.extracts = {}
self.threads = []
self.thread_ret = None
self.thread_trace = []
self.thread_max_replay = 5
self.max_loop = 10
self.current_loop_counter = 0
self.previous_loop = []
self.current_loop = []
self.execution_mode = 'default'
self.uc = Uc(UC_ARCH_X86, UC_MODE_32)
self.handle = {'0xaa': ['placeholder_dynamic_handle', 'dummy']}
try:
self.pe = pefile.PE(path)
        except OSError as e:
            print(e)
            exit(-1)
        except pefile.PEFormatError as e:
            print(f'Malformed or invalid PE file: {e.value}')
            exit(-1)
# Log every instruction emulated
def hook_code(self, a, address, size, user_data):
instruction = self.uc.mem_read(address, size)
# Manual Breakpoint
if self.breakpoint:
if hex(address) == self.breakpoint:
final_esp = self.uc.reg_read(UC_X86_REG_ESP)
final_ebp = self.uc.reg_read(UC_X86_REG_EBP)
self.uc.emu_stop()
self.calltrace.append('breakpoint')
print('[+] Breakpoint hits at 0x%08x' % int(self.breakpoint, 16))
return
# Out of function range
for i in md.disasm(instruction, address):
#if 'int' in i.mnemonic:
#original_eip = self.uc.reg_read(UC_X86_REG_EIP)
#self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(i.bytes))
#return
if i.mnemonic == 'add' and i.op_str == 'byte ptr [eax], al':
print('[!] End of the main emulation thread')
self.uc.emu_stop()
return
            # Bypass traps to debugger
#if str(i.mnemonic) == 'int3':
# if not self.silent:
# print('> Tracing intruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
# original_eip = self.uc.reg_read(UC_X86_REG_EIP)
# self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(i.bytes))
if str(i.mnemonic) == 'call' and 'dword ptr [' in i.op_str:
target = i.op_str.split('[')[1].split(']')[0]
if target not in self.raw_IAT and self.silent:
# print('[CHECKME]> Tracing intruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
self.hook_syscall(i.op_str, 'call', i.address, i.bytes)
else:
self.hook_syscall(i.op_str, 'call', i.address, i.bytes)
elif str(i.mnemonic) == 'call':
#print('[Debug]', i.mnemonic, i.op_str)
self.hook_syscall(i.op_str, 'call', i.address, i.bytes)
elif str(i.mnemonic) == 'jmp' and 'dword ptr [' in i.op_str:
target = i.op_str.split('[')[1].split(']')[0]
if i.op_str in regs:
dest_addr = '0x%08x' % eval('self.uc.reg_read(UC_X86_REG_' + i.op_str.replace(' ','').upper() + ')')
elif ('+' in i.op_str or '-' in i.op_str or '*' in i.op_str):
left_elem = i.op_str.split('[')[1].split(']')[0].split(' ')[0].replace(' ', '')
operator = i.op_str.split('[')[1].split(']')[0].split(' ')[1].replace(' ', '')
right_elem = i.op_str.split('[')[1].split(']')[0].split(' ')[2].replace(' ', '')
# call/jmp [eax+4]
if left_elem in regs:
left_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + left_elem.upper())))
dest_addr_ptr = '0x%08x' % eval(left_value + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
target = '0x%08x' % struct.unpack('I', content)[0]
# call/jmp [eax*4 + 10]
elif '+' in left_elem or '-' in left_elem or '*' in left_elem:
lleft_elem = left_elem.split('*')[0].split('-')[0].split('+')[0]
lleft_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + lleft_elem.upper())))
lleft_op = left_elem.replace(lleft_elem, lleft_value)
dest_addr_ptr = '0x%08x' % eval(lleft_op + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
target = '0x%06x' % struct.unpack('I', content)[0]
else:
print('[-] Something went terribly wrong')
exit(1)
else:
target = i.op_str.split('[')[1].split(']')[0]
if target not in self.raw_IAT:
#self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes)
if not self.silent:
                        print('> Tracing instruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
#return
self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes)
else:
self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes)
else:
if not self.silent:
                    print('> Tracing instruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
# Hook and trace syscalls
def hook_syscall(self, instruction, mnemonic, addr, byte):
if self.execution_mode == 'thread':
self.thread_trace.append(addr)
dup_api = {i:self.thread_trace.count(i) for i in self.thread_trace}
for elem in dup_api:
rep = dup_api[elem]
if rep >= self.thread_max_replay:
self.uc.emu_stop()
if self.debug:
                        print('[!] Thread stopped due to its repetition (infinite loop)')
return
is_ptr = False
if '[' in instruction:
is_ptr = True
try:
if instruction in regs:
dest_addr = '0x%08x' % eval('self.uc.reg_read(UC_X86_REG_' + instruction.replace(' ','').upper() + ')')
elif ('+' in instruction or '-' in instruction) and is_ptr:
left_elem = instruction.split('[')[1].split(']')[0].split(' ')[0].replace(' ', '')
operator = instruction.split('[')[1].split(']')[0].split(' ')[1].replace(' ', '')
right_elem = instruction.split('[')[1].split(']')[0].split(' ')[2].replace(' ', '')
# call/jmp [eax+4]
if left_elem in regs:
left_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + left_elem.upper())))
dest_addr_ptr = '0x%08x' % eval(left_value + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
dest_addr = '0x%08x' % struct.unpack('I', content)[0]
# call/jmp [eax*4 + 10]
elif '+' in left_elem or '-' in left_elem or '*' in left_elem:
lleft_elem = left_elem.split('*')[0].split('-')[0].split('+')[0]
lleft_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + lleft_elem.upper())))
lleft_op = left_elem.replace(lleft_elem, lleft_value)
dest_addr_ptr = '0x%08x' % eval(lleft_op + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
dest_addr = '0x%08x' % struct.unpack('I', content)[0]
else:
print('[-] Something went terribly wrong')
exit(1)
else:
dest_addr = '0x' + instruction.split('0x')[1].replace(']','')
except:
print('[-] Weird call at 0x%08X, investigate me ! "%s %s"' % (addr, mnemonic, instruction))
return
# Are we calling a function from the IAT in a weird way ?
#print(self.IAT)
if str(dest_addr) in self.IAT_hook.values():
target_iat_call = list(self.IAT_hook.keys())[list(self.IAT_hook.values()).index(dest_addr)]
for dll in self.IAT:
for func_addr in self.IAT[dll]:
func_name = self.IAT[dll].get(func_addr)
if func_name == target_iat_call:
#print('[*] IAT call detected:', target_iat_call, func_addr)
dest_addr = func_addr
break
#return
# Is this targeting the IAT or a mapped function ?
api_name_tmp = None
IAT_entry = list(self.raw_IAT.keys())
if dest_addr not in IAT_entry:
if is_ptr:
raw_ptr = self.uc.mem_read(int(dest_addr, 16), 0x4)
ptr = '0x%08x' % struct.unpack('<I', raw_ptr)[0]
if ptr in self.IAT_hook.values():
try:
api_name_tmp = [k for k,v in self.IAT_hook.items() if v == ptr][0]
except:
api_name_tmp = None
else:
if not self.silent:
                    print('> Tracing instruction ' + hex(addr), ':', mnemonic, self.shortname + '.' + str(instruction) )
print('> Following function ' + self.shortname + '.' + str(instruction) + ':')
if self.trace:
self.calltrace.append(self.shortname + '.' + str(instruction))
return
if api_name_tmp == None:
try:
api_name = self.raw_IAT[dest_addr]
except:
return
else:
api_name = api_name_tmp
is_valid, description, args, args_count = self.extract_API_args(api_name)
if not is_valid:
if self.debug:
print('[!] Unknown call destination, fix me dude')
self.uc.emu_stop()
if is_ptr:
api_name = '&' + api_name
display_line = instruction.replace(dest_addr, api_name)
if not self.silent:
            print('> Tracing instruction ' + hex(addr), ':', mnemonic, display_line)
# print('> Tracing intruction ' + hex(addr), ': call', display_line + ' #' + description)
if mnemonic == 'call':
self.fake_syscall(addr, args_count, api_name, byte, 0x0) # Return 0 by default
elif mnemonic == 'jmp':
self.fake_jmpcall(addr, args_count, api_name, byte, 0x0)
# Read <size> bytes from the stack address <start>
def read_stack(self, start, size):
print('=========== Stack Dump ==========')
final_stack = self.uc.mem_read(start, size)
stack_addr = start
for x in range(0, size // 4):
stack_addr += 4
stack_content = final_stack[0:4]
final_stack = final_stack[4:]
stack_value = struct.unpack('I', stack_content)[0]
print('0x%08x : 0x%08x' % (stack_addr, stack_value))
# Fake syscall function
def fake_syscall(self, addr, args_count, api, opcode, ret_value):
api_name = api.replace('&', '')
display = '> ' + hex(addr) + ': ' + api_name + '('
current_esp = self.uc.reg_read(UC_X86_REG_ESP)
val = self.uc.mem_read(current_esp, 4*args_count)
loc_esp = self.uc.reg_read(UC_X86_REG_ESP)
args = []
for x in range(0, args_count):
value = self.read_byte(loc_esp + (x*4))
args.append(hex(value))
        # Test whether or not a special hook exists
if api_name in dir(winsyscall):
            # This API needs to be intercepted with a special hardcoded hook
function = getattr(winsyscall, api_name)
ret_code, ret_args = function(self, args)
if ret_code == 'THREAD':
taddr = int(self.threads[-1])
ret_code = 0x1
for elem in self.handle:
hval = self.handle[elem][0]
if hval == taddr:
ret_code = int(elem, 16)
break
if self.debug:
print('[!] Spawning a new thread at ' + hex(self.threads[-1]))
if ret_args == 'EXIT':
print(display + '0x0)')
self.uc.emu_stop()
return
display += str(ret_args).replace('[', '').replace(']','').replace("'", '') + ') = '
if ret_code != None:
display += hex(ret_code)
else:
display += str(ret_code)
else:
clean_stack(self, args_count)
ret_code = 0x0
display += str(args).replace('[', '').replace(']', '').replace("'", '') + ') = '
display += hex(ret_code)
# Avoid dead end / infinite loop
if len(self.current_loop) < self.max_loop:
self.current_loop.append(addr)
elif len(self.current_loop) == self.max_loop:
            if sorted(self.previous_loop) == sorted(self.current_loop):
                if self.current_loop_counter == self.max_loop:
                    print('[!] Infinite loop detected, stopping the emulation')
self.uc.emu_stop()
return
self.current_loop = []
self.current_loop_counter += 1
else:
self.previous_loop = self.current_loop
print(display)
# Does the function return something ?
if ret_code != None:
# Fake return code to 0
self.uc.reg_write(UC_X86_REG_EAX, ret_code)
# Redirect EIP
original_eip = self.uc.reg_read(UC_X86_REG_EIP)
self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(opcode))
# Pop a value from the stack
def popstack(self):
current_esp = self.uc.reg_read(UC_X86_REG_ESP)
val = self.uc.mem_read(current_esp, 4)
stack_value = struct.unpack('I', val)[0]
return stack_value
    # Discard the top of the stack by incrementing ESP
def decstack(self):
current_esp = self.uc.reg_read(UC_X86_REG_ESP)
self.uc.reg_write(UC_X86_REG_ESP, int(current_esp + 4))
# Read a 4 byte value at a given address
def read_byte(self, addr):
val = self.uc.mem_read(addr, 4)
formated_value = struct.unpack('I', val)[0]
return formated_value
# Fake jmp to syscall ptr
def fake_jmpcall(self, addr, args_count, api, opcode, ret_value):
display = '> ' + hex(addr) + ': ' + api.replace('&', '') + '('
ret = self.popstack()
self.decstack()
loc_esp = self.uc.reg_read(UC_X86_REG_ESP)
loc_args = []
for x in range(0, args_count):
value = self.read_byte(loc_esp + (x*4))
loc_args.append(hex(value))
# display += str(loc_args).replace('[', '').replace(']', '').replace("'", '') + ''
args = loc_args
api_name = api.replace('&', '')
if api_name in dir(winsyscall):
            # This API needs to be intercepted with a special hardcoded hook
function = getattr(winsyscall, api_name)
ret_code, ret_args = function(self, args)
if ret_code == 'THREAD':
taddr = int(self.threads[-1])
ret_code = 0x1
for elem in self.handle:
hval = self.handle[elem][0]
if hval == taddr:
ret_code = int(elem, 16)
break
if self.debug:
print('[!] Spawning a new thread at ' + hex(self.threads[-1]))
if ret_args == 'EXIT':
print(display + '0x0)')
self.uc.emu_stop()
return
display += str(ret_args).replace('[', '').replace(']','').replace("'", '') + ') = '
if ret_code != None:
display += hex(ret_code)
else:
display += str(ret_code)
else:
# clean_stack(self, args_count)
ret_code = 0x0
display += str(args).replace('[', '').replace(']', '').replace("'", '') + ') = '
display += hex(ret_code)
# Avoid dead end / infinite loop
if len(self.current_loop) < self.max_loop:
self.current_loop.append(addr)
elif len(self.current_loop) == self.max_loop:
            if sorted(self.previous_loop) == sorted(self.current_loop):
                if self.current_loop_counter == self.max_loop:
                    print('[!] Infinite loop detected, stopping the emulation')
self.uc.emu_stop()
return
self.current_loop = []
self.current_loop_counter += 1
else:
self.previous_loop = self.current_loop
print(display)
# Does the function return something ?
if ret_code != None:
# Fake return code to 0
self.uc.reg_write(UC_X86_REG_EAX, ret_code)
else:
# Fake return code to 0
self.uc.reg_write(UC_X86_REG_EAX, 0x0)
# Redirect EIP
self.uc.reg_write(UC_X86_REG_EIP, ret)
# Print a list of used handles
def read_handle(self):
print('========= Opened Handles ========')
for h in self.handle:
handle_addr = h
handle_value = self.handle[h][0]
handle_type = self.handle[h][1]
if handle_type == 'dummy':
continue
if len(str(handle_value)) > 50:
handle_value = str(handle_value)[:25] + '[...]' + str(handle_value)[-9:]
print('Address=' + str(handle_addr) + ' Type=' + str(handle_type) + ' Value=' + str(handle_value) )
    # Show and extract potential payloads
def display_extracts(self):
# Search Binary in allocated memory regions
for vmem in self.virtual_memory:
content = self.uc.mem_read(vmem.data_address, vmem.data_size)
if content[:2] == b'MZ':
self.extracts['hmemory_' + hex(vmem.data_address)] = content
print('======= Extracted Payloads =======')
if len(self.extracts) == 0:
print('Nothing found')
return
dirname = './' + self.shortname + '_emu'
if not os.path.exists(dirname):
os.makedirs(dirname)
counter = 0
for entry in self.extracts:
name = entry[1:]
options = ''
data = self.extracts[entry]
if len(str(data)) > 50:
sdata = str(data)[:25] + '[...]' + str(data)[-9:]
else:
sdata = data
if data[:2] == b'MZ' or data[:2] == 'MZ':
options = ' (PE payload detected)'
print('Name="' + name + '" Content="' + sdata + '"' + options)
fname = name.split('\\')[-1]
if fname == '':
fname = 'generic_extract_' + str(counter) + '.bin'
f = open(dirname + '/' + fname, 'wb')
f.write(data)
f.close()
# Print a list of dynamically resolved functions
def read_dynamic_imports(self):
print('========= Dynamic Imports =======')
if len(self.dynamics) == 0x0:
            print('No dynamic imports were detected during the emulation')
for i in self.dynamics:
print('Address=', i[0], ' Name=', i[1])
# Print a dump of the current registers
def read_full_regs(self):
print('=== Registers Dump ===')
print('EAX: 0x%08x | EBP: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EAX), self.uc.reg_read(UC_X86_REG_EBP)))
print('EBX: 0x%08x | ESP: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EBX), self.uc.reg_read(UC_X86_REG_ESP)))
print('ECX: 0x%08x | ESI: 0x%08x' % (self.uc.reg_read(UC_X86_REG_ECX), self.uc.reg_read(UC_X86_REG_ESI)))
print('EDX: 0x%08x | EDI: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EDX), self.uc.reg_read(UC_X86_REG_EDI)))
print('EIP: 0x%08x ' % self.uc.reg_read(UC_X86_REG_EIP))
    # Retrieve the corresponding Windows API in our list
def extract_API_args(self, api_name):
with open(API_refs) as f:
line = next((l for l in f if api_name == l.split(';')[0]), None)
if line == None or line == '':
# We're fucked mate
return False, '', '', 0
name = line.split(';')[0]
description = line.split(';')[1].split(';')[0]
args = line.split(';')[2]
args_count = args.count(',') + 1
if args_count == 1 and args.replace('\n', '').replace(' ','') == '':
args_count = 0
        if args is None or args == '':
# We're double fucked maaaatee
# print('[!] Cannot gather arguments count and type, fix me')
return True, description, '', 0
return True, description, args, args_count
# Setup a fake IAT
def generate_Import_Address_Table(self):
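        # Walk the PE import directory and map every import thunk address
        # (as a hex string) to its function name; a placeholder slot is
        # added for imports resolved dynamically at run time.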
self.IAT = {}
self.raw_IAT = {}
dll_count = 0
functions_count = 0
for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
functions = {}
dll_count += 1
for imp in entry.imports:
functions_count += 1
#print(imp.name.decode())
functions[hex(imp.address)] = imp.name.decode()
self.raw_IAT[hex(imp.address)] = imp.name.decode()
self.IAT[entry.dll.lower().decode()] = functions
self.IAT['dynamic_import'] = {'0x00ff0000': 'placeholder_dynamic_import'}
if self.debug:
            print('[DEBUG] ' + str(functions_count) + ' functions imported in the IAT from ' + str(dll_count) + ' DLLs')
# Setup a hook structure for the IAT
def hook_Import_Address_Table(self):
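        # Record the 4-byte pointer currently stored in each IAT slot so
        # that calls made through the IAT can be recognised later by value.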
self.IAT_hook = {}
cnt = 0
for dll in self.IAT:
if dll == 'dynamic_import':
continue
for entry_addr in self.IAT[dll]:
entry = self.IAT[dll][entry_addr]
#self.uc.mem_write(int(entry_addr, 16), bytes([cnt]))
content = self.uc.mem_read(int(entry_addr, 16), 0x4)
value = '0x' + struct.pack("<I", int(bytes(content).hex(), 16)).hex()
self.IAT_hook[entry] = value
cnt += 1
#print(self.IAT_hook)
if self.debug:
            print('[DEBUG] ' + str(cnt) + ' IAT entries were hooked')
# Setup the process TIB structure
def generate_Thread_Information_Block(self):
self.TEB_base_addr = 0x200000
self.process_ID = 0x1908
self.thread_ID = 0x10C
self.PEB_base_addr = self.TEB_base_addr + 0x1000
TEB = b''
TEB += struct.pack("<I", 0xffffffff) # FS:[0x00] Structure Exception Handler (SEH)
TEB += struct.pack("<I", (self.stack_addr + self.stack_size)) # FS:[0x04] Stack Base
TEB += struct.pack("<I", self.stack_addr) # FS:[0x08] Stack Limit
TEB += struct.pack("<I", 0x0) # FS:[0x0C] Subsystem TIB
TEB += struct.pack("<I", 0x0) # FS:[0x10] Fiber Data
TEB += struct.pack("<I", 0x0) # FS:[0x14] Arbitrary Data Slot
TEB += struct.pack("<I", self.TEB_base_addr) # FS:[0x18] Linear Address of TEB
TEB += struct.pack("<I", 0x0) # FS:[0x1C] Environment Pointer
TEB += struct.pack("<I", self.process_ID) # FS:[0x20] Process ID
TEB += struct.pack("<I", self.thread_ID) # FS:[0x24] Current Thread ID
TEB += struct.pack("<I", 0x0) # FS:[0x28] Active RPC Handle
TEB += struct.pack("<I", 0x0) # FS:[0x2C] Linear Address of the thread-local storage array
TEB += struct.pack("<I", self.PEB_base_addr) # FS:[0x30] Linear Address of the Process Environment Block (PEB)
page_size=4096
m = 0x5000 % page_size
f = page_size - m
aligned_size = 0x5000 + f
# Map and write the TEB in memory
self.uc.mem_map(self.TEB_base_addr, aligned_size)
self.uc.mem_write(self.TEB_base_addr, TEB)
def launch(self):
        # Get the header's most important fields
self.header_image_base = self.pe.OPTIONAL_HEADER.ImageBase
self.header_size_of_image = self.pe.OPTIONAL_HEADER.SizeOfImage
self.header_entrypoint = self.pe.OPTIONAL_HEADER.AddressOfEntryPoint
self.mapped_image = self.pe.get_memory_mapped_image(ImageBase=self.header_image_base)
self.mapped_size = (len(self.mapped_image) + 0x1000) & ~0xFFF
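        # Round the mapped image size up to a 4 KiB page boundary, since
        # Unicorn's mem_map() only accepts page-aligned sizes.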
self.exit_addr = 0xfffff000
# Redirect to file
        if self.out is not None:
sys.stdout = open(self.out, "w")
# Get virtual size needed for PE mapping
min_offset = sys.maxsize
virtual_size = 0
for section in self.pe.sections:
min_offset = section.VirtualAddress
virtual_size += min_offset
virtual_size += min_offset
m = virtual_size % 4096
f = 4096 - m
aligned_virtual_size = virtual_size + f
# Map the binary in memory
self.uc.mem_map(self.header_image_base, self.mapped_size)
self.uc.mem_write(self.header_image_base, self.mapped_image)
self.start_addr = self.header_entrypoint + self.header_image_base
if self.debug:
print('[DEBUG] Binary mapped in memory at 0x%08x' % self.header_image_base)
# Initialize the stack
self.stack_addr = 0x0
self.stack_size = 0x200000
self.uc.mem_map(self.stack_addr, self.stack_size)
if self.debug:
print('[DEBUG] Stack of 0x%x bytes starting at 0x%08x' % (self.stack_size, self.stack_addr))
self.uc.reg_write(UC_X86_REG_ESP, self.stack_addr + self.stack_size - 0x500)
self.uc.reg_write(UC_X86_REG_EBP, self.stack_addr + self.stack_size - 0x100)
if self.debug:
            print('[DEBUG] Initial stack frame created between 0x%08x and 0x%08x' % (self.stack_addr + self.stack_size - 0x500, self.stack_addr + self.stack_size - 0x100))
        # Create the TEB structure
self.generate_Thread_Information_Block()
if self.debug:
print('[DEBUG] Thread Information Block initiated at 0x%08x' % self.TEB_base_addr)
        # Create the PEB structure
# TODO
# Create a fake IAT
self.generate_Import_Address_Table()
# Place hooks on the IAT
self.hook_Import_Address_Table()
# Initiate the registers
self.uc.reg_write(UC_X86_REG_EDI, self.start_addr)
self.uc.reg_write(UC_X86_REG_ESI, self.start_addr)
self.uc.reg_write(UC_X86_REG_EDX, self.start_addr)
self.uc.reg_write(UC_X86_REG_ECX, self.start_addr)
        self.uc.reg_write(UC_X86_REG_EBX, self.PEB_base_addr) # EBX points to the PEB address
        self.uc.reg_write(UC_X86_REG_EAX, self.TEB_base_addr) # EAX points to the TEB address
# Place a debug hook
self.uc.hook_add(UC_HOOK_CODE, self.hook_code)
# Place a memory debug hook
#self.uc.hook_add(UC_ERR_FETCH_UNMAPPED, self.hook_mem_invalid)
# Start emulation
print('[DEBUG] Starting the emulation of "%s.exe" from 0x%08x' % (self.drivename, self.start_addr))
print()
self.uc.emu_start(self.start_addr, self.start_addr + 500000, timeout=20 * UC_SECOND_SCALE)
print()
if len(self.threads) != 0:
uniq_threads = list(dict.fromkeys(self.threads))
else:
uniq_threads = False
if self.debug:
print('[!] Looking for entrypoints in the threads queue')
if uniq_threads:
for thread_addr in uniq_threads:
print('[!] Starting the thread ' + hex(thread_addr))
self.execution_mode = 'thread'
self.uc.hook_add(UC_HOOK_CODE, self.hook_code)
self.uc.emu_start(thread_addr, self.start_addr + 100, timeout=20 * UC_SECOND_SCALE)
#self.uc.reg_write(UC_X86_REG_EIP, add)
print('[!] End of the thread ' + hex(thread_addr))
self.thread_trace = []
print()
# Display final program's state
final_esp = self.uc.reg_read(UC_X86_REG_ESP)
final_ebp = self.uc.reg_read(UC_X86_REG_EBP)
if args.dynamics:
self.read_dynamic_imports()
print()
if self.stack:
self.read_stack(final_esp, final_ebp - final_esp)
print()
if self.registers:
self.read_full_regs()
print()
if self.handle_list:
self.read_handle()
print()
if self.show_extract:
self.display_extracts()
print()
if self.trace:
print('==== Call trace ====')
print(' → Entrypoint')
for elem in self.calltrace:
print(' → ' + elem)
        if self.out is not None:
sys.stdout.close()
def main(args):
emul = Environment(args)
emul.launch()
parser = argparse.ArgumentParser(description='Windows Binary Emulator')
parser.add_argument('-p', '--path', required=True, help='path to the binary file to emulate')
parser.add_argument('-b', '--breakpoint', required=False, help='pause the execution at the given address')
parser.add_argument('--trace', required=False, action="store_true", help='display the call trace of the binary')
parser.add_argument('--dump', required=False, action="store_true", help='display a full dump of the program\'s state after the execution')
parser.add_argument('--stack', required=False, action="store_true", help='display a dump of the stack after the execution')
parser.add_argument('--registers', required=False, action="store_true", help='display a dump of the registers after the execution')
parser.add_argument('--debug', required=False, action="store_true", help='display debug messages')
parser.add_argument('--silent', required=False, action="store_true", help='only print out the system calls')
parser.add_argument('--handle', required=False, action="store_true", help='display the list of used handles')
parser.add_argument('--extract', required=False, action="store_true", help='extract potential payloads found in memory. Files are saved to <bin_name>_emu/')
parser.add_argument('--imports', required=False, action="store_true", help='UNIMPLEMENTED - display the static content of the import address table (IAT)')
parser.add_argument('--dynamics', required=False, action="store_true", help='display the list of dynamically resolved syscalls')
parser.add_argument('--out', required=False, help='save the emulation output to a file')
args = parser.parse_args()
main(args)
|
import sqlite3
from app import app
from flask import g
DATABASE = 'db/trackpants.db'
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_db(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
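# Illustrative usage (table and column names are hypothetical):
#   row = query_db('SELECT * FROM users WHERE id = ?', (user_id,), one=True)
#   rows = query_db('SELECT name FROM pants')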
|
import argparse, re, sys, os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
path = ''
flname = sys.argv[1]
try:
    chartType = sys.argv[2]
except IndexError:
    chartType = 'ch1_vload'
print('chartType:'+chartType)
fl = flname.split('/')
for i in fl[:-1]:
path = path+i+'/'
with open(flname, 'r') as fw:
    rawdata = fw.read().strip()
ch1_list = []
ch2_list = []
ch1_vload = []
ch1_volt = []
ch1_iload = []
ch1_pload = []
ch2_vload = []
ch2_volt = []
ch2_iload = []
ch2_pload = []
unit = ''
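# Each log line holds two channel readings separated by '||'; every reading
# is a ' | '-separated list of fields such as V-load, Voltage, I-load and
# P-load (format inferred from the parsing below).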
line = rawdata.split('\n')
for aline in line:
tmp = aline.split('||')
ch1_list.append(tmp[0].lstrip())
ch2_list.append(tmp[2].lstrip())
for item in ch1_list:
tmp = item.split(' | ')
for sub in tmp:
if sub.count("V-load"):
ch1_vload.append(float(re.search('\d+\.\d+', sub).group()))
elif sub.count("Voltage"):
ch1_volt.append(float(re.search('\d+\.\d+', sub).group()))
elif sub.count("I-load"):
ch1_iload.append(float(re.search('\d+\.\d+', sub).group()))
elif sub.count("P-load"):
ch1_pload.append(float(re.search('\d+\.\d+', sub).group()))
for item in ch2_list:
tmp = item.split(' | ')
for sub in tmp:
if sub.count("V-load"):
ch2_vload.append(float(re.search('\d+\.\d+', sub).group()))
elif sub.count("Voltage"):
ch2_volt.append(float(re.search('\d+\.\d+', sub).group()))
elif sub.count("I-load"):
ch2_iload.append(float(re.search('\d+\.\d+', sub).group()))
elif sub.count("P-load"):
ch2_pload.append(float(re.search('\d+\.\d+', sub).group()))
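# chartType selects the channel ('ch1'/'ch2') and the quantity to plot
# ('vload', 'volt', 'iload' or 'pload'), e.g. 'ch1_vload' or 'ch2_iload'.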
if chartType.lower().count('vload') or chartType.lower().count('v-load'):
print('**vload')
unit = 'V'
if chartType.lower().count('ch1'):
y = ch1_vload
else:
y = ch2_vload
elif chartType.lower().count('volt'):
print('**volt')
unit = 'mV'
if chartType.lower().count('ch1'):
y = ch1_volt
else:
y = ch2_volt
elif chartType.lower().count('iload') or chartType.lower().count('i-load'):
print('**iload')
unit = 'mA'
if chartType.lower().count('ch1'):
y = ch1_iload
else:
y = ch2_iload
elif chartType.lower().count('pload') or chartType.lower().count('p-load'):
print('**pload')
unit = 'mW'
if chartType.lower().count('ch1'):
y = ch1_pload
else:
y = ch2_pload
x = np.linspace(1,len(y),len(y))
fig = plt.figure(1)
ax = plt.axes()
plt.xlim([0, len(y)])
plt.ylim([0,160])
plt.plot(x,y,ls='-',c='b')
plt.grid(True)
plt.title(chartType)
plt.ylabel('['+unit+']')
plt.savefig(path+chartType+'.png')
print("File Path:"+path+chartType+'.png')
|
from __future__ import absolute_import
import collections
import contextlib
import copy
import typing as tp # NOQA
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import device_resident
from chainer import initializers
from chainer import link_hook
from chainer import types # NOQA
from chainer.utils import collections_abc
from chainer import variable
def _is_shape(value):
# type: (tp.Optional[tp.Any]) -> bool
if value is None:
return True
elif isinstance(value, collections_abc.Sequence):
try:
return all(int(x) for x in value)
except TypeError:
return False
try:
int(value) # try to cast
return True
except TypeError:
return False
def _ensure_shape_dtype(value):
# type: (tp.Optional[tp.Any]) -> tp.Tuple[tp.Optional[types.ShapeSpec], types.DTypeSpec] # NOQA
# Return value paired with dtype FP32 if it is a shape.
if _is_shape(value):
return value, numpy.float32
# Otherwise, returns it with assuming a shape-dtype pair.
else:
return value # type: ignore
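# For example, Link(w=(3, 2)) registers a parameter 'w' with the default
# dtype (float32), while Link(w=((3, 2), numpy.float16)) also specifies the
# dtype, as described in the Link docstring below.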
class Link(device_resident.DeviceResident):
"""Building block of model definitions.
Link is a building block of neural network models that support various
features like handling parameters, defining network fragments,
serialization, etc.
Link is the primitive structure for the model definitions. It supports
management of parameter variables and *persistent values* that should be
incorporated to serialization.
Parameter is an instance of :class:`~chainer.Parameter` registered to a
link. A :class:`~chainer.Parameter` object can be registered as a
parameter of the link by assigning it to an attribute within *an
initialization scope*, which is a code surrounded by a
:meth:`init_scope` context manager using the ``with`` statement.
Persistent values are arrays, scalars, or any other serializable values
registered via :meth:`register_persistent` or :meth:`add_persistent`.
.. note::
Whereas arbitrary serializable objects can be registered as persistent
values, it is strongly recommended that you just register values that
should be treated as results of learning. A typical example of
persistent values is ones computed during training and required for
testing, e.g. running statistics for batch normalization.
Parameters and persistent values are referred by their names. They can be
accessed as attributes of the links. Link class itself manages the lists
of names of parameters and persistent values to distinguish parameters and
persistent values from other attributes.
Link can be composed into more complex models. This composition feature is
supported by child classes like :class:`Chain` and :class:`ChainList`. One
can create a chain by combining one or more links. See the documents for
these classes for details.
As noted above, Link supports the serialization protocol of the
:class:`~chainer.Serializer` class. **Note that only parameters and
persistent values are saved and loaded.** Other attributes are considered
as a part of user program (i.e. a part of network definition). In order to
construct a link from saved file, other attributes must be identically
reconstructed by user codes.
.. admonition:: Example
This is a simple example of custom link definition. Chainer itself also
provides many links defined under the :mod:`~chainer.links` module. They
might serve as examples, too.
Consider we want to define a simple primitive link that implements a
fully-connected layer based on the :func:`~functions.linear` function.
Note that this function takes input units, a weight variable, and a bias
variable as arguments. Then, the fully-connected layer can be defined as
follows::
import chainer
import chainer.functions as F
from chainer import initializers
import numpy as np
class LinearLayer(chainer.Link):
def __init__(self, n_in, n_out):
super(LinearLayer, self).__init__()
with self.init_scope():
self.W = chainer.Parameter(
initializers.Normal(), (n_out, n_in))
self.b = chainer.Parameter(
initializers.Zero(), (n_out,))
def forward(self, x):
return F.linear(x, self.W, self.b)
This example shows that a user can define arbitrary parameters and use
them in any methods. Links typically implement the ``forward``
operator, although they can also provide other methods to implement the
forward propagation.
Args:
params:
Names, shapes, and optional dtypes of initial parameters.
The keywords are used as the parameter names and the corresponding
values consist either of the shape or a tuple of shape and a dtype
``(shape, dtype)``.
If only the shape is supplied, the default dtype will be used.
Attributes:
name (str): Name of this link, given by the parent chain (if exists).
"""
_local_link_hooks = None # type: tp.Optional[collections.OrderedDict[str, chainer.LinkHook]] # NOQA
__init_done = False
def __init__(self, **params):
# type: (**tp.Any) -> None
super(Link, self).__init__()
self._params = set() # type: tp.Set[str]
self._persistent = set() # type: tp.Set[str]
self._within_init_scope = False # type: bool
self.name = None # type: tp.Optional[str]
# This flag has to be set before calling add_param().
self.__init_done = True
for name, value in six.iteritems(params):
shape, dtype = _ensure_shape_dtype(value)
self.add_param(name, shape, dtype=dtype)
def __check_init_done(self):
if not self.__init_done:
raise RuntimeError('Link.__init__() has not been called.')
def __str__(self):
specs = ', '.join(
'{}={}'.format(k, v) for k, v in self.printable_specs
)
return '{cls}({specs})'.format(
cls=self.__class__.__name__, specs=specs,
)
@property
def local_link_hooks(self):
# type: () -> collections.OrderedDict[str, chainer.LinkHook]
"""Ordered dictionary of registered link hooks.
Contrary to ``chainer.thread_local.link_hooks``,
which registers its elements to all functions,
link hooks in this property are specific to this link.
"""
if self._local_link_hooks is None:
self._local_link_hooks = collections.OrderedDict()
return self._local_link_hooks
@property
def _n_local_link_hooks(self):
# type: () -> int
return (0 if self._local_link_hooks is None
else len(self._local_link_hooks))
@property
def _device_id(self):
warnings.warn(
'Link._device_id is left only for backward compatibility and '
'likely to be removed. Use Link.device instead.',
DeprecationWarning)
device = self.device
if device.xp is cuda.cupy:
return device.device.id
return None
@property
def printable_specs(self):
"""Generator of printable specs of this link.
Yields:
specs (tuple of str and object):
Basically, it returns the arguments (pair of keyword and value)
that are passed to the :meth:`__init__`. This pair of key and
value is used for representing this class or subclass with
:meth:`__str__`.
"""
if 0:
yield
@property
def within_init_scope(self):
# type: () -> bool
"""True if the current code is inside of an initialization scope.
See :meth:`init_scope` for the details of the initialization scope.
"""
return getattr(self, '_within_init_scope', False)
@contextlib.contextmanager
def init_scope(self):
# type: () -> tp.Iterator[None]
"""Creates an initialization scope.
This method returns a context manager object that enables registration
of parameters (and links for :class:`~chainer.Chain`) by an assignment.
A :class:`~chainer.Parameter` object can be automatically registered
by assigning it to an attribute under this context manager.
.. admonition:: Example
In most cases, the parameter registration is done in the
initializer method. Using the ``init_scope`` method, we can
simply assign a :class:`~chainer.Parameter` object to register
it to the link.
.. code-block:: python
class MyLink(chainer.Link):
def __init__(self):
super().__init__()
with self.init_scope():
self.W = chainer.Parameter(0, (10, 5))
self.b = chainer.Parameter(0, (5,))
"""
# super().__init__ must be called before init_scope().
self.__check_init_done()
old_flag = self.within_init_scope
self._within_init_scope = True
try:
yield
finally:
self._within_init_scope = old_flag
def __call__(self, *args, **kwargs):
# type: (*tp.Any, **tp.Any) -> tp.Any # NOQA
self.__check_init_done()
# TODO(niboshi): Support link hooks for other forward methods.
hooks = chainer._get_link_hooks()
if self._n_local_link_hooks > 0:
hooks = collections.OrderedDict(hooks)
hooks.update(self.local_link_hooks)
hooks = hooks.values() # avoid six for performance
# Call forward_preprocess hook
if hooks:
pre_cb_args = link_hook._ForwardPreprocessCallbackArgs(
self, 'forward', args, kwargs)
for hook in hooks:
hook.forward_preprocess(pre_cb_args)
# Call the forward function
# (See #5078) super().__call__ is used when the method is injected by a
# mixin class. To keep backward compatibility, the injected one is
# prioritized over forward().
forward = getattr(super(Link, self), '__call__', None)
if forward is None:
# forward is implemented in the child classes
forward = self.forward # type: ignore
out = forward(*args, **kwargs)
# Call forward_postprocess hook
if hooks:
post_cb_args = link_hook._ForwardPostprocessCallbackArgs(
self, 'forward', args, kwargs, out)
for hook in hooks:
hook.forward_postprocess(post_cb_args)
return out
def __setattr__(self, name, value):
# type: (str, tp.Any) -> None
if self.within_init_scope and isinstance(value, variable.Parameter):
value.name = name
self._params.add(name)
self._persistent.discard(name)
super(Link, self).__setattr__(name, value)
def __delattr__(self, name):
# type: (str) -> None
self._params.discard(name)
self._persistent.discard(name)
super(Link, self).__delattr__(name)
def add_param(self, name, shape=None, dtype=numpy.float32,
initializer=None):
# type: (str, tp.Optional[types.ShapeSpec], types.DTypeSpec, tp.Optional[types.InitializerSpec]) -> None # NOQA
"""Registers a parameter to the link.
Args:
name (str): Name of the parameter. This name is also used as the
attribute name.
shape (int or tuple of ints): Shape of the parameter array. If it
is omitted, the parameter variable is left uninitialized.
dtype: Data type of the parameter array.
initializer (:ref:`initializer <initializer>`): If it is not
``None``, the data is initialized with the given initializer.
If it is an array, the data is directly initialized by it. If
it is callable, it is used as a weight initializer. Note that
in these cases, ``dtype`` argument is ignored. It can also be
a scalar, in which case the data array will be filled by this
scalar. Note that float32 is used in this case.
"""
if name in self.__dict__:
raise AttributeError(
'cannot register a new parameter %s: attribute exists'
% name)
if initializer is None:
initializer = initializers.NaN(dtype)
param = variable.Parameter(initializer, shape)
with self.init_scope():
setattr(self, name, param)
def add_persistent(self, name, value):
# type: (str, tp.Any) -> None
"""Registers a persistent value to the link.
The registered value is saved and loaded on serialization and
deserialization. The value is set to an attribute of the link.
Args:
name (str): Name of the persistent value. This name is also used
for the attribute name.
value: Value to be registered.
"""
d = self.__dict__
if name in d:
raise AttributeError(
'cannot register a new persistent value %s: attribute exists'
% name)
self._persistent.add(name)
self._params.discard(name)
d[name] = value
def register_persistent(self, name):
# type: (str) -> None
"""Registers an attribute of a given name as a persistent value.
This is a convenient method to register an existing attribute as a
persistent value. If ``name`` has been already registered as a
parameter, this method removes it from the list of parameter names
and re-registers it as a persistent value.
Args:
name (str): Name of the attribute to be registered.
"""
if not hasattr(self, name):
raise AttributeError(
'cannot register non-existent attribute %s as a persistent '
'value' % name)
self._persistent.add(name)
self._params.discard(name)
def copy(self, mode='share'):
# type: (str) -> 'Link'
"""Copies the link hierarchy to new one.
The whole hierarchy rooted by this link is copied. There are three
modes to perform copy. Please see the documentation for the argument
``mode`` below.
The name of the link is reset on the copy, since the copied instance
does not belong to the original parent chain (even if exists).
Args:
mode (str): It should be either ``init``, ``copy``, or ``share``.
``init`` means parameter variables under the returned link
object is re-initialized by calling their
:meth:`~chainer.Parameter.initialize` method, so that all the
parameters may have different initial values from the original
link.
``copy`` means that the link object is deeply copied, so that
its parameters are not re-initialized but are also deeply
copied. Thus, all parameters have same initial values but can
be changed independently.
``share`` means that the link is shallowly copied, so that its
parameters' arrays are shared with the original one. Thus,
their values are changed synchronously. The default ``mode``
is ``share``.
Returns:
Link: Copied link object.
"""
if mode == 'share':
ret = copy.copy(self)
ret._params = set(self._params)
ret._persistent = set(self._persistent)
ret.name = None
d = ret.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in ret._params:
d[name] = copy.copy(d[name])
d[name].grad = None
return ret
elif mode == 'copy':
return copy.deepcopy(self)
elif mode == 'init':
ret = copy.deepcopy(self)
for param in ret.params(include_uninit=False):
param.initialize(param.shape)
return ret
else:
raise ValueError(
'The \'mode\' argument should be either \'init\','
'\'copy\', or \'share\'. But {} was given.'.format(mode))
def device_resident_accept(self, visitor):
super(Link, self).device_resident_accept(visitor)
d = self.__dict__
for name in self._params:
x = d[name]
visitor.visit_variable(x)
for name in self._persistent:
x = d[name]
if isinstance(x, chainer.get_array_types()):
d[name] = visitor.visit_array(x)
def params(self, include_uninit=True):
# type: (bool) -> tp.Iterator[chainer.Parameter]
"""Returns a generator of all parameters under the link hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all parameters.
"""
d = self.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in sorted(self._params):
if include_uninit or d[name].data is not None:
yield d[name]
def namedparams(self, include_uninit=True):
# type: (bool) -> tp.Iterator[tp.Tuple[str, chainer.Parameter]]
"""Returns a generator of all (path, param) pairs under the hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all (path, parameter) pairs. The
paths are relative from this link.
"""
d = self.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in sorted(self._params):
if include_uninit or d[name].data is not None:
yield '/' + name, d[name]
def links(self, skipself=False):
# type: (bool) -> tp.Iterator['Link']
"""Returns a generator of all links under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all links.
"""
if not skipself:
yield self
def namedlinks(self, skipself=False):
# type: (bool) -> tp.Iterator[tp.Tuple[str, 'Link']]
"""Returns a generator of all (path, link) pairs under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all (path, link) pairs.
"""
if not skipself:
yield '/', self
def children(self):
# type: () -> tp.Iterator['Link']
"""Returns a generator of all child links.
Returns:
A generator object that generates all child links.
"""
if 0:
yield
def copyparams(self, link, copy_persistent=True):
# type: ('Link', bool) -> None
"""Copies all parameters from given link.
This method copies data arrays of all parameters in the hierarchy. The
copy is even done across the host and devices. Note that this method
does not copy the gradient arrays.
*From v5.0.0:* this method also copies the persistent values (e.g. the
moving statistics of :class:`~chainer.links.BatchNormalization`). If
the persistent value is an ndarray, the elements are copied. Otherwise,
it is copied using :func:`copy.deepcopy`. The old behavior (not copying
persistent values) can be reproduced with ``copy_persistent=False``.
Args:
link (Link): Source link object.
copy_persistent (bool): If ``True``, persistent values are also
copied. ``True`` by default.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].copydata(src[name])
if copy_persistent:
array_types = chainer.get_array_types()
for name in self._persistent:
d = dst[name]
s = src[name]
if isinstance(d, array_types) and isinstance(s, array_types):
backend.copyto(d, s)
else:
dst[name] = copy.deepcopy(s)
def cleargrads(self):
# type: () -> None
"""Clears all gradient arrays.
This method should be called before the backward computation at every
iteration of the optimization.
"""
for param in self.params():
param.cleargrad()
def zerograds(self):
# type: () -> None
"""Initializes all gradient arrays by zero.
.. deprecated:: v1.15
Use the more efficient :meth:`cleargrads` instead.
"""
warnings.warn(
'Link.zerograds is deprecated. Use Link.cleargrads instead.',
DeprecationWarning)
for param in self.params():
param.zerograd()
def addgrads(self, link):
# type: ('Link') -> None
"""Accumulates gradient values from given link.
This method adds each gradient array of the given link to corresponding
gradient array of this link. The accumulation is even done across
host and different devices.
Args:
link (Link): Source link object.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].addgrad(src[name])
def enable_update(self):
# type: () -> None
"""Enables update rules of all parameters under the link hierarchy.
This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
update rule of each parameter variable to ``True``.
"""
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = True
def disable_update(self):
# type: () -> None
"""Disables update rules of all parameters under the link hierarchy.
This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
update rule of each parameter variable to ``False``.
"""
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = False
@property
def update_enabled(self):
# type: () -> bool
"""``True`` if at least one parameter has an update rule enabled."""
for param in self.params():
rule = param.update_rule
if rule is not None and rule.enabled:
return True
return False
def serialize(self, serializer):
# type: (chainer.AbstractSerializer) -> None
"""Serializes the link object.
Args:
serializer (~chainer.AbstractSerializer): Serializer object.
"""
d = self.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in self._params:
param = d[name]
data = serializer(name, param.data) # type: types.NdArray
if param.data is None and data is not None:
# Initialize the parameter here
param.initialize(data.shape)
with chainer.using_device(param.device):
param.data[...] = param.device.send(data)
for name in self._persistent:
d[name] = serializer(name, d[name])
def repeat(self, n_repeat, mode='init'):
# type: (int, str) -> chainer.Sequential
"""Repeats this link multiple times to make a :class:`~chainer.Sequential`.
This method returns a :class:`~chainer.Sequential` object which has
the same :class:`~chainer.Link` multiple times repeatedly. The ``mode``
argument means how to copy this link to repeat.
.. admonition:: Example
You can repeat the same link multiple times to create a longer
:class:`~chainer.Sequential` block like this:
.. testcode::
class ConvBNReLU(chainer.Chain):
def __init__(self):
super(ConvBNReLU, self).__init__()
with self.init_scope():
self.conv = L.Convolution2D(
None, 64, 3, 1, 1, nobias=True)
self.bn = L.BatchNormalization(64)
def forward(self, x):
return F.relu(self.bn(self.conv(x)))
net = ConvBNReLU().repeat(16, mode='init')
The ``net`` object contains 16 blocks, each of which is
``ConvBNReLU``. And the ``mode`` was ``init``, so each block
is re-initialized with different parameters. If you give
``copy`` to this argument, each block has same values for its
parameters but its object ID is different from others. If it is
``share``, each block is same to others in terms of not only
parameters but also the object IDs because they are shallow-copied,
so that when the parameter of one block is changed, all the
parameters in the others also change.
Args:
n_repeat (int): Number of times to repeat.
mode (str): It should be either ``init``, ``copy``, or ``share``.
``init`` means parameters of each repeated element in the
returned :class:`~chainer.Sequential` will be re-initialized,
so that all elements have different initial parameters.
``copy`` means that the parameters will not be re-initialized
but object itself will be deep-copied, so that all elements
have same initial parameters but can be changed independently.
``share`` means all the elements which consist the resulting
:class:`~chainer.Sequential` object are same object because
they are shallow-copied, so that all parameters of elements
are shared with each other.
"""
ret = chainer.Sequential()
if n_repeat <= 0:
return ret
if mode not in ['init', 'copy', 'share']:
raise ValueError(
'The \'mode\' argument should be either \'init\','
'\'copy\', or \'share\'. But {} was given.'.format(mode))
link = self
for _ in range(n_repeat):
ret.append(link.copy(mode))
return ret
def count_params(self):
# type: () -> int
"""Counts the total number of parameters.
This method counts the total number of scalar values included in all
the :class:`~chainer.Parameter`\\ s held by this link and its
descendants.
        If the link contains uninitialized parameters, this method raises a
warning.
Returns:
The total size of parameters (int)
"""
size = 0
for name, param in self.namedparams():
if param.array is None:
warnings.warn(
'Parameter \'{}\' has not been initialized, so the '
'resulting count will not include the number of parameters'
' in it.'.format(name))
continue
size += param.size
return size
def add_hook(self, hook, name=None):
# type: (chainer.LinkHook, tp.Optional[str]) -> 'Link'
"""Registers a link hook.
Args:
hook (~chainer.LinkHook): Link hook to be registered.
name (str): Name of the link hook. The name must be unique
among link hooks registered to this link. If ``None``,
the default name of the link hook is used.
Returns:
self
"""
if not isinstance(hook, link_hook.LinkHook):
raise TypeError('Hook must be of type LinkHook')
if name is None:
name = hook.name
hooks = self.local_link_hooks
if name in hooks:
raise KeyError('Hook %s already exists' % name)
hooks[name] = hook
hook.added(self)
return self
def delete_hook(self, name):
# type: (str) -> None
"""Unregisters the link hook.
Args:
name (str): The name of the link hook to be unregistered.
"""
if name in self.local_link_hooks:
self.local_link_hooks[name].deleted(self)
del self.local_link_hooks[name]
else:
raise KeyError('Hook %s does not exist' % name)
class Chain(Link):
"""Composable link with object-like interface.
Composability is one of the most important features of neural nets. Neural
net models consist of many reusable fragments, and each model itself might
be embedded into a larger learnable system. Chain enables us to write a
neural net based on composition, without bothering about routine works like
collecting parameters, serialization, copying the structure with parameters
shared, etc.
This class actually provides a way to compose one or more links into one
structure. A chain can contain one or more *child links*. Child link is a
link registered to the chain with its own name. The child link is stored to
an attribute of the chain with the name. User can write a whole model or a
fragment of neural nets as a child class of Chain.
Each chain itself is also a link. Therefore, one can combine chains into
higher-level chains. In this way, links and chains construct a *link
hierarchy*. Link hierarchy forms a tree structure, where each node is
identified by the path from the root. The path is represented by a string
like a file path in UNIX, consisting of names of nodes on the path, joined
by slashes ``/``.
A child link can be added just by assigning it to an attribute of the
chain within :meth:`~chainer.Chain.init_scope`.
The registered child link is saved and loaded on serialization and
deserialization, and involved in the optimization. The registered link
is called a child. The child link is accessible via :meth:`children`
generator, which returns a generator running through the children in
lexical order.
On registration of a child link, its :attr:`~Link.name` attribute is also
set (or overwritten if the link has already been registered to another
chain).
.. admonition:: Example
This is a simple example of custom chain definition. Chainer itself also
provides some chains defined under the :mod:`~chainer.links` module.
They might serve as examples, too.
Consider we want to define a multi-layer perceptron consisting of two
hidden layers with rectifiers as activation functions. We can use the
:class:`~chainer.links.Linear` link as a building block::
import chainer
import chainer.functions as F
import chainer.links as L
class MultiLayerPerceptron(chainer.Chain):
def __init__(self, n_in, n_hidden, n_out):
super(MultiLayerPerceptron, self).__init__()
with self.init_scope():
self.layer1 = L.Linear(n_in, n_hidden)
self.layer2 = L.Linear(n_hidden, n_hidden)
self.layer3 = L.Linear(n_hidden, n_out)
def forward(self, x):
# Forward propagation
h1 = F.relu(self.layer1(x))
h2 = F.relu(self.layer2(h1))
return self.layer3(h2)
Child links are registered via the assignment within a
``with self.init_scope():`` block. The forward propagation is often
implemented as the ``forward`` operator as the above example, though
it is not mandatory.
Args:
links: Child links. The keywords are used as their names. The names are
also set to the links.
"""
def __init__(self, **links):
# type: (**Link) -> None
super(Chain, self).__init__()
self._children = set() # type: tp.Set[str]
for name, link in six.iteritems(links):
self.add_link(name, link)
def __str__(self):
reps = []
for child in self.children():
rep = '({name}): {rep},'.format(
name=child.name, rep=str(child),
)
# Add indentation to each line.
for line in rep.splitlines():
reps.append(' {line}\n'.format(line=line))
reps = ''.join(reps)
if reps: # No newline with no children.
reps = '\n' + reps
return '{cls}({children})'.format(
cls=self.__class__.__name__, children=reps,
)
def __getitem__(self, name):
# type: (str) -> tp.Any
"""Equivalent to getattr."""
return getattr(self, name)
def __setattr__(self, name, value):
# type: (str, tp.Any) -> None
if self.within_init_scope and isinstance(value, Link):
if hasattr(self, name):
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
value.name = name
self._children.add(name)
super(Chain, self).__setattr__(name, value)
def __delattr__(self, name):
# type: (str) -> None
self._children.discard(name)
super(Chain, self).__delattr__(name)
def add_link(self, name, link):
# type: (str, Link) -> None
"""Registers a child link to this chain.
Args:
name (str): Name of the child link. This name is also used as the
attribute name.
link (Link): The link object to be registered.
"""
if name in self.__dict__:
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
if not isinstance(link, Link):
raise TypeError('cannot register a non-link object as a child')
with self.init_scope():
setattr(self, name, link)
def copy(self, mode='share'):
# type: (str) -> 'Chain'
ret = super(Chain, self).copy() # type: ignore # should be Chain
ret._children = set(ret._children) # type: ignore
d = ret.__dict__ # type: tp.Dict[str, Link]
for name in ret._children: # type: ignore
# copy child links recursively
copied = d[name].copy(mode)
copied.name = name
d[name] = copied
return ret # type: ignore
def device_resident_accept(self, visitor):
super(Chain, self).device_resident_accept(visitor)
d = self.__dict__
for name in self._children:
d[name].device_resident_accept(visitor)
def params(self, include_uninit=True):
# type: (bool) -> tp.Iterator[chainer.Parameter]
for param in super(Chain, self).params(include_uninit):
yield param
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
for param in d[name].params(include_uninit):
yield param
def namedparams(self, include_uninit=True):
# type: (bool) -> tp.Iterator[tp.Tuple[str, chainer.Parameter]]
for ret in super(Chain, self).namedparams(include_uninit):
yield ret
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
prefix = '/' + name
for path, param in d[name].namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself=False):
# type: (bool) -> tp.Iterator[Link]
if not skipself:
yield self
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
for link in d[name].links():
yield link
def namedlinks(self, skipself=False):
# type: (bool) -> tp.Iterator[tp.Tuple[str, Link]]
if not skipself:
yield '/', self
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
child = d[name]
prefix = '/' + name
yield prefix, child
for path, link in d[name].namedlinks(True):
yield prefix + path, link
def children(self):
# type: () -> tp.Iterator[Link]
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
yield d[name]
def copyparams(self, link, copy_persistent=True):
# type: (Link, bool) -> None
super(Chain, self).copyparams(link, copy_persistent)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].copyparams(src[name], copy_persistent)
def addgrads(self, link):
# type: (Link) -> None
super(Chain, self).addgrads(link)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].addgrads(src[name])
def serialize(self, serializer):
# type: (chainer.AbstractSerializer) -> None
super(Chain, self).serialize(serializer)
d = self.__dict__ # type: tp.Dict[str, Link]
for name in self._children:
d[name].serialize(serializer[name])
class ChainList(Link, collections_abc.MutableSequence):
"""Composable link with list-like interface.
This is another example of compositional link. Unlike :class:`Chain`, this
class can be used like a list of child links. Each child link is indexed by
a non-negative integer, and it maintains the current number of registered
child links. The :meth:`add_link` method inserts a new link at the end of
the list. It is useful to write a chain with arbitrary number of child
links, e.g. an arbitrarily deep multi-layer perceptron.
This class inherits the methods `index`, `count`, `append`, `reverse`,
`extend`, `pop`, `remove` from `collections.abc.MutableSequence` and
can be accessed and assigned by index or slice.
Args:
links: Initial child links.
"""
def __init__(self, *links):
# type: (*Link) -> None
super(ChainList, self).__init__()
self._children = [] # type: tp.List[Link]
for link in links:
self.add_link(link)
def __str__(self):
reps = []
for index, child in enumerate(self._children):
rep = '({index}): {rep},'.format(
index=index, rep=str(child),
)
# Add indentation to each line.
for line in rep.splitlines():
reps.append(' {line}\n'.format(line=line))
reps = ''.join(reps)
if reps: # No newline with no children.
reps = '\n' + reps
return '{cls}({children})'.format(
cls=self.__class__.__name__, children=reps,
)
def __setattr__(self, name, value):
# type: (str, tp.Any) -> None
if self.within_init_scope and isinstance(value, Link):
raise TypeError(
'cannot register a new link'
' within a "with chainlist.init_scope():" block.')
super(ChainList, self).__setattr__(name, value)
def __setitem__(self, index, value):
# type: (tp.Union[int, slice], tp.Union[Link, tp.Iterable[Link]]) -> None # NOQA
if isinstance(index, int):
link = value # type: ignore # should be Link
link.name = str(index) # type: ignore
self._children[index] = link # type: ignore
elif isinstance(index, slice):
self._children[index] = value # type: ignore # should be Iterable[Link] # NOQA
for i, c in enumerate(self._children): # type: ignore
c.name = str(i)
else:
raise TypeError(
'ChainList indices must be integers or slices, not %s' %
type(index).__name__)
def __getitem__(self, index):
"""Returns the child at given index.
Args:
index (int): Index of the child in the list.
Returns:
Link: The ``index``-th child link.
"""
return self._children[index]
def __delitem__(self, index):
# type: (tp.Union[int, slice]) -> None
del self._children[index]
for i, c in enumerate(self._children):
c.name = str(i)
def insert(self, index, link):
# type: (int, Link) -> None
"""Insert a child link at the given index.
Args:
index (int): The position of the list where the new
link is inserted.
link (Link): The link to be inserted.
"""
if index == len(self._children):
self._children.append(link)
link.name = str(index)
else:
self._children.insert(index, link)
for i, c in enumerate(self._children):
c.name = str(i)
def __iter__(self):
# type: () -> tp.Iterator[Link]
return iter(self._children)
def __len__(self):
# type: () -> int
"""Returns the number of children."""
return len(self._children)
def add_link(self, link):
# type: (Link) -> None
"""Registers a child link and adds it to the tail of the list.
Args:
link (Link): The link object to be registered.
"""
self.append(link)
def copy(self, mode='share'):
# type: (str) -> 'ChainList'
"""Returns a deep copy of the chainlist."""
ret = super(ChainList, self).copy() # type: ignore # should be ChainList # NOQA
ret._children = list(ret._children) # type: ignore # copy
children = ret._children # type: ignore
for i, child in enumerate(children):
child = child.copy(mode)
child.name = str(i)
children[i] = child
return ret # type: ignore
def device_resident_accept(self, visitor):
super(ChainList, self).device_resident_accept(visitor)
for link in self._children:
link.device_resident_accept(visitor)
def params(self, include_uninit=True):
# type: (bool) -> tp.Iterator[chainer.Parameter]
for param in super(ChainList, self).params(include_uninit):
yield param
for link in self._children:
for param in link.params(include_uninit):
yield param
def namedparams(self, include_uninit=True):
# type: (bool) -> tp.Iterator[tp.Tuple[str, chainer.Parameter]]
for ret in super(ChainList, self).namedparams(include_uninit):
yield ret
for idx, link in enumerate(self._children):
prefix = '/%d' % idx
for path, param in link.namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself=False):
# type: (bool) -> tp.Iterator[Link]
if not skipself:
yield self
for child in self._children:
for link in child.links():
yield link
def namedlinks(self, skipself=False):
# type: (bool) -> tp.Iterator[tp.Tuple[str, Link]]
if not skipself:
yield '/', self
for idx, child in enumerate(self._children):
prefix = '/%d' % idx
yield prefix, child
for path, link in child.namedlinks(True):
yield prefix + path, link
def children(self):
# type: () -> tp.Iterator[Link]
for child in self._children:
yield child
def copyparams(self, link, copy_persistent=True):
# type: (Link, bool) -> None # link is actually a ChainList
super(ChainList, self).copyparams(link, copy_persistent)
for idx, child in enumerate(self._children):
child.copyparams(link[idx], copy_persistent) # type: ignore
def addgrads(self, link):
# type: (Link) -> None # link is actually a ChainList
super(ChainList, self).addgrads(link)
for idx, child in enumerate(self._children):
child.addgrads(link[idx]) # type: ignore
def serialize(self, serializer):
# type: (chainer.AbstractSerializer) -> None
super(ChainList, self).serialize(serializer)
for idx, child in enumerate(self._children):
child.serialize(serializer['%d' % idx])
|
from flask import Flask, render_template, request, flash, redirect, url_for, session
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Message, Mail
from passlib.hash import sha256_crypt
from functools import wraps
import requests
import time
# create the flask app from config file and instantiate db
application = Flask(__name__)
application.config.from_object('config.AWSConfig')
db = SQLAlchemy(application)
# init mail client
mail = Mail()
mail.init_app(application)
# have to import since models relies on db object
from models import Cities, Users, Listings
from forms import RegisterForm, ContactForm, ProfileForm
# custom decorator to verify user is logged in
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash ("Please login to see this content.", "danger")
return redirect(url_for('login'))
return wrap
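# protected views stack @is_logged_in directly below @application.route(...)
# (see the /items and /profile routes below)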
# register a user via the registration form, validating with wtforms;
# if valid, notify the user and redirect; otherwise display the errors
@application.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
# use passwordrandom.com to get user ip and recommend password
recommendation = requests.get('https://www.passwordrandom.com/query?command=password')\
.content.decode("utf-8")
ip = requests.get('https://www.passwordrandom.com/query?command=ip').\
content.decode("utf-8")
flash("We recommend using password: '%s'" % recommendation, 'warning')
if request.method == 'POST' and form.validate():
new_user = Users(first=form.first.data,
last=form.last.data,
email=form.email.data,
username=form.username.data,
city=form.city.data,
password=sha256_crypt.encrypt(str(form.password.data)),
ip=ip,
register_date=time.strftime('%Y-%m-%d %H:%M:%S'))
db.session.add(new_user)
db.session.commit()
session.pop('_flashes', None)
flash('Welcome to flippin!\nYour account has been successfully created.', 'success')
return redirect(url_for('index'))
return render_template('register.html', form=form)
# homepage
@application.route('/')
def index():
return render_template('home.html')
# login user. does not use wtforms since little validation needs to be done.
@application.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
# get user information and query database for match
username = request.form['username']
password_candidate = request.form['password']
result = Users.query.filter_by(username=username).first()
# if info is correct redirect and set session variables
if result is not None:
password = result.password
if sha256_crypt.verify(password_candidate, password):
session['logged_in'] = True
session['username'] = username
session['city'] = result.city
                # gets the related city name from the user's city foreign key
session['city_name'] = Cities.query.filter_by(id=result.city).first().name
flash('Log in successful. Enjoy!', 'success')
return redirect(url_for('items'))
# otherwise return relevant error
else:
return render_template('login.html', error="Invalid password")
else:
return render_template('login.html', error="No user found")
return render_template('login.html')
# items page, requires that user is logged in
@application.route('/items')
@is_logged_in
def items():
listings = Listings.query.filter_by(city=session['city']).all()
return render_template('items.html', items=listings, length=len(listings))
@application.route('/profile', methods=['GET', 'POST'])
@is_logged_in
def profile():
form = ProfileForm(request.form)
user = Users.query.filter_by(username=session['username']).first()
if request.method == 'POST' and form.validate():
user.email = form.email.data
user.city = form.city.data
user.password = sha256_crypt.encrypt(str(form.password.data))
session['city'] = form.city.data
db.session.commit()
flash('Your account settings have been updated.', 'success')
return redirect(url_for('profile'))
return render_template('profile.html', user=user, form=form)
@application.route('/delete')
@is_logged_in
def delete_user():
db.session.query(Users).filter(Users.username == session['username']).delete()
db.session.commit()
session.clear()
flash('Your account has been deleted! Sorry to see you go.', 'success')
return render_template('home.html')
# logout method, clear session variables and redirect
@application.route('/logout')
def logout():
session.clear()
flash('You are now logged out', 'success')
return redirect(url_for('login'))
# contact page
@application.route('/contact', methods=['GET', 'POST'])
def contact():
form = ContactForm(request.form)
# on submit send email with form contents to and from support email
if request.method == 'POST' and form.validate():
        # the support address is used as both the sender and the recipient
msg = Message(form.subject.data, sender="support@flippinapp.com",
recipients=["support@flippinapp.com"])
msg.body = """
From: %s <%s>
About: %s
%s
""" % (form.name.data, form.email.data, form.subject.data, form.message.data)
mail.send(msg)
flash('Thanks for reaching out! We will get back to you shortly.', 'success')
return render_template('contact.html', form=form)
if __name__ == '__main__':
application.run()
|
from pkg_resources import resource_filename
from .base import set_base_parser
from .helper import add_arg_group
from ..helper import get_random_identity
def set_hw_parser(parser=None):
if not parser:
parser = set_base_parser()
gp = add_arg_group(parser, title='General')
gp.add_argument('--workdir', type=str, default=get_random_identity(),
help='the workdir for hello-world demo, '
'all data, indices, shards and outputs will be saved there')
gp.add_argument('--logserver', action='store_true', default=False,
help='start a log server for the dashboard')
gp.add_argument('--logserver-config', type=str,
default=resource_filename('jina',
'/'.join(('resources', 'logserver.default.yml'))),
help='the yaml config of the log server')
gp.add_argument('--download-proxy', type=str,
help='specify the proxy when downloading sample data')
gp = add_arg_group(parser, title='Scalability')
gp.add_argument('--shards', type=int,
default=2,
                    help='number of shards to use when indexing and querying')
gp.add_argument('--parallel', type=int,
default=2,
                    help='degree of parallelism to use when indexing and querying')
gp = add_arg_group(parser, title='Index')
gp.add_argument('--uses-index', type=str,
default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.index.yml'))),
help='the yaml path of the index flow')
gp.add_argument('--index-data-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
help='the url of index data (should be in idx3-ubyte.gz format)')
gp.add_argument('--index-labels-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
                    help='the url of the index labels data (should be in idx1-ubyte.gz format)')
gp.add_argument('--index-batch-size', type=int,
default=1024,
help='the batch size in indexing')
gp = add_arg_group(parser, title='Search')
gp.add_argument('--uses-query', type=str,
default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.query.yml'))),
help='the yaml path of the query flow')
gp.add_argument('--query-data-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
help='the url of query data (should be in idx3-ubyte.gz format)')
gp.add_argument('--query-labels-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
                    help='the url of the query labels data (should be in idx1-ubyte.gz format)')
gp.add_argument('--query-batch-size', type=int,
default=32,
help='the batch size in searching')
gp.add_argument('--num-query', type=int, default=128,
help='number of queries to visualize')
gp.add_argument('--top-k', type=int, default=50,
help='top-k results to retrieve and visualize')
return parser
|
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
Mantra Output node App for use with Toolkit's Houdini engine.
"""
import sgtk
class TkMantraNodeApp(sgtk.platform.Application):
"""The Mantra Output Node."""
def init_app(self):
"""Initialize the app."""
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
self.handler = tk_houdini_mantra.TkMantraNodeHandler(self)
def convert_to_regular_mantra_nodes(self):
"""Convert Toolkit Mantra nodes to regular Mantra nodes.
        Convert all Toolkit Mantra nodes found in the current script to
regular Mantra nodes. Additional Toolkit information will be stored in
user data named 'tk_*'
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> app.convert_to_regular_mantra_nodes()
"""
self.log_debug(
"Converting Toolkit Mantra nodes to built-in Mantra nodes.")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
tk_houdini_mantra.TkMantraNodeHandler.\
convert_to_regular_mantra_nodes(self)
def convert_back_to_tk_mantra_nodes(self):
"""Convert regular Mantra nodes back to Toolkit Mantra nodes.
Convert any regular Mantra nodes that were previously converted
from Toolkit Mantra nodes back into Toolkit Mantra nodes.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> app.convert_back_to_tk_mantra_nodes()
"""
self.log_debug(
"Converting built-in Mantra nodes back to Toolkit Mantra nodes.")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
tk_houdini_mantra.TkMantraNodeHandler.\
convert_back_to_tk_mantra_nodes(self)
def get_nodes(self):
"""
Returns a list of hou.node objects for each tk mantra node.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> tk_mantra_nodes = app.get_nodes()
"""
self.log_debug("Retrieving tk-houdini-mantra nodes...")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
nodes = tk_houdini_mantra.TkMantraNodeHandler.\
get_all_tk_mantra_nodes()
self.log_debug("Found %s tk-houdini-mantra nodes." % (len(nodes),))
return nodes
def get_output_path(self, node):
"""
Returns the evaluated output path for the supplied node.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> output_path = app.get_output_path(tk_mantra_node)
"""
self.log_debug("Retrieving output path for %s" % (node,))
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
output_path = tk_houdini_mantra.TkMantraNodeHandler.\
get_output_path(node)
self.log_debug("Retrieved output path: %s" % (output_path,))
return output_path
def get_work_file_template(self):
"""
Returns the configured work file template for the app.
"""
return self.get_template("work_file_template")
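# Illustrative sketch (not part of the shipped app): combining the calls
# documented above from a Houdini python shell to collect the evaluated
# output path of every Toolkit Mantra node in the current session.
#
# eng = sgtk.platform.current_engine()
# app = eng.apps["tk-houdini-mantranode"]
# output_paths = [app.get_output_path(node) for node in app.get_nodes()]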
|
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
# Find path for cases
curr_dir_path = os.path.dirname(os.path.realpath(__file__))
# print(curr_dir_path)
# cases = os.listdir(curr_dir_path + '/Cases')
# pop = cases.index('baseCase')
# cases.pop(pop)
# Label graph with bold characters
font_axis_publish = {
'color': 'black',
'weight': 'bold',
'size': 22,
}
# Read in digitized data
digi_n = pd.read_csv(
curr_dir_path + '/n_nstar_radius.dat',
header = 0,
sep = '\t',
names = ['r', 'n_nstar']
)
digi_T = pd.read_csv(
curr_dir_path + '/T_Tstar_radius_DAC.dat',
header = 0,
sep = '\t',
names = ['r', 'T_Tstar']
)
# Read in simulated data.
sim = pd.read_csv(
curr_dir_path + '/postProcessing/sampleDict/0.3/horizontalLine_Ttra_Ar_rhoN_Ar.csv'
)
# Used to see what the values trend to.
print(sim['Ttra_Ar'])
sim = sim[['x', 'rhoN_Ar', 'Ttra_Ar']].dropna()
sim['rhoN_Ar'] = sim['rhoN_Ar'] / 8.377e20
sim['Ttra_Ar'] = sim['Ttra_Ar'] / 1000.0
# Produce analytical data (isentropic flow relations in terms of Mach number)
def TTt_Ma(Ma, ga = 1.4):
return (ga + 1) / (2 + (ga - 1) * Ma ** 2)
def rrt_Ma(Ma, ga = 1.4):
rrt = (1 / TTt_Ma(Ma, ga)) ** ((ga + 1) / (ga - 1))
rrt = np.sqrt(np.sqrt(rrt) / Ma)
return rrt
def nnt_Ma(Ma, ga = 1.4):
return TTt_Ma(Ma, ga) ** (1 / (ga - 1))
def a(T, ga = 1.4, R = 287):
return np.sqrt(ga * R * T)
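# Quick sanity check on the relations above (added for clarity): at Ma = 1 the
# isentropic ratios reduce to T/T* = 1, r/r* = 1 and n/n* = 1, which guards
# against exponent or sign typos in the expressions.
assert np.isclose(TTt_Ma(1.0, ga=1.67), 1.0)
assert np.isclose(rrt_Ma(1.0, ga=1.67), 1.0)
assert np.isclose(nnt_Ma(1.0, ga=1.67), 1.0)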
Ma_domain = np.linspace(1, 25, 100)
ga = 1.67
TTt = TTt_Ma(Ma_domain, ga = ga)
rrt = rrt_Ma(Ma_domain, ga = ga)
nnt = nnt_Ma(Ma_domain, ga = ga)
print("Printing rrt")
print(rrt)
# Graph Results
plt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)
plt.ylabel('n/n*', fontdict = font_axis_publish)
plt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)
plt.plot(sim['x'], sim['rhoN_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')
plt.plot(digi_n['r'], digi_n['n_nstar'], label = 'DAC (Lumpkin, Stewart)')
plt.plot(rrt, nnt, label = 'Analytical Solution')
plt.legend()
plt.yscale('log')
plt.ylim(bottom = 1e-4, top = 1)
plt.savefig(curr_dir_path + '/digitized_vs_analytical_n.png')
plt.close()
plt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)
plt.ylabel('T/T*', fontdict = font_axis_publish)
plt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)
plt.plot(sim['x'], sim['Ttra_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')
plt.plot(digi_T['r'], digi_T['T_Tstar'], label = 'DAC (Lumpkin, Stewart)')
plt.plot(rrt, TTt, label = 'Analytical Solution')
plt.legend()
plt.yscale('log')
plt.ylim(bottom = 1e-3, top = 1)
plt.savefig(curr_dir_path + '/digitized_vs_analytical_T.png')
plt.close()
|
from github.interfaces import Type
class LicenseRule(Type):
"""
Represents a license rule.
"""
__slots__ = ()
_repr_fields = [
"key",
]
_graphql_fields = [
"description",
"key",
"label",
]
@property
def description(self):
"""
A description of the license rule.
:type: :class:`str`
"""
return self._get_field("description")
@property
def key(self):
"""
The machine-readable key of the license rule.
:type: :class:`str`
"""
return self._get_field("key")
@property
def label(self):
"""
The human-readable label of the license rule.
:type: :class:`str`
"""
return self._get_field("label")
__all__ = [
"LicenseRule",
]
|
# Copyright (c) 2015 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2016-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glmatthe@cisco.com>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2021 bot <bot@noreply.github.com>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
from astroid import nodes
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import HIGH, IAstroidChecker, ITokenChecker
class ElseifUsedChecker(BaseTokenChecker):
"""Checks for use of "else if" when an "elif" could be used"""
__implements__ = (ITokenChecker, IAstroidChecker)
name = "else_if_used"
msgs = {
"R5501": (
'Consider using "elif" instead of "else if"',
"else-if-used",
"Used when an else statement is immediately followed by "
"an if statement and does not contain statements that "
"would be unrelated to it.",
)
}
def __init__(self, linter=None):
super().__init__(linter)
self._init()
def _init(self):
self._elifs = {}
def process_tokens(self, tokens):
"""Process tokens and look for 'if' or 'elif'"""
self._elifs = {
begin: token for _, token, begin, _, _ in tokens if token in {"elif", "if"}
}
def leave_module(self, _: nodes.Module) -> None:
self._init()
@check_messages("else-if-used")
def visit_if(self, node: nodes.If) -> None:
"""Current if node must directly follow an 'else'"""
if (
isinstance(node.parent, nodes.If)
and node.parent.orelse == [node]
and (node.lineno, node.col_offset) in self._elifs
and self._elifs[(node.lineno, node.col_offset)] == "if"
):
self.add_message("else-if-used", node=node, confidence=HIGH)
def register(linter):
"""Required method to auto register this checker.
:param linter: Main interface object for Pylint plugins
:type linter: Pylint object
"""
linter.register_checker(ElseifUsedChecker(linter))
|
import os, sys
import math
import hydra
import torch
import timm
from hydra.utils import instantiate
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import NativeScaler
import models
from data import create_dataloader
from utils import MetricLogger, SmoothedValue
from utils import fix_random_seed
@hydra.main(config_path='./configs', config_name='pretrain')
def main(cfg):
if cfg.seed is not None:
fix_random_seed(cfg.seed)
torch.backends.cudnn.benchmark = True
# dataloader
trainloader, num_classes = create_dataloader(cfg.data)
# additional data augmentation (mixup/cutmix)
mixup_fn = None
mixup_enable = (cfg.data.mixup.mixup_alpha > 0.) or (cfg.data.mixup.cutmix_alpha > 0.)
if mixup_enable:
mixup_fn = instantiate(cfg.data.mixup, num_classes=num_classes)
print(f'MixUp/Cutmix was enabled\n')
# create model
model = instantiate(cfg.model, num_classes=num_classes)
print(f'Model[{cfg.model.model_name}] was created')
# wrap model with DP
model = torch.nn.parallel.DataParallel(model)
model.cuda()
model_without_dp = model.module
# optimizer
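# Linear LR scaling rule: scale the configured base learning rate by the
# global batch size, using 512 as the reference batch size.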
scaled_lr = cfg.optim.args.lr * cfg.data.loader.batch_size / 512.0
cfg.optim.args.lr = scaled_lr
optimizer = instantiate(cfg.optim, model=model)
print(f'Optimizer: \n{optimizer}\n')
# scheduler
lr_scheduler, _ = instantiate(cfg.scheduler, optimizer=optimizer)
print(f'Scheduler: \n{lr_scheduler}\n')
# criterion
if cfg.data.mixup.mixup_alpha > 0.:
criterion = SoftTargetCrossEntropy().cuda()
print('SoftTargetCrossEntropy is used for criterion\n')
elif cfg.data.mixup.label_smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(cfg.data.mixup.label_smoothing).cuda()
print('LabelSmoothingCrossEntropy is used for criterion\n')
else:
criterion = torch.nn.CrossEntropyLoss().cuda()
print('CrossEntropyLoss is used for criterion\n')
loss_scaler = NativeScaler()
# load resume
start_epoch = 1
if cfg.resume is not None:
checkpoint = torch.load(cfg.resume, map_location='cpu')
model_without_dp.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
loss_scaler.load_state_dict(checkpoint['scaler'])
start_epoch = checkpoint['epoch'] + 1
print(f'Resume was loaded from {cfg.resume}\n')
print(f'Start training for {cfg.epochs} epochs')
for epoch in range(start_epoch, cfg.epochs + 1):
# train one epoch
model.train()
metric_logger = MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = f'Epoch: [{epoch:03}/{cfg.epochs:03}]'
for data in metric_logger.log_every(trainloader, cfg.print_iter_freq, header):
images = data[0].cuda(non_blocking=True)
labels = data[1].cuda(non_blocking=True)
if mixup_fn is not None:
images, labels = mixup_fn(images, labels)
with torch.cuda.amp.autocast():
outputs = model(images)
loss = criterion(outputs, labels)
loss_value = loss.item()
if not math.isfinite(loss_value):
print(f'Loss is {loss_value}, stopping training')
sys.exit(1)
optimizer.zero_grad()
is_second_order = (hasattr(optimizer, 'is_second_order')) and (optimizer.is_second_order)
loss_scaler(
loss=loss,
optimizer=optimizer,
parameters=model.parameters(),
create_graph=is_second_order
)
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]['lr'])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print(f'Averaged stats: {metric_logger}')
lr_scheduler.step(epoch)
if epoch % cfg.save_epoch_freq == 0:
save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'
torch.save({
'model': model_without_dp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'scaler': loss_scaler.state_dict(),
'epoch': epoch
}, save_path)
save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'
torch.save({
'model': model_without_dp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'scaler': loss_scaler.state_dict(),
'epoch': epoch
}, save_path)
if __name__ == '__main__':
main()
|
#Copyright 2021 Google LLC
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tempfile
import pdb
import copy
import warnings
warnings.filterwarnings(action='ignore')
import functools
from itertools import combinations
from collections import defaultdict
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
############################################################################################
# data pipelines and feature engg here
# pre-defined TF2 Keras models and your own models here
from deep_autoviml.data_load.classify_features import check_model_options
# Utils
############################################################################################
# TensorFlow ≥2.4 is required
import tensorflow as tf
np.random.seed(42)
tf.random.set_seed(42)
from tensorflow.keras import layers
from tensorflow import keras
from tensorflow.keras.layers.experimental.preprocessing import Normalization, StringLookup, Hashing
from tensorflow.keras.layers.experimental.preprocessing import IntegerLookup, CategoryEncoding, CategoryCrossing
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization, Discretization
from tensorflow.keras.layers import Embedding, Flatten
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model, load_model
from tensorflow.keras import callbacks
from tensorflow.keras import backend as K
from tensorflow.keras import utils
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras import regularizers
import tensorflow_hub as hub
import tensorflow_text as text
from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error
from IPython.core.display import Image, display
import pickle
#############################################################################################
##### Suppress all TF2 and TF1.x warnings ###################
try:
tf.logging.set_verbosity(tf.logging.ERROR)
except:
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
############################################################################################
from tensorflow.keras.layers import Reshape, MaxPooling1D, MaxPooling2D, AveragePooling2D, AveragePooling1D
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Embedding, GlobalAveragePooling1D, GlobalMaxPooling1D, Dropout, Conv1D
############################################################################################
def preprocessing_images(train_ds, model_options):
"""
This builds an image-preprocessing pipeline for an incoming tf.data.Dataset; it is intended for image data only.
You just need to send in a tf.data.Dataset built from the training folder and a model_options dictionary.
It returns a small compiled Keras model (rescaling layer, frozen TF-Hub feature extractor and a softmax
classification head) that you can use as the image branch of your Keras Functional model.
########### Motivation and suggestions for coding for Image processing came from this blog #########
Greatly indebted to Srivatsan for his Github and notebooks: https://github.com/srivatsan88/YouTubeLI
####################################################################################################
"""
try:
####### L O A D F E A T U R E E X T R A C T O R ################
url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
feature_extractor = check_model_options(model_options, "tf_hub_model", url)
img_height = model_options["image_height"]
img_width = model_options["image_width"]
image_channels = model_options["image_channels"]
num_predicts = model_options["num_predicts"]
try:
feature_extractor_layer = hub.KerasLayer(feature_extractor, input_shape=(
img_height,img_width,image_channels))
except:
print('Loading model from Tensorflow Hub failed. Check the URL and try again...')
return
feature_extractor_layer.trainable = False
normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
tf.random.set_seed(111)
model = tf.keras.Sequential([
normalization_layer,
feature_extractor_layer,
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(num_predicts,activation='softmax')
])
model.compile(
optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=False),  # the Dense head above already applies softmax
metrics=['accuracy'])
except:
print(' Error: Failed image preprocessing layer. Returning...')
return
return model
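# Hypothetical usage sketch (added for illustration; the directory path, image
# sizes and class count below are placeholders, not values from this package):
#
# train_ds = tf.keras.preprocessing.image_dataset_from_directory(
#     "path/to/train_images", image_size=(224, 224), batch_size=32)
# model_options = {
#     "tf_hub_model": "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4",
#     "image_height": 224, "image_width": 224,
#     "image_channels": 3, "num_predicts": 10,
# }
# image_model = preprocessing_images(train_ds, model_options)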
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Convert data to textflint format and run transform functions in textflint
import glob
import json
import os
from textflint import Engine
CONFIG_PATH = "textflint_utils/configs"
TRANSFORM_FIELDS = {
"nli": {"context": "premise", "hypothesis": "hypothesis"},
"sentiment": {"statement": "x"},
"hs": {"statement": "x"},
"qa": {"context": "context", "question": "question"},
}
LABEL_FIELD = {"nli": "label", "sentiment": "label", "hs": "label", "qa": "answer"}
LABEL_MAP = {
"nli": {
"neutral": "neutral",
"contradictory": "contradiction",
"entailed": "entailment",
},
"sentiment": {"positive": "positive", "negative": "negative", "neutral": "neutral"},
"hs": {"hateful": "hateful", "not-hateful": "not-hateful"},
}
def findall(p, s):
# Yields all the positions of the pattern p in the string s.
i = s.find(p)
while i != -1:
yield i
i = s.find(p, i + 1)
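# For example: list(findall("an", "banana")) -> [1, 3]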
# This converts dynabench dataset to textflint format
def reformat_data_to_textflint(samples, task):
converted_samples = []
perturb_fields = TRANSFORM_FIELDS.get(task, None)
label_map = LABEL_MAP.get(task, None)
for i in range(len(samples)):
sample = samples[i]
converted = {"sample_id": i + 1}
if task == "qa":
answer = sample["answer"]
if type(answer) is list:
answers = set(answer)
else:
answers = [answer]
converted["answers"] = []
for answer in answers:
converted["answers"] += [
{"text": answer, "answer_start": i}
for i in findall(answer, sample["context"])
]
converted["title"] = ""
converted["is_impossible"] = False
else:
converted["y"] = label_map[sample["label"]]
for key, value in perturb_fields.items():
converted[value] = sample[key]
converted_samples.append(converted)
return converted_samples
def load_config(config_path):
config = None
with open(config_path) as f:
config = json.loads(f.read())
return config
def get_orig_value(data, sample, field):
return data[sample["sample_id"]][field]
def get_transformed_data(config_path, data, task):
config = load_config(config_path)
out_dir = config["out_dir"]
out_files = os.listdir(out_dir)
trans_samples = []
perturb_fields = TRANSFORM_FIELDS.get(task, None)
label_field = LABEL_FIELD.get(task, None)
for fname in out_files:
if fname.startswith("ori"):
continue
fname = os.path.join(out_dir, fname)
parts = fname.split("_")
new_suffix = "_".join(parts[1:-1])
with open(fname) as f:
for line in f:
sample = json.loads(line)
trans_sample = {"input_id": get_orig_value(data, sample, "uid")}
trans_sample[label_field] = get_orig_value(data, sample, label_field)
for key, value in perturb_fields.items():
trans_sample[key] = sample[value]
# create a unique uid for new examples
trans_sample["uid"] = str(trans_sample["input_id"]) + "_" + new_suffix
trans_samples.append(trans_sample)
return trans_samples
def run_textflint(data, task):
textflint_data = reformat_data_to_textflint(data, task)
engine = Engine()
config_file = os.path.join(CONFIG_PATH, task + "_config.json")
config = load_config(config_file)
out_dir = config["out_dir"]
files = glob.glob(out_dir + "/*")
for f in files:
os.remove(f)
engine.run(textflint_data, config_file)
perturbed_data = get_transformed_data(config_file, data, task)
return perturbed_data
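# Illustrative call (hypothetical sample data, added for clarity): each sample
# must carry the fields named in TRANSFORM_FIELDS/LABEL_FIELD for its task plus
# a "uid"; the task-specific textflint config under CONFIG_PATH decides which
# transforms run and where the transformed files are written.
#
# samples = [{"uid": "ex-1", "statement": "I love this movie", "label": "positive"}]
# perturbed = run_textflint(samples, "sentiment")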
|
import requests
import pyfiglet
ascii_banner = pyfiglet.figlet_format("SMSARCH")
print(ascii_banner)
while True:
kime = input("to (phone number):")
mesaj = input("message:")
if " " in kime or mesaj == "":
break
resp = requests.post('https://textbelt.com/text', {
'phone': '{}'.format(kime),
'message': '{}'.format(mesaj),
'key': 'textbelt',
})
print("Işlem: {} kalan hakkiniz: {}".format('Basarili'if resp.json()['success'] == 'True'
else 'basarisiz!!!',resp.json()['quotaRemaining']))
c = input("'exit()' or 'ENTER'")
if c=="exit()":
break
else:
pass
|
# PYTHON_ARGCOMPLETE_OK
"""The command line interface to pipx"""
import argparse
import logging
import logging.config
import os
import re
import shlex
import sys
import textwrap
import time
import urllib.parse
from pathlib import Path
from typing import Any, Callable, Dict, List
import argcomplete # type: ignore
from packaging.requirements import InvalidRequirement, Requirement
from packaging.utils import canonicalize_name
import pipx.constants
from pipx import commands, constants
from pipx.animate import hide_cursor, show_cursor
from pipx.colors import bold, green
from pipx.constants import ExitCode
from pipx.emojis import hazard
from pipx.interpreter import DEFAULT_PYTHON
from pipx.util import PipxError, mkdir, pipx_wrap, rmdir
from pipx.venv import VenvContainer
from pipx.version import __version__
logger = logging.getLogger(__name__)
VenvCompleter = Callable[[str], List[str]]
def print_version() -> None:
print(__version__)
SPEC_HELP = textwrap.dedent(
"""\
The package name or specific installation source passed to pip.
Runs `pip install -U SPEC`.
For example `--spec mypackage==2.0.0` or `--spec git+https://github.com/user/repo.git@branch`
"""
)
PIPX_DESCRIPTION = textwrap.dedent(
f"""
Install and execute apps from Python packages.
Binaries can either be installed globally into isolated Virtual Environments
or run directly in a temporary Virtual Environment.
Virtual Environment location is {str(constants.PIPX_LOCAL_VENVS)}.
Symlinks to apps are placed in {str(constants.LOCAL_BIN_DIR)}.
"""
)
PIPX_DESCRIPTION += pipx_wrap(
"""
optional environment variables:
PIPX_HOME Overrides default pipx location. Virtual Environments will be installed to $PIPX_HOME/venvs.
PIPX_BIN_DIR Overrides location of app installations. Apps are symlinked or copied here.
USE_EMOJI Overrides emoji behavior. Default value varies based on platform.
PIPX_DEFAULT_PYTHON Overrides default python used for commands.
""",
subsequent_indent=" " * 24, # match the indent of argparse options
keep_newlines=True,
)
DOC_DEFAULT_PYTHON = os.getenv("PIPX__DOC_DEFAULT_PYTHON", DEFAULT_PYTHON)
INSTALL_DESCRIPTION = textwrap.dedent(
f"""
The install command is the preferred way to globally install apps
from python packages on your system. It creates an isolated virtual
environment for the package, then ensures the package's apps are
accessible on your $PATH.
The result: apps you can run from anywhere, located in packages
you can cleanly upgrade or uninstall. Guaranteed to not have
dependency version conflicts or interfere with your OS's python
packages. 'sudo' is not required to do this.
pipx install PACKAGE_NAME
pipx install --python PYTHON PACKAGE_NAME
pipx install VCS_URL
pipx install ./LOCAL_PATH
pipx install ZIP_FILE
pipx install TAR_GZ_FILE
The PACKAGE_SPEC argument is passed directly to `pip install`.
The default virtual environment location is {constants.DEFAULT_PIPX_HOME}
and can be overridden by setting the environment variable `PIPX_HOME`
(Virtual Environments will be installed to `$PIPX_HOME/venvs`).
The default app location is {constants.DEFAULT_PIPX_BIN_DIR} and can be
overridden by setting the environment variable `PIPX_BIN_DIR`.
The default python executable used to install a package is
{DOC_DEFAULT_PYTHON} and can be overridden
by setting the environment variable `PIPX_DEFAULT_PYTHON`.
"""
)
class LineWrapRawTextHelpFormatter(argparse.RawDescriptionHelpFormatter):
def _split_lines(self, text: str, width: int) -> List[str]:
text = self._whitespace_matcher.sub(" ", text).strip()
return textwrap.wrap(text, width)
class InstalledVenvsCompleter:
def __init__(self, venv_container: VenvContainer) -> None:
self.packages = [str(p.name) for p in sorted(venv_container.iter_venv_dirs())]
def use(self, prefix: str, **kwargs: Any) -> List[str]:
return [
f"{prefix}{x[len(prefix):]}"
for x in self.packages
if x.startswith(canonicalize_name(prefix))
]
def get_pip_args(parsed_args: Dict[str, str]) -> List[str]:
pip_args: List[str] = []
if parsed_args.get("index_url"):
pip_args += ["--index-url", parsed_args["index_url"]]
if parsed_args.get("pip_args"):
pip_args += shlex.split(parsed_args.get("pip_args", ""))
# make sure --editable is last because it needs to be right before
# package specification
if parsed_args.get("editable"):
pip_args += ["--editable"]
return pip_args
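# For illustration (hypothetical input): a parsed-args dict such as
# {"index_url": "https://example.org/simple", "pip_args": "--no-cache-dir", "editable": True}
# yields ["--index-url", "https://example.org/simple", "--no-cache-dir", "--editable"].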
def get_venv_args(parsed_args: Dict[str, str]) -> List[str]:
venv_args: List[str] = []
if parsed_args.get("system_site_packages"):
venv_args += ["--system-site-packages"]
return venv_args
def run_pipx_command(args: argparse.Namespace) -> ExitCode: # noqa: C901
verbose = args.verbose if "verbose" in args else False
pip_args = get_pip_args(vars(args))
venv_args = get_venv_args(vars(args))
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
if "package" in args:
package = args.package
if urllib.parse.urlparse(package).scheme:
raise PipxError("Package cannot be a url")
if "spec" in args and args.spec is not None:
if urllib.parse.urlparse(args.spec).scheme:
if "#egg=" not in args.spec:
args.spec = args.spec + f"#egg={package}"
venv_dir = venv_container.get_venv_dir(package)
logger.info(f"Virtual Environment location is {venv_dir}")
if "skip" in args:
skip_list = [canonicalize_name(x) for x in args.skip]
if args.command == "run":
package_or_url = (
args.spec
if ("spec" in args and args.spec is not None)
else args.app_with_args[0]
)
# For any package, we need to just use the name
try:
package_name = Requirement(args.app_with_args[0]).name
except InvalidRequirement:
# Raw URLs to scripts are supported, too, so continue if
# we can't parse this as a package
package_name = args.app_with_args[0]
use_cache = not args.no_cache
commands.run(
package_name,
package_or_url,
args.app_with_args[1:],
args.python,
pip_args,
venv_args,
args.pypackages,
verbose,
use_cache,
)
# We should never reach here because run() is NoReturn.
return ExitCode(1)
elif args.command == "install":
return commands.install(
None,
None,
args.package_spec,
constants.LOCAL_BIN_DIR,
args.python,
pip_args,
venv_args,
verbose,
force=args.force,
include_dependencies=args.include_deps,
suffix=args.suffix,
)
elif args.command == "inject":
return commands.inject(
venv_dir,
None,
args.dependencies,
pip_args,
verbose=verbose,
include_apps=args.include_apps,
include_dependencies=args.include_deps,
force=args.force,
)
elif args.command == "upgrade":
return commands.upgrade(
venv_dir,
pip_args,
verbose,
include_injected=args.include_injected,
force=args.force,
)
elif args.command == "upgrade-all":
return commands.upgrade_all(
venv_container,
verbose,
include_injected=args.include_injected,
skip=skip_list,
force=args.force,
)
elif args.command == "list":
return commands.list_packages(venv_container, args.include_injected, args.json)
elif args.command == "uninstall":
return commands.uninstall(venv_dir, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "uninstall-all":
return commands.uninstall_all(venv_container, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "reinstall":
return commands.reinstall(
venv_dir=venv_dir,
local_bin_dir=constants.LOCAL_BIN_DIR,
python=args.python,
verbose=verbose,
)
elif args.command == "reinstall-all":
return commands.reinstall_all(
venv_container,
constants.LOCAL_BIN_DIR,
args.python,
verbose,
skip=skip_list,
)
elif args.command == "runpip":
if not venv_dir:
raise PipxError("Developer error: venv_dir is not defined.")
return commands.run_pip(package, venv_dir, args.pipargs, args.verbose)
elif args.command == "ensurepath":
try:
return commands.ensure_pipx_paths(force=args.force)
except Exception as e:
logger.debug("Uncaught Exception:", exc_info=True)
raise PipxError(str(e), wrap_message=False)
elif args.command == "completions":
print(constants.completion_instructions)
return ExitCode(0)
else:
raise PipxError(f"Unknown command {args.command}")
def add_pip_venv_args(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--system-site-packages",
action="store_true",
help="Give the virtual environment access to the system site-packages dir.",
)
parser.add_argument("--index-url", "-i", help="Base URL of Python Package Index")
parser.add_argument(
"--editable",
"-e",
help="Install a project in editable mode",
action="store_true",
)
parser.add_argument(
"--pip-args",
help="Arbitrary pip arguments to pass directly to pip install/upgrade commands",
)
def add_include_dependencies(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--include-deps", help="Include apps of dependent packages", action="store_true"
)
def _add_install(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"install",
help="Install a package",
formatter_class=LineWrapRawTextHelpFormatter,
description=INSTALL_DESCRIPTION,
)
p.add_argument("package_spec", help="package name or pip installation spec")
add_include_dependencies(p)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument(
"--suffix",
default="",
help=(
"Optional suffix for virtual environment and executable names. "
"NOTE: The suffix feature is experimental and subject to change."
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to create the Virtual Environment and run the "
"associated app/apps. Must be v3.6+."
),
)
add_pip_venv_args(p)
def _add_inject(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"inject",
help="Install packages into an existing Virtual Environment",
description="Installs packages to an existing pipx-managed virtual environment.",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to inject into",
).completer = venv_completer
p.add_argument(
"dependencies",
nargs="+",
help="the packages to inject into the Virtual Environment--either package name or pip package spec",
)
p.add_argument(
"--include-apps",
action="store_true",
help="Add apps from the injected packages onto your PATH",
)
add_include_dependencies(p)
add_pip_venv_args(p)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_upgrade(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"upgrade",
help="Upgrade a package",
description="Upgrade a package in a pipx-managed Virtual Environment by running 'pip install --upgrade PACKAGE'",
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
add_pip_venv_args(p)
p.add_argument("--verbose", action="store_true")
def _add_upgrade_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"upgrade-all",
help="Upgrade all packages. Runs `pip install -U <pkgname>` for each package.",
description="Upgrades all packages within their virtual environments by running 'pip install --upgrade PACKAGE'",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_uninstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"uninstall",
help="Uninstall a package",
description="Uninstalls a pipx-managed Virtual Environment by deleting it and any files that point to its apps.",
)
p.add_argument("package").completer = venv_completer
p.add_argument("--verbose", action="store_true")
def _add_uninstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"uninstall-all",
help="Uninstall all packages",
description="Uninstall all pipx-managed packages",
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"reinstall",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall a package",
description=textwrap.dedent(
"""
Reinstalls a package.
Package is uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
"""
),
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"reinstall-all",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall all packages",
description=textwrap.dedent(
"""
Reinstalls all packages.
Packages are uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
This is useful if you upgraded to a new version of Python and want
all your packages to use the latest as well.
"""
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument("--verbose", action="store_true")
def _add_list(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"list",
help="List installed packages",
description="List packages and apps installed with pipx",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Show packages injected into the main app's environment",
)
p.add_argument(
"--json", action="store_true", help="Output rich data in json format."
)
p.add_argument("--verbose", action="store_true")
def _add_run(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"run",
formatter_class=LineWrapRawTextHelpFormatter,
help=(
"Download the latest version of a package to a temporary virtual environment, "
"then run an app from it. Also compatible with local `__pypackages__` "
"directory (experimental)."
),
description=textwrap.dedent(
f"""
Download the latest version of a package to a temporary virtual environment,
then run an app from it. The environment will be cached
and re-used for up to {constants.TEMP_VENV_EXPIRATION_THRESHOLD_DAYS} days. This
means subsequent calls to 'run' for the same package will be faster
since they can re-use the cached Virtual Environment.
In support of PEP 582 'run' will use apps found in a local __pypackages__
directory, if present. Please note that this behavior is experimental,
and acts as a companion tool to pythonloc. It may be modified or
removed in the future. See https://github.com/cs01/pythonloc.
"""
),
)
p.add_argument(
"--no-cache",
action="store_true",
help="Do not re-use cached virtual environment if it exists",
)
p.add_argument(
"app_with_args",
metavar="app ...",
nargs=argparse.REMAINDER,
help="app/package name and any arguments to be passed to it",
default=[],
)
p.add_argument(
"--pypackages",
action="store_true",
help="Require app to be run from local __pypackages__ directory",
)
p.add_argument("--spec", help=SPEC_HELP)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help="The Python version to run package's CLI app with. Must be v3.6+.",
)
add_pip_venv_args(p)
p.set_defaults(subparser=p)
# modify usage text to show required app argument
p.usage = re.sub(r"^usage: ", "", p.format_usage())
# add a double-dash to usage text to show requirement before app
p.usage = re.sub(r"\.\.\.", "app ...", p.usage)
def _add_runpip(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"runpip",
help="Run pip in an existing pipx-managed Virtual Environment",
description="Run pip in an existing pipx-managed Virtual Environment",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to run pip in",
).completer = venv_completer
p.add_argument(
"pipargs",
nargs=argparse.REMAINDER,
default=[],
help="Arguments to forward to pip command",
)
p.add_argument("--verbose", action="store_true")
def _add_ensurepath(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"ensurepath",
help=(
"Ensure directories necessary for pipx operation are in your "
"PATH environment variable."
),
description=(
"Ensure directory where pipx stores apps is in your "
"PATH environment variable. Also if pipx was installed via "
"`pip install --user`, ensure pipx itself is in your PATH. "
"Note that running this may modify "
"your shell's configuration file(s) such as '~/.bashrc'."
),
)
p.add_argument(
"--force",
"-f",
action="store_true",
help=(
"Add text to your shell's config file even if it looks like your "
"PATH already contains paths to pipx and pipx-install apps."
),
)
def get_command_parser() -> argparse.ArgumentParser:
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
completer_venvs = InstalledVenvsCompleter(venv_container)
parser = argparse.ArgumentParser(
prog="pipx",
formatter_class=LineWrapRawTextHelpFormatter,
description=PIPX_DESCRIPTION,
)
parser.man_short_description = PIPX_DESCRIPTION.splitlines()[1] # type: ignore
subparsers = parser.add_subparsers(
dest="command", description="Get help for commands with pipx COMMAND --help"
)
_add_install(subparsers)
_add_inject(subparsers, completer_venvs.use)
_add_upgrade(subparsers, completer_venvs.use)
_add_upgrade_all(subparsers)
_add_uninstall(subparsers, completer_venvs.use)
_add_uninstall_all(subparsers)
_add_reinstall(subparsers, completer_venvs.use)
_add_reinstall_all(subparsers)
_add_list(subparsers)
_add_run(subparsers)
_add_runpip(subparsers, completer_venvs.use)
_add_ensurepath(subparsers)
parser.add_argument("--version", action="store_true", help="Print version and exit")
subparsers.add_parser(
"completions",
help="Print instructions on enabling shell completions for pipx",
description="Print instructions on enabling shell completions for pipx",
)
return parser
def delete_oldest_logs(file_list: List[Path], keep_number: int) -> None:
file_list = sorted(file_list)
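# lexicographic order matches chronological order here because the log
# filenames embed a %Y-%m-%d_%H.%M.%S timestamp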
if len(file_list) > keep_number:
for existing_file in file_list[:-keep_number]:
try:
existing_file.unlink()
except FileNotFoundError:
pass
def setup_log_file() -> Path:
max_logs = 10
# don't use utils.mkdir, to prevent emission of log message
constants.PIPX_LOG_DIR.mkdir(parents=True, exist_ok=True)
delete_oldest_logs(list(constants.PIPX_LOG_DIR.glob("cmd_*[0-9].log")), max_logs)
delete_oldest_logs(
list(constants.PIPX_LOG_DIR.glob("cmd_*_pip_errors.log")), max_logs
)
datetime_str = time.strftime("%Y-%m-%d_%H.%M.%S")
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}.log"
counter = 1
while log_file.exists() and counter < 10:
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}_{counter}.log"
counter += 1
return log_file
def setup_logging(verbose: bool) -> None:
pipx_str = bold(green("pipx >")) if sys.stdout.isatty() else "pipx >"
pipx.constants.pipx_log_file = setup_log_file()
# "incremental" is False so previous pytest tests don't accumulate handlers
logging_config = {
"version": 1,
"formatters": {
"stream_nonverbose": {
"class": "logging.Formatter",
"format": "{message}",
"style": "{",
},
"stream_verbose": {
"class": "logging.Formatter",
"format": pipx_str + "({funcName}:{lineno}): {message}",
"style": "{",
},
"file": {
"class": "logging.Formatter",
"format": "{relativeCreated: >8.1f}ms ({funcName}:{lineno}): {message}",
"style": "{",
},
},
"handlers": {
"stream": {
"class": "logging.StreamHandler",
"formatter": "stream_verbose" if verbose else "stream_nonverbose",
"level": "INFO" if verbose else "WARNING",
},
"file": {
"class": "logging.FileHandler",
"formatter": "file",
"filename": str(pipx.constants.pipx_log_file),
"encoding": "utf-8",
"level": "DEBUG",
},
},
"loggers": {"pipx": {"handlers": ["stream", "file"], "level": "DEBUG"}},
"incremental": False,
}
logging.config.dictConfig(logging_config)
def setup(args: argparse.Namespace) -> None:
if "version" in args and args.version:
print_version()
sys.exit(0)
setup_logging("verbose" in args and args.verbose)
logger.debug(f"{time.strftime('%Y-%m-%d %H:%M:%S')}")
logger.debug(f"{' '.join(sys.argv)}")
logger.info(f"pipx version is {__version__}")
logger.info(f"Default python interpreter is {repr(DEFAULT_PYTHON)}")
mkdir(constants.PIPX_LOCAL_VENVS)
mkdir(constants.LOCAL_BIN_DIR)
mkdir(constants.PIPX_VENV_CACHEDIR)
rmdir(constants.PIPX_TRASH_DIR, False)
old_pipx_venv_location = constants.PIPX_LOCAL_VENVS / "pipx-app"
if old_pipx_venv_location.exists():
logger.warning(
pipx_wrap(
f"""
{hazard} A virtual environment for pipx was detected at
{str(old_pipx_venv_location)}. The 'pipx-app' package has been
renamed back to 'pipx'
(https://github.com/pypa/pipx/issues/82).
""",
subsequent_indent=" " * 4,
)
)
def check_args(parsed_pipx_args: argparse.Namespace) -> None:
if parsed_pipx_args.command == "run":
# we manually discard a first -- because using nargs=argparse.REMAINDER
# will not do it automatically
if parsed_pipx_args.app_with_args and parsed_pipx_args.app_with_args[0] == "--":
parsed_pipx_args.app_with_args.pop(0)
# since we would like app to be required but not in a separate argparse
# add_argument, we implement our own missing required arg error
if not parsed_pipx_args.app_with_args:
parsed_pipx_args.subparser.error(
"the following arguments are required: app"
)
def cli() -> ExitCode:
"""Entry point from command line"""
try:
hide_cursor()
parser = get_command_parser()
argcomplete.autocomplete(parser)
parsed_pipx_args = parser.parse_args()
setup(parsed_pipx_args)
check_args(parsed_pipx_args)
if not parsed_pipx_args.command:
parser.print_help()
return ExitCode(1)
return run_pipx_command(parsed_pipx_args)
except PipxError as e:
print(str(e), file=sys.stderr)
logger.debug(f"PipxError: {e}", exc_info=True)
return ExitCode(1)
except KeyboardInterrupt:
return ExitCode(1)
except Exception:
logger.debug("Uncaught Exception:", exc_info=True)
raise
finally:
logger.debug("pipx finished.")
show_cursor()
if __name__ == "__main__":
sys.exit(cli())
|
# -*- coding: utf-8 -*-
#
import numpy
from .. import helpers
def integrate(f, rule, dot=numpy.dot):
flt = numpy.vectorize(float)
return dot(f(flt(rule.points).T), flt(rule.weights))
def show(scheme, backend="mpl"):
"""Displays scheme for E_3^r quadrature.
"""
helpers.backend_to_function[backend](
scheme.points, scheme.weights, volume=8 * numpy.pi, edges=[]
)
return
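# Tiny illustrative check (added; the rule object here is a hypothetical stand-in,
# not something this package provides): with a single-point "rule" whose weight
# equals the reference volume, integrating the constant function f(x) = 1 returns
# that volume.
#
# import collections
# _Rule = collections.namedtuple("Rule", ["points", "weights"])
# rule = _Rule(points=numpy.zeros((1, 3)), weights=numpy.array([8 * numpy.pi]))
# integrate(lambda x: numpy.ones(x.shape[1]), rule)  # -> approximately 8*pi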
|
# Copyright (c) 2020 Julian Bernhard, Klemens Esterle, Patrick Hart and
# Tobias Kessler
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from .agent_state_geometry_config_readers import *
from .behavior_model_config_readers import *
from .controlled_agents_config_readers import *
from .dynamic_model_config_readers import *
from .execution_model_config_readers import *
from .goal_definition_config_readers import *
|
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deletes temporary and installed files."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import os
import shutil
import python_utils
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, '..', 'oppia_tools')
_PARSER = argparse.ArgumentParser(description="""
Deletes temporary and installed files.
""")
def delete_directory_tree(directory_path):
"""Recursively delete an existing directory tree. Does not do anything if
directory does not exist.
Args:
directory_path: str. Directory path to be deleted.
"""
if not os.path.exists(directory_path):
return
shutil.rmtree(directory_path)
def delete_file(filepath):
"""Delete an existing file. Does not do anything if file does not exists.
Args:
filepath: str. Filepath to be deleted.
"""
if not os.path.isfile(filepath):
return
os.remove(filepath)
def main(args=None):
"""Runs the script to clean temporary and installed files."""
unused_parsed_args = _PARSER.parse_args(args=args)
delete_directory_tree(OPPIA_TOOLS_DIR)
delete_directory_tree('node_modules/')
delete_directory_tree('third_party/')
delete_directory_tree('build/')
delete_directory_tree('backend_prod_files/')
delete_file('.coverage')
delete_directory_tree('local_compiled_js/')
delete_directory_tree('local_compiled_js_for_test/')
delete_file('tsc_output_log.txt')
delete_file('dev_output.txt')
delete_file('.viminfo')
for filename in os.listdir(CURR_DIR):
if filename.startswith('tmpcompiledjs'):
delete_directory_tree(filename)
python_utils.PRINT('Temporary and installed files deleted')
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when clean.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
|
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
from typing import Callable, Optional
from enum import Enum
class Event(object):
def __init__(self, value: str, event_trigger: Optional[Callable]=None ):
if event_trigger is None:
event_trigger = Event.default_trigger
self._trigger = event_trigger
self._name_ = self._value_ = value
@property
def trigger(self):
return self._trigger
@property
def name(self):
"""The name of the Enum member."""
return self._name_
@property
def value(self):
"""The value of the Enum member."""
return self._value_
@staticmethod
def default_trigger(engine):
return True
@staticmethod
def once_trigger():
is_triggered = False
def wrapper(engine):
nonlocal is_triggered  # without this, the assignment below makes is_triggered local and the check raises UnboundLocalError
if is_triggered:
return False
is_triggered=True
return True
return wrapper
@staticmethod
def every_trigger(every: int):
def wrapper(engine):
return every>0 and (engine.state.iter % every)==0
return wrapper
def __call__(self, every: Optional[int]=None, once: Optional[bool]=None ):
if every is not None:
assert once is None
return Event(self.value, event_trigger=Event.every_trigger(every) )
if once is not None:
return Event(self.value, event_trigger=Event.once_trigger() )
return Event(self.value)
def __hash__(self):
return hash(self._name_)
def __eq__(self, other):
if hasattr(other, 'value'):
return self.value==other.value
else:
return NotImplemented
class DefaultEvents(Event, Enum):
BEFORE_RUN = "before_train"
AFTER_RUN = "after_train"
BEFORE_EPOCH = "before_epoch"
AFTER_EPOCH = "after_epoch"
BEFORE_STEP = "before_step"
AFTER_STEP = "after_step"
BEFORE_GET_BATCH = "before_get_batch"
AFTER_GET_BATCH = "after_get_batch"
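# Minimal usage sketch (added for illustration; the engine object below is a
# stand-in for whatever exposes `state.iter` in the training loop): calling a
# DefaultEvents member builds a derived Event whose trigger fires every N steps.
if __name__ == "__main__":
    class _State:
        iter = 200
    class _Engine:
        state = _State()
    evt = DefaultEvents.AFTER_STEP(every=100)
    print(evt.name, evt.trigger(_Engine()))  # -> after_step True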
|
#!/usr/bin/env python3
import json
import os
import sys
import uuid
from alphad3m.automl import AutoML
if __name__ == '__main__':
if len(sys.argv) != 3:
sys.stderr.write('Usage: %s <config> <pipeline_uuid>\n' % sys.argv[0])
sys.exit(1)
with open(sys.argv[1]) as config_file:
config = json.load(config_file)
storage = config['temp_storage_root']
ta2 = AutoML(storage_root=storage,
pipelines_considered=os.path.join(storage, 'pipelines_considered'),
executables_root=os.path.join(storage, 'executables'))
result = ta2.run_pipeline(uuid.UUID(hex=sys.argv[2]),
config['training_data_root'],
config['problem_root'])
print(result)
|