# coding: utf-8
import pprint
import re
import six
class KeystoneAssociateGroupWithProjectPermissionRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'project_id': 'str',
'group_id': 'str',
'role_id': 'str'
}
attribute_map = {
'project_id': 'project_id',
'group_id': 'group_id',
'role_id': 'role_id'
}
def __init__(self, project_id=None, group_id=None, role_id=None):
"""KeystoneAssociateGroupWithProjectPermissionRequest - a model defined in huaweicloud sdk"""
self._project_id = None
self._group_id = None
self._role_id = None
self.discriminator = None
self.project_id = project_id
self.group_id = group_id
self.role_id = role_id
@property
def project_id(self):
"""Gets the project_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
Project ID. For details about how to obtain it, see [Obtaining the Project Name and Project ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:return: The project_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
Project ID. For details about how to obtain it, see [Obtaining the Project Name and Project ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:param project_id: The project_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:type: str
"""
self._project_id = project_id
@property
def group_id(self):
"""Gets the group_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
User group ID. For details about how to obtain it, see [Obtaining the User Group ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:return: The group_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
User group ID. For details about how to obtain it, see [Obtaining the User Group ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:param group_id: The group_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:type: str
"""
self._group_id = group_id
@property
def role_id(self):
"""Gets the role_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
Role (permission) ID. For details about how to obtain it, see [Obtaining the Role Name and Role ID](https://support.huaweicloud.com/api-iam/iam_10_0001.html).
:return: The role_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:rtype: str
"""
return self._role_id
@role_id.setter
def role_id(self, role_id):
"""Sets the role_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
Role (permission) ID. For details about how to obtain it, see [Obtaining the Role Name and Role ID](https://support.huaweicloud.com/api-iam/iam_10_0001.html).
:param role_id: The role_id of this KeystoneAssociateGroupWithProjectPermissionRequest.
:type: str
"""
self._role_id = role_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeystoneAssociateGroupWithProjectPermissionRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
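# Minimal usage sketch (illustrative placeholder IDs, not part of the generated model):
# build the request body and inspect its serialized form.
#
#     request = KeystoneAssociateGroupWithProjectPermissionRequest(
#         project_id="<project_id>",
#         group_id="<group_id>",
#         role_id="<role_id>")
#     print(request.to_dict())  # {'project_id': '<project_id>', 'group_id': ..., 'role_id': ...}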
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jacobian ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gradients_impl as gradient_ops
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.util import nest
def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
"""Computes jacobian of `output` w.r.t. `inputs`.
Args:
output: A tensor.
inputs: A tensor or a nested structure of tensor objects.
use_pfor: If true, uses pfor for computing the jacobian. Else uses
tf.while_loop.
parallel_iterations: A knob to control how many iterations are dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
A tensor or a nested structure of tensors with the same structure as
`inputs`. Each entry is the jacobian of `output` w.r.t. the corresponding
value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has
shape [x_1, ..., x_m], the corresponding jacobian has shape
[y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is
sparse (IndexedSlices), jacobian function currently makes it dense and
returns a Tensor instead. This may change in the future.
"""
flat_inputs = nest.flatten(inputs)
output_tensor_shape = output.shape
output_shape = array_ops.shape(output)
output = array_ops.reshape(output, [-1])
def loop_fn(i):
y = array_ops.gather(output, i)
return gradient_ops.gradients(y, flat_inputs)
try:
output_size = int(output.shape[0])
except TypeError:
output_size = array_ops.shape(output)[0]
if use_pfor:
pfor_outputs = control_flow_ops.pfor(
loop_fn, output_size, parallel_iterations=parallel_iterations)
else:
pfor_outputs = control_flow_ops.for_loop(
loop_fn,
[output.dtype] * len(flat_inputs),
output_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(pfor_outputs):
if isinstance(out, ops.Tensor):
new_shape = array_ops.concat(
[output_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))
pfor_outputs[i] = out
return nest.pack_sequence_as(inputs, pfor_outputs)
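# Shape example (illustrative, following the docstring above): if `output` has
# shape [2, 3] and a single input has shape [4], the returned jacobian has shape
# [2, 3, 4], where element [i, j, k] is d output[i, j] / d input[k].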
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
"""Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.
e.g.
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
y = x * x
jacobian = batch_jacobian(y, x)
# => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
Args:
output: A tensor with shape [b, y1, ..., y_n]. `output[i,...]` should
only depend on `inp[i,...]`.
inp: A tensor with shape [b, x1, ..., x_m]
use_pfor: If true, uses pfor for computing the Jacobian. Else uses a
tf.while_loop.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None, when use_pfor is
true, corresponds to vectorizing all the iterations. When use_pfor is
false, the default value of None corresponds to parallel_iterations=10.
This knob can be used to control the total memory usage.
Returns:
A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
ValueError: if first dimension of `output` and `inp` do not match.
"""
output_shape = output.shape
if not output_shape[0].is_compatible_with(inp.shape[0]):
raise ValueError(f"Need first dimension of `output` shape ({output.shape}) "
f"and `inp` shape ({inp.shape}) to match.")
if output_shape.is_fully_defined():
batch_size = int(output_shape[0])
output_row_size = output_shape.num_elements() // batch_size
else:
output_shape = array_ops.shape(output)
batch_size = output_shape[0]
output_row_size = array_ops.size(output) // batch_size
inp_shape = array_ops.shape(inp)
# Flatten output to 2-D.
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, inp_shape[0])]):
output = array_ops.reshape(output, [batch_size, output_row_size])
def loop_fn(i):
y = array_ops.gather(output, i, axis=1)
return gradient_ops.gradients(y, inp)[0]
if use_pfor:
pfor_output = control_flow_ops.pfor(loop_fn, output_row_size,
parallel_iterations=parallel_iterations)
else:
pfor_output = control_flow_ops.for_loop(
loop_fn, output.dtype,
output_row_size,
parallel_iterations=parallel_iterations)
if pfor_output is None:
return None
pfor_output = array_ops.reshape(pfor_output,
[output_row_size, batch_size, -1])
output = array_ops.transpose(pfor_output, [1, 0, 2])
new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
return array_ops.reshape(output, new_shape)
|
"""
Exactly equals to Model21 (the best results so far), but differnt configurations.
Exactly based on Model10, but ReLU to GeLU
Based on Model8, add dropout and max, avg combine.
Based on Local model, add residual connections.
The extraction is doubled for depth.
Learning Point Cloud with Progressively Local representation.
[B,3,N] - {[B,G,K,d]-[B,G,d]} - {[B,G',K,d]-[B,G',d]} -cls
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from einops import rearrange, repeat
from pointnet2_ops import pointnet2_utils
def square_distance(src, dst):
"""
Calculate the squared Euclidean distance between each pair of points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
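# Note: for float inputs this is equivalent, up to floating-point error, to
# torch.cdist(src, dst) ** 2, but the expanded form above never takes a square root.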
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points: indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
distance = torch.min(distance, dist)
farthest = torch.max(distance, -1)[1]
return centroids
def query_ball_point(radius, nsample, xyz, new_xyz):
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, 3]
new_xyz: query points, [B, S, 3]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
def knn_point(nsample, xyz, new_xyz):
"""
Input:
nsample: max sample number in local region
xyz: all points, [B, N, C]
new_xyz: query points, [B, S, C]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
sqrdists = square_distance(new_xyz, xyz)
_, group_idx = torch.topk(sqrdists, nsample, dim=-1, largest=False, sorted=False)
return group_idx
class LocalGrouper(nn.Module):
def __init__(self, groups, kneighbors, **kwargs):
"""
Give xyz[b,p,3] and fea[b,p,d], return new_xyz[b,g,3] and new_fea[b,g,k,2d]
:param groups: groups number
:param kneighbors: number of k nearest neighbors
:param kwargs: others
"""
super(LocalGrouper, self).__init__()
self.groups = groups
self.kneighbors = kneighbors
def forward(self, xyz, points):
B, N, C = xyz.shape
S = self.groups
xyz = xyz.contiguous()  # xyz [batch, points, xyz]
# fps_idx = farthest_point_sample(xyz, self.groups).long()
fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.groups).long() # [B, npoint]
new_xyz = index_points(xyz, fps_idx)
new_points = index_points(points, fps_idx)
idx = knn_point(self.kneighbors, xyz, new_xyz)
# idx = query_ball_point(radius, nsample, xyz, new_xyz)
# grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]
grouped_points = index_points(points, idx)
grouped_points_norm = grouped_points - new_points.view(B, S, 1, -1)
new_points = torch.cat([grouped_points_norm,
new_points.view(B, S, 1, -1).repeat(1, 1, self.kneighbors, 1)]
, dim=-1)
return new_xyz, new_points
class FCBNReLU1D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, bias=False):
super(FCBNReLU1D, self).__init__()
self.net = nn.Sequential(
nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(out_channels),
nn.GELU()
)
def forward(self, x):
return self.net(x)
class FCBNReLU1DRes(nn.Module):
def __init__(self, channel, kernel_size=1, bias=False):
super(FCBNReLU1DRes, self).__init__()
self.net = nn.Sequential(
nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel),
nn.GELU(),
nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel)
)
def forward(self, x):
return F.gelu(self.net(x)+x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 32, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
# project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Conv1d(inner_dim, dim,1),
nn.BatchNorm1d(dim)
)
def forward(self, x):
x = x.permute(0,2,1)
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = self.attend(dots)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b (h d) n')
return self.to_out(out)
class TransformerBlock(nn.Module):
def __init__(self, dim, heads=8, dim_head=32, **kwargs):
"""
[b batch, d dimension, k points]
:param dim: input data dimension
:param heads: heads number
:param dim_head: dimension in each head
:param kwargs:
"""
super(TransformerBlock, self).__init__()
self.attention = Attention(dim=dim, heads=heads, dim_head=dim_head)
self.ffn = nn.Sequential(
nn.Conv1d(dim, dim, 1, bias=False),
nn.BatchNorm1d(dim)
)
def forward(self, x):
"""
:input x: [b batch, d dimension, p points,]
:return: [b batch, d dimension, p points,]
"""
att = self.attention(x)
att = F.gelu(att+x)
out = self.ffn(att)
out = F.gelu(att+out)
return out
class PreExtraction(nn.Module):
def __init__(self, channels, blocks=1):
"""
input: [b,g,k,d]: output:[b,d,g]
:param channels:
:param blocks:
"""
super(PreExtraction, self).__init__()
operation = []
for _ in range(blocks):
operation.append(
FCBNReLU1DRes(channels)
)
self.operation = nn.Sequential(*operation)
self.transformer = TransformerBlock(channels, heads=4)
def forward(self, x):
b, n, s, d = x.size() # torch.Size([32, 512, 32, 6])
x = x.permute(0, 1, 3, 2)
x = x.reshape(-1, d, s)
batch_size, _, N = x.size()
x = self.operation(x) # [b, d, k]
x = self.transformer(x)
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = x.reshape(b, n, -1).permute(0, 2, 1)
return x
class PosExtraction(nn.Module):
def __init__(self, channels, blocks=1):
"""
input[b,d,g]; output[b,d,g]
:param channels:
:param blocks:
"""
super(PosExtraction, self).__init__()
operation = []
for _ in range(blocks):
operation.append(
FCBNReLU1DRes(channels)
)
self.operation = nn.Sequential(*operation)
self.transformer = TransformerBlock(channels, heads=4)
def forward(self, x): # [b, d, k]
return self.transformer(self.operation(x))
class Model23(nn.Module):
def __init__(self, points=1024, class_num=40, embed_dim=64,
pre_blocks=[2,2,2,2], pos_blocks=[2,2,2,2], k_neighbors=[32,32,32,32],
reducers=[2,2,2,2], **kwargs):
super(Model23, self).__init__()
self.stages = len(pre_blocks)
self.class_num = class_num
self.points=points
self.embedding = nn.Sequential(
FCBNReLU1D(3, embed_dim),
FCBNReLU1D(embed_dim, embed_dim)
)
assert len(pre_blocks)==len(k_neighbors)==len(reducers)==len(pos_blocks), \
"Please check stage number consistent for pre_blocks, pos_blocks k_neighbors, reducers."
self.local_grouper_list = nn.ModuleList()
self.pre_blocks_list = nn.ModuleList()
self.pos_blocks_list = nn.ModuleList()
last_channel = embed_dim
anchor_points = self.points
for i in range(len(pre_blocks)):
out_channel = last_channel*2
pre_block_num=pre_blocks[i]
pos_block_num = pos_blocks[i]
kneighbor = k_neighbors[i]
reduce = reducers[i]
anchor_points = anchor_points//reduce
# append local_grouper_list
local_grouper = LocalGrouper(anchor_points, kneighbor) #[b,g,k,d]
self.local_grouper_list.append(local_grouper)
# append pre_block_list
pre_block_module = PreExtraction(out_channel, pre_block_num)
self.pre_blocks_list.append(pre_block_module)
# append pos_block_list
pos_block_module = PosExtraction(out_channel, pos_block_num)
self.pos_blocks_list.append(pos_block_module)
last_channel = out_channel
self.classifier = nn.Sequential(
nn.Linear(last_channel*2, 512),
nn.BatchNorm1d(512),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(256, self.class_num)
)
def forward(self, x):
xyz = x.permute(0, 2, 1)
batch_size, _, _ = x.size()
x = self.embedding(x) # B,D,N
for i in range(self.stages):
xyz, x = self.local_grouper_list[i](xyz, x.permute(0, 2, 1)) # [b,g,3] [b,g,k,d]
x = self.pre_blocks_list[i](x) # [b,d,g]
x = self.pos_blocks_list[i](x) # [b,d,g]
x_max = F.adaptive_max_pool1d(x,1).squeeze(dim=-1)
x_mean = x.mean(dim=-1,keepdim=False)
x = torch.cat([x_max, x_mean], dim=-1)
x = self.classifier(x)
return x
def model23A(num_classes=40, **kwargs) -> Model23: # 19201MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23B(num_classes=40, **kwargs) -> Model23: # 19185MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[1,1], pos_blocks=[1,1], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23C(num_classes=40, **kwargs) -> Model23: # 19537MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[32,32,32],
reducers=[4,2,2], **kwargs)
def model23D(num_classes=40, **kwargs) -> Model23: # 31927MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[16,32,32],
reducers=[2,2,2], **kwargs)
def model23E(num_classes=40, **kwargs) -> Model23: # 19215MiB # 93.476% on vis sever
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[3,3], pos_blocks=[3,3], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23F(num_classes=40, **kwargs) -> Model23: # 6437MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[16,16],
reducers=[4,4], **kwargs)
def model23G(num_classes=40, **kwargs) -> Model23: # 19201MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[24,24],
reducers=[4,4], **kwargs)
# don't train H, it is the same as model21H
def model23H(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[4,4], pos_blocks=[4,4], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23I(num_classes=40, **kwargs) -> Model23: # 20283MiB
return Model23(points=1024, class_num=num_classes, embed_dim=256,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
# Extremely large model, 101 layers in total.
def model23J(num_classes=40, **kwargs) -> Model23: # 24999MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[4,4,4,4], pos_blocks=[4,4,4,4], k_neighbors=[16,16,16,16],
reducers=[4,2,2,2], **kwargs)
# Also an extremely large model, 101 layers in total.
def model23K(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[10,10], pos_blocks=[10,10], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
if __name__ == '__main__':
data = torch.rand(2,128,10)
att = Attention(128)
out = att(data)
print(out.shape)
batch, groups,neighbors,dim=2,512,32,16
x = torch.rand(batch,groups,neighbors,dim)
pre_extractor = PreExtraction(dim,3)
out = pre_extractor(x)
print(out.shape)
x = torch.rand(batch, dim, groups)
pos_extractor = PosExtraction(dim, 3)
out = pos_extractor(x)
print(out.shape)
data = torch.rand(2, 3, 1024)
print("===> testing model ...")
model = Model23()
out = model(data)
print(out.shape)
print("===> testing modelE ...")
model = model23E()
out = model(data)
print(out.shape)
|
import os
import sys
from SBI.structure import PDB
from default_config.masif_opts import masif_opts
print(masif_opts["ligand"]["assembly_dir"])
if not os.path.exists(masif_opts["ligand"]["assembly_dir"]):
os.mkdir(masif_opts["ligand"]["assembly_dir"])
def assemble(pdb_id):
# Reads and builds the biological assembly of a structure
print(os.path.join(masif_opts["raw_pdb_dir"][:-1]+"_protonized", "{}.pdb".format(pdb_id)))
struct = PDB(
os.path.join(masif_opts["raw_pdb_dir"][:-1]+"_protonized", "{}.pdb".format(pdb_id)), header=True
)
try:
struct_assembly = struct.apply_biomolecule_matrices()[0]
except Exception:
return 0
struct_assembly.write(
os.path.join(masif_opts["ligand"]["assembly_dir"], "{}.pdb".format(pdb_id))
)
return 1
pdb_id = sys.argv[1]
res = assemble(pdb_id)
if res:
print("Building assembly was successfull for {}".format(pdb_id))
else:
print("Building assembly was not successfull for {}".format(pdb_id))
|
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 14250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
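# Minimal simulation sketch (not part of the exported model; the time span is chosen arbitrarily):
#
#     import numpy as np
#     from pysb.simulator import ScipyOdeSimulator
#
#     tspan = np.linspace(0, 20000, 201)
#     result = ScipyOdeSimulator(model, tspan).run()
#     print(result.observables['ParpC_obs'][-1])  # cleaved PARP at the final time point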
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.sparse_tensor."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
class SparseTensorTest(test_util.TensorFlowTestCase):
def testPythonConstruction(self):
indices = [[1, 2], [2, 0], [3, 4]]
values = [b"a", b"b", b"c"]
shape = [4, 5]
sp_value = sparse_tensor.SparseTensorValue(indices, values, shape)
for sp in [
sparse_tensor.SparseTensor(indices, values, shape),
sparse_tensor.SparseTensor.from_value(sp_value),
sparse_tensor.SparseTensor.from_value(
sparse_tensor.SparseTensor(indices, values, shape))]:
self.assertEqual(sp.indices.dtype, dtypes.int64)
self.assertEqual(sp.values.dtype, dtypes.string)
self.assertEqual(sp.dense_shape.dtype, dtypes.int64)
self.assertEqual(sp.get_shape(), (4, 5))
value = self.evaluate(sp)
self.assertAllEqual(indices, value.indices)
self.assertAllEqual(values, value.values)
self.assertAllEqual(shape, value.dense_shape)
sp_value = self.evaluate(sp)
self.assertAllEqual(sp_value.indices, value.indices)
self.assertAllEqual(sp_value.values, value.values)
self.assertAllEqual(sp_value.dense_shape, value.dense_shape)
def testShape(self):
@def_function.function
def test_fn(tensor):
tensor = sparse_ops.sparse_transpose(tensor)
self.assertEqual(tensor.shape.rank, 2)
return tensor
tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])
test_fn(tensor)
def testIsSparse(self):
self.assertFalse(sparse_tensor.is_sparse(3))
self.assertFalse(sparse_tensor.is_sparse("foo"))
self.assertFalse(sparse_tensor.is_sparse(np.array(3)))
self.assertTrue(
sparse_tensor.is_sparse(sparse_tensor.SparseTensor([[0]], [0], [1])))
self.assertTrue(
sparse_tensor.is_sparse(
sparse_tensor.SparseTensorValue([[0]], [0], [1])))
def testConsumers(self):
with context.graph_mode():
sp = sparse_tensor.SparseTensor([[0, 0], [1, 2]], [1.0, 3.0], [3, 4])
w = ops.convert_to_tensor(np.ones([4, 1], np.float32))
out = sparse_ops.sparse_tensor_dense_matmul(sp, w)
self.assertEqual(len(sp.consumers()), 1)
self.assertEqual(sp.consumers()[0], out.op)
dense = sparse_ops.sparse_tensor_to_dense(sp)
self.assertEqual(len(sp.consumers()), 2)
self.assertIn(dense.op, sp.consumers())
self.assertIn(out.op, sp.consumers())
def testWithValues(self):
source = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])
new_tensor = source.with_values([5.0, 1.0])
self.assertAllEqual(new_tensor.indices, source.indices)
self.assertAllEqual(new_tensor.values, [5.0, 1.0])
self.assertAllEqual(new_tensor.dense_shape, source.dense_shape)
# ensure new value's shape is checked
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
source.with_values([[5.0, 1.0]])
class ConvertToTensorOrSparseTensorTest(test_util.TensorFlowTestCase):
def test_convert_dense(self):
value = [42, 43]
from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
value)
self.assertAllEqual(value, self.evaluate(from_value))
def test_convert_sparse(self):
indices = [[0, 1], [1, 0]]
values = [42, 43]
shape = [2, 2]
sparse_tensor_value = sparse_tensor.SparseTensorValue(
indices, values, shape)
st = sparse_tensor.SparseTensor.from_value(sparse_tensor_value)
from_value = self.evaluate(
sparse_tensor.convert_to_tensor_or_sparse_tensor(sparse_tensor_value))
from_tensor = self.evaluate(
sparse_tensor.convert_to_tensor_or_sparse_tensor(st))
for convertee in [from_value, from_tensor]:
self.assertAllEqual(sparse_tensor_value.indices, convertee.indices)
self.assertAllEqual(sparse_tensor_value.values, convertee.values)
self.assertAllEqual(
sparse_tensor_value.dense_shape, convertee.dense_shape)
class SparseTensorShapeTest(test_util.TensorFlowTestCase):
def test_simple(self):
indices = [[0, 2]]
values = [1]
dense_shape = [5, 5]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertIsInstance(sp.shape, tensor_shape.TensorShape)
self.assertIsInstance(sp.dense_shape, ops.Tensor)
self.assertEqual(sp.shape.as_list(), [5, 5])
def test_unknown_shape(self):
@def_function.function
def my_func(dense_shape):
indices = [[0, 2]]
values = [1]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, None])
return sp
my_func.get_concrete_function(
dense_shape=tensor_spec.TensorSpec(
dtype=dtypes.int64, shape=[2,]))
def test_partial_shape(self):
@def_function.function
def my_func(x):
indices = [[0, 2]]
values = [1]
y = ops.convert_to_tensor(3, dtype=dtypes.int64)
dense_shape = [x, y]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, 3])
return sp
my_func.get_concrete_function(
x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[]))
def test_neg_shape(self):
indices = [[0, 2]]
values = [1]
dense_shape = [-1, 5]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, 5])
def test_unknown_tensor_shape(self):
@def_function.function
def my_func(x):
indices = [[0, 0]]
values = [1]
dense_shape = array_ops.shape(x)
dense_shape = math_ops.cast(dense_shape, dtypes.int64)
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, None])
return sp
my_func.get_concrete_function(
x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None, None]))
def test_unknown_rank(self):
@def_function.function
def my_func(dense_shape):
indices = [[0, 0]]
values = [1]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.rank, None)
return sp
my_func.get_concrete_function(
dense_shape=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None]))
@test_util.run_all_in_graph_and_eager_modes
class SparseTensorSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = sparse_tensor.SparseTensorSpec()
self.assertEqual(spec1.shape.rank, None)
self.assertEqual(spec1.dtype, dtypes.float32)
spec2 = sparse_tensor.SparseTensorSpec([None, None], dtypes.string)
self.assertEqual(spec2.shape.as_list(), [None, None])
self.assertEqual(spec2.dtype, dtypes.string)
def testValueType(self):
spec1 = sparse_tensor.SparseTensorSpec()
self.assertEqual(spec1.value_type, sparse_tensor.SparseTensor)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec(),
(tensor_shape.TensorShape(None), dtypes.float32)),
(sparse_tensor.SparseTensorSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32)),
(sparse_tensor.SparseTensorSpec(dtype=dtypes.int32),
(tensor_shape.TensorShape(None), dtypes.int32)),
]) # pyformat: disable
def testSerialize(self, st_spec, expected):
serialization = st_spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec(dtype=dtypes.string), [
tensor_spec.TensorSpec([None, None], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64)
]),
(sparse_tensor.SparseTensorSpec(shape=[5, None, None]), [
tensor_spec.TensorSpec([None, 3], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.float32),
tensor_spec.TensorSpec([3], dtypes.int64)
]),
])
def testComponentSpecs(self, st_spec, expected):
self.assertEqual(st_spec._component_specs, expected)
@parameterized.parameters([
{
"st_spec": sparse_tensor.SparseTensorSpec(),
"indices": [[0, 1], [10, 8]],
"values": [3.0, 5.0],
"dense_shape": [100, 100]
},
{
"st_spec": sparse_tensor.SparseTensorSpec([100, None, None]),
"indices": [[0, 1, 3], [10, 8, 2]],
"values": [3.0, 5.0],
"dense_shape": [100, 20, 20]
},
])
def testToFromComponents(self, st_spec, indices, values, dense_shape):
st = sparse_tensor.SparseTensor(indices, values, dense_shape)
actual_components = st_spec._to_components(st)
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = st_spec._from_components(actual_components)
self.assertAllEqual(st.indices, st_reconstructed.indices)
self.assertAllEqual(st.values, st_reconstructed.values)
self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("SparseTensorValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([[0], [8]])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec = sparse_tensor.SparseTensorSpec()
st = spec._from_components([indices, values, dense_shape])
self.assertIsInstance(st, sparse_tensor.SparseTensorValue)
self.assertAllEqual(st.indices, indices)
self.assertAllEqual(st.values, values)
self.assertAllEqual(st.dense_shape, dense_shape)
@parameterized.parameters([
sparse_tensor.SparseTensorSpec(dtype=dtypes.string),
sparse_tensor.SparseTensorSpec(shape=[5, None, None]),
])
def testFlatTensorSpecs(self, st_spec):
self.assertEqual(st_spec._flat_tensor_specs,
[tensor_spec.TensorSpec(None, dtypes.variant)])
@parameterized.parameters([
{
"st_spec": sparse_tensor.SparseTensorSpec(),
"indices": [[0, 1], [10, 8]],
"values": [3.0, 5.0],
"dense_shape": [100, 100]
},
{
"st_spec": sparse_tensor.SparseTensorSpec([100, None, None]),
"indices": [[0, 1, 3], [10, 8, 2]],
"values": [3.0, 5.0],
"dense_shape": [100, 20, 20]
},
])
def testToFromTensorList(self, st_spec, indices, values, dense_shape):
st = sparse_tensor.SparseTensor(indices, values, dense_shape)
tensor_list = st_spec._to_tensor_list(st)
st_reconstructed = st_spec._from_tensor_list(tensor_list)
self.assertAllEqual(st.indices, st_reconstructed.indices)
self.assertAllEqual(st.values, st_reconstructed.values)
self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec([2, None], dtypes.float32), 32,
sparse_tensor.SparseTensorSpec([32, 2, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([4, None], dtypes.float32), None,
sparse_tensor.SparseTensorSpec([None, 4, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([2], dtypes.float32), 32,
sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32)),
])
def testBatch(self, spec, batch_size, expected):
self.assertEqual(spec._batch(batch_size), expected)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec([32, None, None], dtypes.float32),
sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([None, None, None], dtypes.float32),
sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32),
sparse_tensor.SparseTensorSpec([2], dtypes.float32)),
])
def testUnbatch(self, spec, expected):
self.assertEqual(spec._unbatch(), expected)
if __name__ == "__main__":
googletest.main()
|
"""
Empatica E4 is a wearable device that offers real-time physiological data
acquisition such as blood volume pulse, electrodermal activity (EDA), heart
rate, interbeat intervals, 3-axis acceleration and skin temperature.
"""
import os
import random
import numpy as np
import pandas as pd
class EmpaticaReader:
"""
Read, timeshift and write data generated by Empatica E4.
Attributes
----------
start_times : dict
Contain the timestamp of the first measurement for all
measured signals (BVP, ACC, etc.).
sample_freqs : dict
Contain the sampling frequencies of all measured signals
in Hz.
IBI : pandas.DataFrame
Contain inter-beat interval data. The column
"seconds_since_start" is the time in seconds since the start of
measurements, and the column "IBI" is the duration in seconds between
consecutive heartbeats.
ACC : pandas.DataFrame
Contain the data measured with the onboard MEMS type
3-axis accelerometer, indexed by time of measurement.
BVP : pandas.DataFrame
Contain blood volume pulse data, indexed by time of
measurement.
EDA : pandas.DataFrame
Contain data captured from the electrodermal activity
sensor, indexed by time of measurement.
HR : pandas.DataFrame
Contain heart rate data, indexed by time of
measurement.
TEMP : pandas.DataFrame
Contain temperature data, indexed by time of
measurement.
data : pandas.DataFrame
Joined dataframe of the ACC, BVP, EDA, HR and TEMP
dataframes (see above). May contain NaN values because sampling
frequencies differ across signals.
"""
def __init__(self, path):
"""
Parse the csv files located in the specified directory into dataframes.
Parameters
----------
path : str
Path of the directory that contains the individual signal csv
files. The files must be named ACC.csv, BVP.csv, EDA.csv, HR.csv,
IBI.csv and TEMP.csv. If present, the file tags.csv is also read.
"""
self.start_times = {}
self.sample_freqs = {}
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
if not files:
print('Empty directory. Nothing to read.')
return None
self.ACC = self._read_signal(os.path.join(path, 'ACC.csv'), 'ACC', col_names=['X', 'Y', 'Z'])
self.BVP = self._read_signal(os.path.join(path, 'BVP.csv'), 'BVP')
self.EDA = self._read_signal(os.path.join(path, 'EDA.csv'), 'EDA')
self.HR = self._read_signal(os.path.join(path, 'HR.csv'), 'HR')
self.TEMP = self._read_signal(os.path.join(path, 'TEMP.csv'), 'TEMP')
self.IBI = self._read_ibi(os.path.join(path, 'IBI.csv'))
self.tags = self._read_tags(os.path.join(path, 'tags.csv'))
self.data = self._get_joined_dataframe()
def write(self, dir_path):
"""
Write the signal dataframes back to individual csv files formatted the
same way as they were read.
Parameters
----------
dir_path : str
Path of the directory in which the csv files are created.
If the directory exists, the csv files are written using writing mode 'w',
ignoring other files in the directory.
If the directory does not exist, it will be created.
"""
if not os.path.exists(dir_path):
os.mkdir(dir_path)
if self.ACC is not None:
self._write_signal(os.path.join(dir_path, 'ACC.csv'), self.ACC, 'ACC')
if self.BVP is not None:
self._write_signal(os.path.join(dir_path, 'BVP.csv'), self.BVP, 'BVP')
if self.EDA is not None:
self._write_signal(os.path.join(dir_path, 'EDA.csv'), self.EDA, 'EDA')
if self.HR is not None:
self._write_signal(os.path.join(dir_path, 'HR.csv'), self.HR, 'HR')
if self.TEMP is not None:
self._write_signal(os.path.join(dir_path, 'TEMP.csv'), self.TEMP, 'TEMP')
if self.IBI is not None:
self._write_ibi(os.path.join(dir_path, 'IBI.csv'))
if self.tags is not None:
self._write_tags(os.path.join(dir_path, 'tags.csv'))
def _read_signal(self, path, signal_name, col_names=None):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time_str = file.readline().split(', ')[0]
self.start_times[signal_name] = pd.Timestamp(float(start_time_str), unit='s')
sample_freq_str = file.readline().split(', ')[0]
self.sample_freqs[signal_name] = float(sample_freq_str)
col_names = [signal_name] if col_names is None else col_names
dataframe = pd.read_csv(file, header=None, names=col_names)
dataframe.index = pd.date_range(
start=self.start_times[signal_name],
freq=f"{1 / self.sample_freqs[signal_name]}S",
periods=len(dataframe))
if col_names is not None:
dataframe.rename(dict(enumerate(col_names)), inplace=True)
else:
dataframe.rename({0: signal_name}, inplace=True)
return dataframe.squeeze()
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
def _write_signal(self, path, dataframe, signal_name):
n_cols = len(dataframe.columns) if isinstance(dataframe, pd.DataFrame) else 1
meta = np.array([[self.start_times[signal_name].value / 1e9] * n_cols,
[self.sample_freqs[signal_name]] * n_cols])
with open(path, 'w') as file:
np.savetxt(file, meta, fmt='%s', delimiter=', ', newline='\n')
dataframe.to_csv(file, index=None, header=None, line_terminator='\n')
def _read_ibi(self, path):
try:
if os.stat(path).st_size > 0:
with open(path, 'r') as file:
start_time = pd.Timestamp(float(file.readline().split(',')[0]), unit='s')
self.start_times['IBI'] = start_time
df = pd.read_csv(file, names=['time', 'IBI'], header=None)
df['time'] = pd.to_timedelta(df['time'], unit='s')
df['time'] = start_time + df['time']
return df.set_index('time')
else:
print(f"Not reading signal because the file {path} is empty.")
except OSError:
print(f"Not reading signal because the file {path} does not exist.")
return None
def _write_ibi(self, path):
with open(path, 'w') as file:
file.write(f"{self.start_times['IBI'].value // 1e9}, IBI\n")
write_df = self.IBI.copy()
write_df.index = (write_df.index - self.start_times['IBI']).values.astype(int) / 1e9
write_df.to_csv(file, header=None, line_terminator='\n')
def _read_tags(self, path):
try:
if os.stat(path).st_size > 0:
return pd.read_csv(path, header=None,
parse_dates=[0],
date_parser=lambda x : pd.to_datetime(x, unit='s'),
names=['tags'],
squeeze=True)
else:
print(f"Not reading tags because the file {path} is empty.")
except OSError:
print(f"Not reading tags because the file {path} does not exist.")
return None
def _write_tags(self, path):
if self.tags is not None:
tags_write_series = self.tags.map(lambda x: x.value / 1e9)
tags_write_series.to_csv(path, header=None, index=None, line_terminator='\n')
def timeshift(self, shift='random'):
"""
Timeshift all time related columns as well as the starting_times dict.
Parameters
----------
shift : 'random', pd.Timestamp or pd.Timedelta
If shift is 'random' (the default), shifts the data by a random time interval
between one month and two years into the past.
If shift is a timedelta, adds that timedelta to all time-related attributes.
If shift is a timestamp, shifts the data such that the earliest entry
has that timestamp. The remaining values will maintain the same
time difference to the first entry.
"""
if shift == 'random':
one_month = pd.Timedelta('- 30 days').value
two_years = pd.Timedelta('- 730 days').value
random_timedelta = pd.Timedelta(random.uniform(one_month, two_years))
return self.timeshift(random_timedelta)  # recurse with the sampled timedelta and stop
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP, self.data]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if isinstance(shift, pd.Timestamp):
min_start_time = min(self.start_times.values())
new_start_times = dict()
for signal_name, start_time in self.start_times.items():
new_start_times[signal_name] = shift + (start_time - min_start_time)
self.start_times = new_start_times
if self.tags is not None:
timedeltas = self.tags - self.tags.min()
self.tags = shift + timedeltas
for dataframe in dataframes:
timedeltas = dataframe.index - dataframe.index.min()
dataframe.index = shift + timedeltas
if isinstance(shift, pd.Timedelta):
for signal_name in self.start_times:
self.start_times[signal_name] += shift
if self.tags is not None:
self.tags += shift
for dataframe in dataframes:
dataframe.index += shift
def _get_joined_dataframe(self):
dataframes = []
variables = [self.ACC, self.BVP, self.EDA,
self.HR, self.TEMP]
for variable in variables:
if variable is not None:
dataframes.append(variable)
if not dataframes:
print('No joined dataframe possible due to lack of data.')
return None
joined_idx = pd.concat([pd.Series(dataframe.index) for dataframe in dataframes])
joined_idx = pd.Index(joined_idx.drop_duplicates().sort_values())
joined_dataframe = pd.DataFrame(index=joined_idx)
if self.ACC is not None:
joined_dataframe.loc[self.ACC.index, 'ACC_X'] = self.ACC['X']
joined_dataframe.loc[self.ACC.index, 'ACC_Y'] = self.ACC['Y']
joined_dataframe.loc[self.ACC.index, 'ACC_Z'] = self.ACC['Z']
if self.BVP is not None:
joined_dataframe.loc[self.BVP.index, 'BVP'] = self.BVP
if self.EDA is not None:
joined_dataframe.loc[self.EDA.index, 'EDA'] = self.EDA
if self.HR is not None:
joined_dataframe.loc[self.HR.index, 'HR'] = self.HR
if self.TEMP is not None:
joined_dataframe.loc[self.TEMP.index, 'TEMP'] = self.TEMP
return joined_dataframe
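

# Hedged, standalone sketch of the two shift modes implemented by `timeshift`
# above, applied to a bare DataFrame rather than to the reader class (whose
# constructor is defined earlier in this file). The example data are made up.
if __name__ == '__main__':
    import pandas as pd  # repeated here so the sketch stays self-contained

    example_idx = pd.date_range('2020-01-01 12:00:00', periods=4, freq='250ms')
    example_df = pd.DataFrame({'BVP': [0.1, 0.2, 0.3, 0.4]}, index=example_idx)

    # Timedelta mode: add the same offset to every timestamp.
    shifted = example_df.copy()
    shifted.index = shifted.index + pd.Timedelta('-30 days')

    # Timestamp mode: anchor the earliest entry at the given timestamp while
    # preserving the relative spacing, mirroring the pd.Timestamp branch above.
    anchor = pd.Timestamp('2019-06-01')
    rebased = example_df.copy()
    rebased.index = anchor + (rebased.index - rebased.index.min())

    print(shifted.index[0], rebased.index[0])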
|
import pandas as pd
from typing import Tuple
from .data_processing import UNKNOWN_ID_REGEX
from .utils import calcDeltas
__all__ = [
"filterKnownOrbits",
"filterOrbits"
]
def filterKnownOrbits(
orbits: pd.DataFrame,
orbit_observations: pd.DataFrame,
associations: pd.DataFrame,
min_obs: int = 5,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Remove all observations of unknown objects, keeping only observations of objects with
a known association. If any orbits have fewer than min_obs observations after removing
unknown observations then remove those orbits as well.
This function will also set the provisional and permanent designation columns as required
by the ADES file format.
Parameters
----------
orbits : `~pandas.DataFrame`
DataFrame of orbits.
orbit_observations : `~pandas.DataFrame`
        DataFrame of orbit observations with at least an orbit ID column ('orbit_id') and
        an observation ID column ('obs_id').
associations : `~pandas.DataFrame`
        DataFrame of known associations, with one column containing the observation ID ('obs_id')
and another column containing the association ('obj_id'). Any unknown objects should have
been assigned an unknown ID. See preprocessObservations.
min_obs : int
The minimum number of observations for an object to be considered as recovered.
Returns
-------
known_orbits : `~pandas.DataFrame`
Orbits of previously known objects.
known_orbit_observations : `~pandas.DataFrame`
Observations of previously known objects, the constituent observations
to which the orbits were fit.
"""
# Merge associations with orbit observations
labeled_observations = orbit_observations.merge(associations[["obs_id", "obj_id"]], on="obs_id", how="left")
# Keep only observations of known objects
labeled_observations = labeled_observations[~labeled_observations["obj_id"].str.contains(UNKNOWN_ID_REGEX, regex=True)]
# Keep only known objects with at least min_obs observations
    occurrences = labeled_observations["orbit_id"].value_counts()
    orbit_ids = occurrences.index.values[occurrences.values >= min_obs]
# Filter input orbits
orbits_mask = orbits["orbit_id"].isin(orbit_ids)
orbit_observations_mask = labeled_observations["orbit_id"].isin(orbit_ids)
known_orbits = orbits[orbits_mask].copy()
known_orbit_observations = labeled_observations[orbit_observations_mask].copy()
# Split into permanent and provisional designations
if len(known_orbit_observations) > 0:
known_orbit_observations.loc[:, "permID"] = ""
known_orbit_observations.loc[:, "provID"] = ""
else:
known_orbit_observations["permID"] = ""
known_orbit_observations["provID"] = ""
# Process permanent IDs first
# TODO : add an equivalent for Comets
perm_ids = known_orbit_observations["obj_id"].str.isnumeric()
known_orbit_observations.loc[perm_ids, "permID"] = known_orbit_observations[perm_ids]["obj_id"].values
# Identify provisional IDs next
prov_ids = (
(~known_orbit_observations["obj_id"].str.isnumeric())
& (~known_orbit_observations["obj_id"].str.contains(UNKNOWN_ID_REGEX, regex=True))
)
known_orbit_observations.loc[prov_ids, "provID"] = known_orbit_observations[prov_ids]["obj_id"].values
# Reorder the columns to put the labels toward the front
cols = known_orbit_observations.columns
first = ["orbit_id", "permID", "provID", "obj_id", "obs_id"]
cols_ordered = first + cols[~cols.isin(first)].tolist()
known_orbit_observations = known_orbit_observations[cols_ordered]
return known_orbits, known_orbit_observations
def filterOrbits(
orbits: pd.DataFrame,
orbit_observations: pd.DataFrame,
associations: pd.DataFrame,
min_obs: int = 5,
min_time_separation: float = 30.,
delta_cols: list = ["mjd_utc", "mag", "RA_deg", "Dec_deg"]
) -> Tuple[Tuple[pd.DataFrame, pd.DataFrame], Tuple[pd.DataFrame, pd.DataFrame]]:
"""
Filter orbits into orbits of previously known objects and potential discovery candidates.
Parameters
----------
orbits : `~pandas.DataFrame`
DataFrame of orbits.
orbit_observations : `~pandas.DataFrame`
        DataFrame of orbit observations with at least an orbit ID column ('orbit_id') and
        an observation ID column ('obs_id').
associations : `~pandas.DataFrame`
        DataFrame of known associations, with one column containing the observation ID ('obs_id')
and another column containing the association ('obj_id'). Any unknown objects should have
been assigned an unknown ID. See preprocessObservations.
min_obs : int
The minimum number of observations for an object to be considered as recovered.
    min_time_separation : float
        The minimum time, in minutes, by which two observations should be separated. If any observations
        for a single orbit are separated by less than this amount then only the first observation is kept.
        This is useful to prevent stationary sources from biasing orbit fits, although it may decrease overall
        completeness.
delta_cols : list[str]
Columns for which to calculate deltas (must include mjd_utc).
Returns
-------
discovery_candidates : (`~pandas.DataFrame`, `~pandas.DataFrame`)
        DataFrame of discovery candidate orbits and discovery candidate observations.
known_orbits : (`~pandas.DataFrame`, `~pandas.DataFrame`)
DataFrame of known orbits and known orbit observations.
"""
# Calculate deltas of a variety of quantities (this returns the orbit_observations dataframe
# with the delta columns added)
deltas = calcDeltas(
orbit_observations,
groupby_cols=["orbit_id", "night_id"],
delta_cols=delta_cols
)
# Mark all observations within min_time of another as filtered
deltas.loc[:, "filtered"] = 1
deltas.loc[(deltas["dmjd_utc"].isna()) | (deltas["dmjd_utc"] >= min_time_separation / 60 / 24), "filtered"] = 0
orbits_ = orbits.copy()
orbit_observations_ = deltas.copy()
# Identify known orbits (also remove any observations of unknown objects from these orbits)
recovered_known_orbits, recovered_known_orbit_observations = filterKnownOrbits(
orbits_,
orbit_observations_,
associations,
min_obs=min_obs
)
# Remove the known orbits from the pool of orbits
# The remaining orbits are potential candidates
known_orbit_ids = recovered_known_orbits["orbit_id"].values
candidate_orbits = orbits_[~orbits_["orbit_id"].isin(known_orbit_ids)]
candidate_orbit_observations = orbit_observations_[~orbit_observations_["orbit_id"].isin(known_orbit_ids)]
# Remove any observations of the candidate discoveries that are potentially
    # too close in time to each other (removes stationary sources that could bias results)
# Any orbits that now have fewer than min_obs observations are also removed
candidate_orbit_observations = candidate_orbit_observations[candidate_orbit_observations["filtered"] == 0]
    occurrences = candidate_orbit_observations["orbit_id"].value_counts()
    orbit_ids = occurrences.index.values[occurrences.values >= min_obs]
candidate_orbits = orbits[orbits["orbit_id"].isin(orbit_ids)]
candidate_orbit_observations = candidate_orbit_observations[candidate_orbit_observations["orbit_id"].isin(orbit_ids)]
# Add a trkSub column to the discovery candidates
trk_subs = [f"t{i[0:4]}{i[-3:]}" for i in candidate_orbit_observations["orbit_id"].values]
candidate_orbit_observations.insert(1, "trkSub", trk_subs)
discovery_candidates = (candidate_orbits, candidate_orbit_observations)
known_orbits = (recovered_known_orbits, recovered_known_orbit_observations)
return discovery_candidates, known_orbits
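

# Hedged, self-contained sketch of the two filtering idioms used above: the
# time-separation flag on 'dmjd_utc' and the min_obs cut via value_counts.
# The tiny DataFrame below is made up and does not come from calcDeltas.
if __name__ == "__main__":
    example = pd.DataFrame({
        "orbit_id": ["a", "a", "a", "b", "b"],
        "obs_id": ["o1", "o2", "o3", "o4", "o5"],
        # Days since the previous observation in the group (NaN for the first).
        "dmjd_utc": [None, 0.05, 0.0001, None, 0.1],
    })
    min_obs = 2
    min_time_separation = 30.0  # minutes

    # Flag observations that follow another one too closely (mirrors the
    # 'filtered' column logic in filterOrbits).
    example["filtered"] = 1
    keep = example["dmjd_utc"].isna() | (example["dmjd_utc"] >= min_time_separation / 60 / 24)
    example.loc[keep, "filtered"] = 0
    example = example[example["filtered"] == 0]

    # Keep only orbits that still have at least min_obs observations.
    counts = example["orbit_id"].value_counts()
    good_ids = counts.index.values[counts.values >= min_obs]
    print(example[example["orbit_id"].isin(good_ids)])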
|
import Archi
import yaml
def test_model_loading():
try:
config = yaml.safe_load(
open("./esbn_model_test_config.yaml", 'r'),
)
    except yaml.YAMLError as e:
        print(e)
        raise
from Archi import load_model
model = load_model(config)
assert 'KeyValueMemory' in model.modules.keys()
assert 'key_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'value_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'read_key_plus_conf' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'CoreLSTM' in model.modules.keys()
assert 'CoreLSTM' in model.stream_handler.placeholders['inputs'].keys()
assert 'hidden' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
assert 'cell' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
assert 'iteration' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
def test_model_forward():
try:
config = yaml.safe_load(
open("./esbn_model_test_config.yaml", 'r'),
)
    except yaml.YAMLError as e:
        print(e)
        raise
from Archi import load_model
model = load_model(config)
import torch
inputs_dict = {
'x':torch.rand(4,3,64,64),
}
output = model(**inputs_dict)
assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0
output1 = model(**inputs_dict)
assert 'lstm_output' in output['modules']['CoreLSTM']
assert 'processed_input' in output['modules']['Encoder']
assert 'processed_input' in output['modules']['ToGateFCN']
assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0
assert output1['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() != 0.0
assert len(dict(model.named_parameters())) != 0
    for name, p in model.named_parameters():
        print(name)
if __name__ == '__main__':
test_model_loading()
test_model_forward()
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import signal
from subprocess import PIPE, STDOUT, Popen
from tempfile import TemporaryDirectory, gettempdir
from typing import Dict, Optional
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.operator_helpers import context_to_airflow_vars
class BashOperator(BaseOperator):
"""
Execute a Bash script, command or set of commands.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BashOperator`
If BaseOperator.do_xcom_push is True, the last line written to stdout
    will also be pushed to an XCom when the bash command completes.
:param bash_command: The command, set of commands or reference to a
bash script (must be '.sh') to be executed. (templated)
:type bash_command: str
:param env: If env is not None, it must be a mapping that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:type env: dict
:param output_encoding: Output encoding of bash command
:type output_encoding: str
    On execution of this operator the task will be up for retry
    when an exception is raised. However, if a sub-command exits with a non-zero
    value, Airflow will not recognize it as a failure unless the whole shell exits
    with a failure. The easiest way of achieving this is to prefix the command
    with ``set -e;``
Example:
.. code-block:: python
bash_command = "set -e; python3 script.py '{{ next_execution_date }}'"
"""
template_fields = ('bash_command', 'env')
template_ext = ('.sh', '.bash',)
ui_color = '#f0ede4'
@apply_defaults
def __init__(
self,
bash_command: str,
env: Optional[Dict[str, str]] = None,
output_encoding: str = 'utf-8',
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
self.sub_process = None
def execute(self, context):
"""
Execute the bash command in a temporary directory
which will be cleaned afterwards
"""
self.log.info('Tmp dir root location: \n %s', gettempdir())
# Prepare env for child process.
env = self.env
if env is None:
env = os.environ.copy()
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
self.log.debug('Exporting the following env vars:\n%s',
'\n'.join(["{}={}".format(k, v)
for k, v in airflow_context_vars.items()]))
env.update(airflow_context_vars)
with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
def pre_exec():
# Restore default signal disposition and invoke setsid
for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
if hasattr(signal, sig):
signal.signal(getattr(signal, sig), signal.SIG_DFL)
os.setsid()
self.log.info('Running command: %s', self.bash_command)
self.sub_process = Popen( # pylint: disable=subprocess-popen-preexec-fn
['bash', "-c", self.bash_command],
stdout=PIPE,
stderr=STDOUT,
cwd=tmp_dir,
env=env,
preexec_fn=pre_exec)
self.log.info('Output:')
line = ''
for raw_line in iter(self.sub_process.stdout.readline, b''):
line = raw_line.decode(self.output_encoding).rstrip()
self.log.info("%s", line)
self.sub_process.wait()
self.log.info('Command exited with return code %s', self.sub_process.returncode)
if self.sub_process.returncode != 0:
raise AirflowException('Bash command failed. The command returned a non-zero exit code.')
return line
def on_kill(self):
self.log.info('Sending SIGTERM signal to bash process group')
if self.sub_process and hasattr(self.sub_process, 'pid'):
os.killpg(os.getpgid(self.sub_process.pid), signal.SIGTERM)
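

# Hedged usage sketch, not part of this module: shows how the operator above is
# typically instantiated inside a DAG. The dag_id, task_id and dates are made up.
if __name__ == '__main__':
    from datetime import datetime

    from airflow import DAG

    with DAG(dag_id='example_bash_operator_sketch',
             start_date=datetime(2020, 1, 1),
             schedule_interval=None) as dag:
        print_date = BashOperator(
            task_id='print_date',
            # 'set -e;' makes the task fail if any sub-command fails,
            # as recommended in the class docstring above.
            bash_command='set -e; date',
        )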
|
# -*- coding: utf-8 -*-
import urlparse
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase
from tests.factories import AuthUserFactory
from api.base.settings.defaults import API_BASE
class TestUsers(ApiTestCase):
def setUp(self):
super(TestUsers, self).setUp()
self.user_one = AuthUserFactory()
self.user_two = AuthUserFactory()
def tearDown(self):
super(TestUsers, self).tearDown()
def test_returns_200(self):
res = self.app.get('/{}users/'.format(API_BASE))
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_find_user_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
        user_json = res.json['data']
        ids = [each['id'] for each in user_json]
assert_in(self.user_two._id, ids)
def test_all_users_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
        user_json = res.json['data']
        ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_multiple_in_users(self):
url = "/{}users/?filter[full_name]=fred".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_single_user_in_users(self):
url = "/{}users/?filter[full_name]=my".format(API_BASE)
self.user_one.fullname = 'My Mom'
self.user_one.save()
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_find_no_user_in_users(self):
url = "/{}users/?filter[full_name]=NotMyMom".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_not_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_users_list_takes_profile_image_size_param(self):
size = 42
url = "/{}users/?profile_image_size={}".format(API_BASE, size)
res = self.app.get(url)
user_json = res.json['data']
for user in user_json:
profile_image_url = user['links']['profile_image']
query_dict = urlparse.parse_qs(urlparse.urlparse(profile_image_url).query)
assert_equal(int(query_dict.get('s')[0]), size)
|
from muteria.drivers.testgeneration.testcase_formats.python_unittest.unittest \
    import *
|
#!/usr/bin/python
"""
--------------------------------------------------------------------------------------------------------------
The MIT License (MIT)
Copyright (c) 2016 William Yang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------------------------------------
pixelprint.py
LED matrix printer
--------------------------------------------------------------------------------------------------------------
"""
# Raspberry Pi 2 GPIO
import time
import RPi.GPIO as GPIO
# Letters and Numbers
import alphanumeric
# 5x8 LED Matrix row pins
pin_r1 = 5
pin_r2 = 6
pin_r3 = 13
pin_r4 = 19
pin_r5 = 26
# 5x8 LED Matrix col pins
pin_c1 = 21
pin_c2 = 20
pin_c3 = 16
pin_c4 = 12
# Number of refresh cycles to show each letter before advancing to the next
PAUSE_INTERVAL = 300
# Time it takes to scan col
COL_SCAN = 0.0001
# Time it takes to scan a row
ROW_SCAN = 0.0008
# Number of cols
NUM_COLS = 8
"""
-------------------------------------------------------
Main LED Matrix class
-------------------------------------------------------
"""
class LEDMatrixControl:
def __init__(self):
"""
---------------------------------------------------------
constructor
---------------------------------------------------------
"""
self.row_ctrl = [pin_r1, pin_r2, pin_r3, pin_r4, pin_r5]
self.col_ctrl = [pin_c1, pin_c2, pin_c3, pin_c4]
GPIO.setmode(GPIO.BCM)
for each in self.row_ctrl:
GPIO.setup(each, GPIO.OUT)
for each in self.col_ctrl:
GPIO.setup(each, GPIO.OUT)
def _decToBinPadded(self, decimal):
"""
---------------------------------------------------------
private method to convert decimal to binary, then pad 0's
---------------------------------------------------------
"""
raw = str(bin(decimal))
part = raw[2:]
final = part.zfill(4)
a = True if final[0] == "1" else False
b = True if final[1] == "1" else False
c = True if final[2] == "1" else False
d = True if final[3] == "1" else False
return [a, b, c, d]
def matrixPrint(self, user_str):
"""
---------------------------------------------------------
main print function.
Use LEDMatrixControlObj.matrixPrint("YOUR TEXT 123.456")
---------------------------------------------------------
"""
pipeline = []
for each in user_str:
print(each)
pipeline.append(alphanumeric.pixelize(each))
self._printPipeline(pipeline, True)
def matrixPrintRepeat(self, user_str):
"""
---------------------------------------------------------
main print function repeating.
Use LEDMatrixControlObj.matrixPrintRepeat("YOUR TEXT 123.456")
---------------------------------------------------------
"""
pipeline = []
for each in user_str:
print(each)
pipeline.append(alphanumeric.pixelize(each))
self._printPipeline(pipeline, False)
def _printPipeline(self, chars, mode):
"""
---------------------------------------------------------
Internal printer pipeline
---------------------------------------------------------
"""
order = 0
count = 0
i = 0
repeat = True
while repeat:
current = chars[order]
for each in self.row_ctrl:
GPIO.output(each, True)
j = 0
if(count == PAUSE_INTERVAL and order < len(chars)):
count = 0
order = order + 1
if(order == len(chars)):
order = 0
if(mode):
repeat = False
count = count + 1
while(j < NUM_COLS):
answer = self._decToBinPadded(j)
for i in range(0, len(self.col_ctrl)):
GPIO.output(self.col_ctrl[i], answer[i])
for i in range(0, len(self.row_ctrl)):
if(i in current[len(current) - j - 1]):
GPIO.output(self.row_ctrl[i], False)
else:
GPIO.output(self.row_ctrl[i], True)
j += 1
time.sleep(COL_SCAN)
time.sleep(ROW_SCAN)
if(i == 4):
i = 0
else:
i += 1
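

# Hedged usage sketch: requires a Raspberry Pi with the 5x8 matrix wired to the
# BCM pins defined at the top of this file and the companion `alphanumeric`
# module able to pixelize the characters below. Guarded so importing this file
# stays side-effect free.
if __name__ == "__main__":
    led = LEDMatrixControl()
    try:
        led.matrixPrint("HELLO 123")
    finally:
        GPIO.cleanup()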
|
import unittest
from pytorch_metric_learning.utils import accuracy_calculator
import numpy as np
class TestCalculateAccuracies(unittest.TestCase):
def test_accuracy_calculator(self):
query_labels = np.array([1, 1, 2, 3, 4])
knn_labels1 = np.array(
[
[0, 1, 1, 2, 2],
[1, 0, 1, 1, 3],
[4, 4, 4, 4, 2],
[3, 1, 3, 1, 3],
[0, 0, 4, 2, 2],
]
)
label_counts1 = {1: 3, 2: 5, 3: 4, 4: 5}
knn_labels2 = knn_labels1 + 5
label_counts2 = {k + 5: v for k, v in label_counts1.items()}
for avg_of_avgs in [False, True]:
for i, (knn_labels, label_counts) in enumerate(
[(knn_labels1, label_counts1), (knn_labels2, label_counts2)]
):
AC = accuracy_calculator.AccuracyCalculator(
exclude=("NMI", "AMI"), avg_of_avgs=avg_of_avgs
)
kwargs = {
"query_labels": query_labels,
"label_counts": label_counts,
"knn_labels": knn_labels,
"not_lone_query_mask": np.ones(5).astype(np.bool)
if i == 0
else np.zeros(5).astype(np.bool),
}
function_dict = AC.get_function_dict()
for ecfss in [False, True]:
if ecfss:
kwargs["knn_labels"] = kwargs["knn_labels"][:, 1:]
kwargs["embeddings_come_from_same_source"] = ecfss
acc = AC._get_accuracy(function_dict, **kwargs)
if i == 1:
self.assertTrue(acc["precision_at_1"] == 0)
self.assertTrue(acc["r_precision"] == 0)
self.assertTrue(acc["mean_average_precision_at_r"] == 0)
self.assertTrue(acc["mean_average_precision"] == 0)
else:
self.assertTrue(
acc["precision_at_1"]
== self.correct_precision_at_1(ecfss, avg_of_avgs)
)
self.assertTrue(
acc["r_precision"]
== self.correct_r_precision(ecfss, avg_of_avgs)
)
self.assertTrue(
acc["mean_average_precision_at_r"]
== self.correct_mean_average_precision_at_r(
ecfss, avg_of_avgs
)
)
self.assertTrue(
acc["mean_average_precision"]
== self.correct_mean_average_precision(ecfss, avg_of_avgs)
)
def correct_precision_at_1(self, embeddings_come_from_same_source, avg_of_avgs):
if not embeddings_come_from_same_source:
if not avg_of_avgs:
return 0.4
else:
return (0.5 + 0 + 1 + 0) / 4
else:
if not avg_of_avgs:
return 1.0 / 5
else:
return (0.5 + 0 + 0 + 0) / 4
def correct_r_precision(self, embeddings_come_from_same_source, avg_of_avgs):
if not embeddings_come_from_same_source:
acc0 = 2.0 / 3
acc1 = 2.0 / 3
acc2 = 1.0 / 5
acc3 = 2.0 / 4
acc4 = 1.0 / 5
else:
acc0 = 1.0 / 1
acc1 = 1.0 / 2
acc2 = 1.0 / 4
acc3 = 1.0 / 3
acc4 = 1.0 / 4
if not avg_of_avgs:
return np.mean([acc0, acc1, acc2, acc3, acc4])
else:
return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])
def correct_mean_average_precision_at_r(
self, embeddings_come_from_same_source, avg_of_avgs
):
if not embeddings_come_from_same_source:
acc0 = (1.0 / 2 + 2.0 / 3) / 3
acc1 = (1 + 2.0 / 3) / 3
acc2 = (1.0 / 5) / 5
acc3 = (1 + 2.0 / 3) / 4
acc4 = (1.0 / 3) / 5
else:
acc0 = 1
acc1 = (1.0 / 2) / 2
acc2 = (1.0 / 4) / 4
acc3 = (1.0 / 2) / 3
acc4 = (1.0 / 2) / 4
if not avg_of_avgs:
return np.mean([acc0, acc1, acc2, acc3, acc4])
else:
return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])
def correct_mean_average_precision(
self, embeddings_come_from_same_source, avg_of_avgs
):
if not embeddings_come_from_same_source:
acc0 = (1.0 / 2 + 2.0 / 3) / 2
acc1 = (1 + 2.0 / 3 + 3.0 / 4) / 3
acc2 = (1.0 / 5) / 1
acc3 = (1 + 2.0 / 3 + 3.0 / 5) / 3
acc4 = (1.0 / 3) / 1
else:
acc0 = 1
acc1 = (1.0 / 2 + 2.0 / 3) / 2
acc2 = 1.0 / 4
acc3 = (1.0 / 2 + 2.0 / 4) / 2
acc4 = 1.0 / 2
if not avg_of_avgs:
return np.mean([acc0, acc1, acc2, acc3, acc4])
else:
return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])
def test_get_label_counts(self):
label_counts, num_k = accuracy_calculator.get_label_counts(
[0, 1, 3, 2, 3, 1, 3, 3, 4, 6, 5, 10, 4, 4, 4, 4, 6, 6, 5]
)
self.assertTrue(
label_counts == {0: 1, 1: 2, 2: 1, 3: 4, 4: 5, 5: 2, 6: 3, 10: 1}
)
self.assertTrue(num_k == 5)
def test_get_lone_query_labels(self):
query_labels = np.array([0, 1, 2, 3, 4, 5, 6])
reference_labels = np.array([0, 0, 0, 1, 2, 2, 3, 4, 5, 6])
reference_label_counts, _ = accuracy_calculator.get_label_counts(
reference_labels
)
lone_query_labels = accuracy_calculator.get_lone_query_labels(
query_labels, reference_labels, reference_label_counts, True
)
self.assertTrue(
np.all(np.unique(lone_query_labels) == np.array([1, 3, 4, 5, 6]))
)
query_labels = np.array([0, 1, 2, 3, 4])
reference_labels = np.array([0, 0, 0, 1, 2, 2, 4, 5, 6])
lone_query_labels = accuracy_calculator.get_lone_query_labels(
query_labels, reference_labels, reference_label_counts, False
)
self.assertTrue(np.all(np.unique(lone_query_labels) == np.array([3])))
class TestCalculateAccuraciesAndFaiss(unittest.TestCase):
def test_accuracy_calculator_and_faiss(self):
AC = accuracy_calculator.AccuracyCalculator(exclude=("NMI", "AMI"))
query = np.arange(10)[:, None].astype(np.float32)
reference = np.arange(10)[:, None].astype(np.float32)
        query_labels = np.arange(10).astype(int)
        reference_labels = np.arange(10).astype(int)
acc = AC.get_accuracy(query, reference, query_labels, reference_labels, False)
self.assertTrue(acc["precision_at_1"] == 1)
self.assertTrue(acc["r_precision"] == 1)
self.assertTrue(acc["mean_average_precision_at_r"] == 1)
reference = (np.arange(20) / 2.0)[:, None].astype(np.float32)
        reference_labels = np.zeros(20).astype(int)
        reference_labels[::2] = query_labels
        reference_labels[1::2] = np.ones(10).astype(int)
acc = AC.get_accuracy(query, reference, query_labels, reference_labels, True)
self.assertTrue(acc["precision_at_1"] == 1)
self.assertTrue(acc["r_precision"] == 0.5)
self.assertTrue(
acc["mean_average_precision_at_r"]
== (1 + 2.0 / 2 + 3.0 / 5 + 4.0 / 7 + 5.0 / 9) / 10
)
def test_accuracy_calculator_and_faiss_avg_of_avgs(self):
AC_global_average = accuracy_calculator.AccuracyCalculator(
exclude=("NMI", "AMI"), avg_of_avgs=False
)
AC_per_class_average = accuracy_calculator.AccuracyCalculator(
exclude=("NMI", "AMI"), avg_of_avgs=True
)
query = np.arange(10)[:, None].astype(np.float32)
reference = np.arange(10)[:, None].astype(np.float32)
query[-1] = 100
reference[0] = -100
query_labels = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
reference_labels = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
acc = AC_global_average.get_accuracy(
query, reference, query_labels, reference_labels, False
)
self.assertTrue(acc["precision_at_1"] == 0.9)
self.assertTrue(acc["r_precision"] == 0.9)
self.assertTrue(acc["mean_average_precision_at_r"] == 0.9)
acc = AC_per_class_average.get_accuracy(
query, reference, query_labels, reference_labels, False
)
self.assertTrue(acc["precision_at_1"] == 0.5)
self.assertTrue(acc["r_precision"] == 0.5)
self.assertTrue(acc["mean_average_precision_at_r"] == 0.5)
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""
# TODO(b/159343581): Properly support CompositeTensor in all functions in this
# file.
import functools
import operator
import sys
import six
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import imperative_grad
from tensorflow.python.eager import tape
from tensorflow.python.framework import composite_tensor_gradient
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Note that we need to lazy load the following two modules to avoid creating
# circular dependencies.
# TODO(b/119775953): fix the circular dependencies.
pfor_ops = LazyLoader(
"pfor_ops", globals(),
"tensorflow.python.ops.parallel_for.control_flow_ops")
function = LazyLoader("function", globals(),
"tensorflow.python.eager.function")
_op_attr_type_cache = {}
def op_attr_type(op_type, attr_name):
try:
return _op_attr_type_cache[(op_type, attr_name)]
except KeyError:
context.ensure_initialized()
h = context.context()._handle # pylint: disable=protected-access
attr_type = pywrap_tfe.TFE_OpNameGetAttrType(h, op_type, attr_name)
_op_attr_type_cache[(op_type, attr_name)] = attr_type
return attr_type
def make_attr(attr_type, value):
# pybind11 enums do not return the raw value like SWIG enums do. They are
# useful when comparing amongst each other but not direct integers as we are
# doing in most tests.
# https://pybind11.readthedocs.io/en/stable/classes.html#enumerations-and-internal-types
# TODO(amitpatankar): After all SWIG transitions, convert the enum comparisons
# from integer value to class.
if attr_type == int(pywrap_tfe.TF_ATTR_TYPE):
return dtypes.as_dtype(value)
if attr_type == [int(pywrap_tfe.TF_ATTR_TYPE)]:
return [dtypes.as_dtype(v) for v in value]
if attr_type == int(pywrap_tfe.TF_ATTR_SHAPE):
return tensor_shape.as_shape(value).as_proto()
if attr_type == [int(pywrap_tfe.TF_ATTR_SHAPE)]:
return [tensor_shape.as_shape(v).as_proto() for v in value]
if isinstance(value, str):
return value.encode()
return value
class _MockOp(object):
"""Pretends to be a tf.Operation for the gradient functions."""
def __init__(self, attrs, inputs, outputs, typ, skip_input_indices):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
self.skip_input_indices = skip_input_indices
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
def _get_control_flow_context(self):
raise NotImplementedError(
"tf.GradientTape.gradients() does not support graph control flow "
"operations like tf.cond or tf.while at this time. Use tf.gradients() "
"instead. If you need this feature, please file a feature request at "
"https://github.com/tensorflow/tensorflow/issues/new"
)
def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,
out_grads, skip_input_indices, forward_pass_name_scope):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
skip_input_indices: a tuple that is passed to the gradient function,
indicating which inputs to skip calculating the gradient for
forward_pass_name_scope: the namescope of the op in the forward pass.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
# This does not work with v1 TensorArrays.
if ops.executing_eagerly_outside_functions(
) or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):
gradient_name_scope = "gradient_tape/"
if forward_pass_name_scope:
gradient_name_scope += forward_pass_name_scope + "/"
with ops.name_scope(gradient_name_scope):
return grad_fn(mock_op, *out_grads)
else:
return grad_fn(mock_op, *out_grads)
pywrap_tfe.TFE_Py_RegisterGradientFunction(_gradient_function)
def _must_record_gradient():
return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()
@tf_export("__internal__.record_gradient", v1=[])
def record_gradient(op_name, inputs, attrs, outputs):
"""Explicitly record the gradient for a given op.
Args:
op_name: The op name as listed in the `OpDef` for the op.
inputs: A list of tensor inputs to the op.
attrs: The op attributes as a flattened list of alternating attribute names
and attribute values.
outputs: A list of tensor outputs from the op.
"""
pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs,
ops.get_name_scope())
execute.must_record_gradient = _must_record_gradient
execute.record_gradient = record_gradient
def implicit_val_and_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the value and the gradient of f when called with
the same arguments. The gradient is with respect to all trainable TFE
variables accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.compat.v1.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
val_grad_fn = tfe.implicit_value_and_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
value, grads_and_vars = val_grad_fn(x, y)
print('Value of loss: %s' % value)
# Apply the gradients to Variables.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a tuple pair.
Its first element is the value to which the function evaluates.
Its second element is list of (gradient, variable) pairs.
Raises:
ValueError: if `f` returns None.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
this_tape = tape.push_new_tape()
try:
end_node = f(*args, **kwds)
if end_node is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
finally:
tape.pop_tape(this_tape)
# Note: variables are returned in construction order. This ensures unique
# order across executions.
variables = this_tape.watched_variables()
if not variables:
raise ValueError("No trainable variables were accessed while the "
"function was being computed.")
sources = [v.handle for v in variables]
for s in sources:
if getattr(s, "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors yet."
)
grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the gradient of f when called with the same
arguments. The gradient is with respect to all trainable TFE variables
accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.compat.v1.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
grad_fn = tfe.implicit_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
grads_and_vars = grad_fn(x, y)
# Apply the gradients to Variables.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
"""The positions of the parameters of f to be differentiated in param_args."""
try:
args = tf_inspect.getfullargspec(f).args
except TypeError as e:
# TypeError can happen when f is a callable object.
if params is None:
return range(len(param_args))
elif all(isinstance(x, int) for x in params):
return params
raise ValueError("Either callable provided is not a function or could not "
"inspect its arguments by name: %s. Original error: %s"
% (f, e))
if params is None:
if not args:
return range(len(param_args))
if args[0] == "self":
return range(len(args) - 1)
else:
return range(len(args))
elif all(isinstance(x, six.string_types) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
"""Returns a function which differentiates f with respect to params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
# The 2nd order derivatives with respect to x is:
# d^2 f / (dx)^2 = 6 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns 1st order gradients.
grad_fn = tfe.gradients_function(f)
x = 2.0
y = 3.0
# Invoke the 1st order gradient function.
x_grad, y_grad = grad_fn(x, y)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# Obtain a function that returns the 2nd order gradient with respect to x.
gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])
# Invoke the 2nd order gradient function.
x_gradgrad = gradgrad_fn(x, y)[0]
assert x_gradgrad.numpy() == 6 * 2 * 3
# To obtain a callable that returns the gradient(s) of `f` with respect to a
# subset of its inputs, use the `params` keyword argument with
# `gradients_function()`.
ygrad_fn = tfe.gradients_function(f, params=[1])
(y_grad,) = ygrad_fn(x, y)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Note that only tensors with real or complex dtypes are differentiable.
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing None
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of `f` with respect to all of `params`. The function takes an extra optional
keyword argument `dy`. Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the gradient of the decorated function."""
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
"""Make each of the parameter_positions in args a unique ops.Tensor object.
Ensure that each parameter is treated independently.
For example:
def f(x, y): return x * y
g = gradients_function(f)
one = tf.constant(1.)
g(one, one) should return [1., 1.]
(even though the two arguments are the same Tensor object).
Args:
parameter_positions: List of indices into args defining the arguments to
differentiate against.
args: A list of arguments to the function to be differentiated.
Returns:
args, possibly edited in-place.
"""
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = gen_array_ops.identity(args[i])
else:
s.add(tid)
return args
def val_and_grad_function(f, params=None):
"""Returns a function that computes f and its derivative w.r.t. params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns the function value and the 1st order
# gradients.
val_grads_fn = tfe.value_and_gradients_function(f)
x = 2.0
y = 3.0
# Invoke the value-and-gradients function.
f_val, (x_grad, y_grad) = val_grads_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# To obtain a callable that returns the value of `f` and the gradient(s) of
# `f` with respect to a subset of its inputs, use the `params` keyword
# argument with `value_and_gradients_function()`.
val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])
f_val, (y_grad,) = val_ygrad_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing `None`
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
dy = kwds.pop("dy", None)
if kwds:
raise ValueError("Functions to be differentiated cannot "
"receive keyword arguments.")
val, vjp = make_vjp(f, params)(*args, **kwds)
return val, vjp(dy=dy)
return decorated
def make_vjp(f, params=None, persistent=True):
"""Returns a function that computes f and its vjp w.r.t.
params.
The term "vjp" here is an abbreviation for vector-jacobian product.
Args:
f: the function to be differentiated.
params: the parameters (numbers or names) to differentiate with respect to.
A value of None will differentiate with respect to all parameters.
persistent: Boolean controlling whether the VJP function can be re-used.
Must be True or False.
Returns:
A function, which when called, returns a tuple (value, vjp), where:
- value is the result of calling f.
- vjp is a function, which takes a vector as an argument and
returns the product of that vector with the Jacobian of f.
Providing no argument to vjp is equivalent to providing a
vector of ones.
For example,
```python
def f(x):
return x * x
wrapped_fn = tfe.make_vjp(f)
result, vjp = wrapped_fn(tf.constant(3.0))
# result is 9.0
vjp() # the vjp function returns 6.0
    ```

  Raises:
ValueError: if `f` returns None.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
this_tape = tape.push_new_tape(persistent=persistent)
try:
sources = []
args = [
ops.convert_to_tensor(arg) if i in parameter_positions else arg
for i, arg in enumerate(args)
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
if getattr(args[i], "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors"
"yet.")
sources.append(args[i])
tape.watch(this_tape, args[i])
result = f(*args)
if result is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
flat_result = nest.flatten(result)
flat_result = [gen_array_ops.identity(x) for x in flat_result]
result = nest.pack_sequence_as(result, flat_result)
finally:
tape.pop_tape(this_tape)
def vjp(dy=None):
if dy is not None:
dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
return imperative_grad.imperative_grad(
this_tape, nest.flatten(result), sources, output_gradients=dy)
return result, vjp
return decorated
def flatten_nested_indexed_slices(grad):
assert isinstance(grad, indexed_slices.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, indexed_slices.IndexedSlices)
g = flatten_nested_indexed_slices(grad.values)
return indexed_slices.IndexedSlices(
g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape)
def aggregate_indexed_slices_gradients(grads):
"""Aggregates gradients containing `IndexedSlices`s."""
if len(grads) < 1:
return None
if len(grads) == 1:
return grads[0]
grads = [g for g in grads if g is not None]
# If any gradient is a `Tensor`, sum them up and return a dense tensor
# object.
if any(isinstance(g, ops.Tensor) for g in grads):
return math_ops.add_n(grads)
# The following `_as_indexed_slices_list` casts ids of IndexedSlices into
# int64. It is to make sure the inputs of `concat` all have same the data
# type.
grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access
grads = [flatten_nested_indexed_slices(x) for x in grads]
# Form IndexedSlices out of the concatenated values and indices.
concat_grad = indexed_slices.IndexedSlices(
array_ops.concat([x.values for x in grads], axis=0),
array_ops.concat([x.indices for x in grads], axis=0),
grads[0].dense_shape)
return concat_grad
def _aggregate_grads(gradients):
"""Aggregate gradients from multiple sources.
Args:
gradients: A list of 'Tensor' or 'IndexedSlices' gradients.
Returns:
If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.
Otherwise returns an aggregated 'IndexedSlices'.
"""
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all(isinstance(g, ops.Tensor) for g in gradients):
return gen_math_ops.add_n(gradients)
else:
assert all(
isinstance(g, (ops.Tensor, indexed_slices.IndexedSlices))
for g in gradients)
return aggregate_indexed_slices_gradients(gradients)
def _num_elements(grad):
"""The number of elements in the `grad` tensor."""
if isinstance(grad, ops.Tensor):
shape_tuple = grad._shape_tuple() # pylint: disable=protected-access
elif isinstance(grad, indexed_slices.IndexedSlices):
shape_tuple = grad.values._shape_tuple() # pylint: disable=protected-access
else:
raise ValueError("`grad` not a Tensor or IndexedSlices.")
if shape_tuple is None or None in shape_tuple:
return 0
return functools.reduce(operator.mul, shape_tuple, 1)
def _fast_fill(value, shape, dtype):
return array_ops.fill(
constant_op.constant(shape, dtype=dtypes.int32),
constant_op.constant(value, dtype=dtype))
def _zeros(shape, dtype):
"""Helper to return (possibly cached) zero tensors in eager mode."""
# Note: variants will use _zeros_like
if dtype == dtypes.string or dtype == dtypes.resource:
return None
ctx = context.context()
if not ctx.executing_eagerly():
return array_ops.zeros(shape, dtype)
device = ctx.device_name
if tensor_util.is_tf_type(shape):
shape_key = shape.ref()
else:
shape_key = shape
cache_key = shape_key, dtype, device
cached = ctx.zeros_cache().get(cache_key)
if cached is None:
if dtypes.as_dtype(dtype).is_bool:
value = False
else:
value = 0
cached = _fast_fill(value, shape, dtype)
ctx.zeros_cache().put(cache_key, cached)
return cached
def _ones(shape, dtype):
as_dtype = dtypes.as_dtype(dtype)
if as_dtype == dtypes.string:
return None
if not context.executing_eagerly():
return array_ops.ones(shape, dtype)
if as_dtype.is_bool:
value = True
else:
value = 1
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(value, dtype=dtype)
return _fast_fill(value, shape, dtype)
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
zeros_fn=_zeros,
ones_fn=_ones,
zeros_like_fn=default_gradient.zeros_like,
ones_like_fn=default_gradient.ones_like,
graph_shape_fn=gen_array_ops.shape)
pywrap_tfe.TFE_Py_RegisterVSpace(_default_vspace)
def _handle_or_self(x):
"""Unwrap resource variable/ndarray to return tensors."""
if resource_variable_ops.is_resource_variable(x):
return x.handle
return x
@tf_export("GradientTape", "autodiff.GradientTape", v1=["GradientTape"])
class GradientTape(object):
"""Record operations for automatic differentiation.
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,
where `trainable=True` is default in both cases) are automatically watched.
Tensors can be manually watched by invoking the `watch` method on this context
manager.
For example, consider the function `y = x * x`. The gradient at `x = 3.0` can
be computed as:
>>> x = tf.constant(3.0)
>>> with tf.GradientTape() as g:
... g.watch(x)
... y = x * x
>>> dy_dx = g.gradient(y, x)
>>> print(dy_dx)
tf.Tensor(6.0, shape=(), dtype=float32)
GradientTapes can be nested to compute higher-order derivatives. For example,
>>> x = tf.constant(5.0)
>>> with tf.GradientTape() as g:
... g.watch(x)
... with tf.GradientTape() as gg:
... gg.watch(x)
... y = x * x
... dy_dx = gg.gradient(y, x) # dy_dx = 2 * x
>>> d2y_dx2 = g.gradient(dy_dx, x) # d2y_dx2 = 2
>>> print(dy_dx)
tf.Tensor(10.0, shape=(), dtype=float32)
>>> print(d2y_dx2)
tf.Tensor(2.0, shape=(), dtype=float32)
By default, the resources held by a GradientTape are released as soon as
GradientTape.gradient() method is called. To compute multiple gradients over
the same computation, create a persistent gradient tape. This allows multiple
calls to the gradient() method as resources are released when the tape object
is garbage collected. For example:
>>> x = tf.constant(3.0)
>>> with tf.GradientTape(persistent=True) as g:
... g.watch(x)
... y = x * x
... z = y * y
>>> dz_dx = g.gradient(z, x) # (4*x^3 at x = 3)
>>> print(dz_dx)
tf.Tensor(108.0, shape=(), dtype=float32)
>>> dy_dx = g.gradient(y, x)
>>> print(dy_dx)
tf.Tensor(6.0, shape=(), dtype=float32)
By default GradientTape will automatically watch any trainable variables that
are accessed inside the context. If you want fine grained control over which
variables are watched you can disable automatic tracking by passing
`watch_accessed_variables=False` to the tape constructor:
>>> x = tf.Variable(2.0)
>>> w = tf.Variable(5.0)
>>> with tf.GradientTape(
... watch_accessed_variables=False, persistent=True) as tape:
... tape.watch(x)
... y = x ** 2 # Gradients will be available for `x`.
... z = w ** 3 # No gradients will be available as `w` isn't being watched.
>>> dy_dx = tape.gradient(y, x)
>>> print(dy_dx)
tf.Tensor(4.0, shape=(), dtype=float32)
>>> # No gradients will be available as `w` isn't being watched.
>>> dz_dw = tape.gradient(z, w)
>>> print(dz_dw)
None
Note that when using models you should ensure that your variables exist when
using `watch_accessed_variables=False`. Otherwise it's quite easy to make your
first iteration not have any gradients:
```python
a = tf.keras.layers.Dense(32)
b = tf.keras.layers.Dense(32)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(a.variables) # Since `a.build` has not been called at this point
# `a.variables` will return an empty list and the
# tape will not be watching anything.
result = b(a(inputs))
tape.gradient(result, a.variables) # The result of this computation will be
# a list of `None`s since a's variables
# are not being watched.
```
Note that only tensors with real or complex dtypes are differentiable.
"""
def __init__(self, persistent=False, watch_accessed_variables=True):
"""Creates a new GradientTape.
Args:
persistent: Boolean controlling whether a persistent gradient tape
is created. False by default, which means at most one call can
be made to the gradient() method on this object.
watch_accessed_variables: Boolean controlling whether the tape will
automatically `watch` any (trainable) variables accessed while the tape
is active. Defaults to True meaning gradients can be requested from any
result computed in the tape derived from reading a trainable `Variable`.
If False users must explicitly `watch` any `Variable`s they want to
request gradients from.
"""
self._tape = None
self._persistent = persistent
self._watch_accessed_variables = watch_accessed_variables
self._watched_variables = ()
self._recording = False
def __enter__(self):
"""Enters a context inside which operations are recorded on this tape."""
self._push_tape()
return self
def __exit__(self, typ, value, traceback):
"""Exits the recording context, no further operations are traced."""
if self._recording:
self._pop_tape()
def _push_tape(self):
"""Pushes a new tape onto the tape stack."""
if self._recording:
raise ValueError("Tape is still recording, This can happen if you try to "
"re-enter an already-active tape.")
if self._tape is None:
self._tape = tape.push_new_tape(
persistent=self._persistent,
watch_accessed_variables=self._watch_accessed_variables)
else:
tape.push_tape(self._tape)
self._recording = True
def _pop_tape(self):
if not self._recording:
raise ValueError("Tape is not recording.")
tape.pop_tape(self._tape)
self._recording = False
@tf_contextlib.contextmanager
def _ensure_recording(self):
"""Ensures that this tape is recording."""
if not self._recording:
try:
self._push_tape()
yield
finally:
self._pop_tape()
else:
yield
def watch(self, tensor):
"""Ensures that `tensor` is being traced by this tape.
Args:
tensor: a Tensor or list of Tensors.
Raises:
ValueError: if it encounters something that is not a tensor.
"""
for t in nest.flatten(tensor, expand_composites=True):
if not (_pywrap_utils.IsTensor(t) or _pywrap_utils.IsVariable(t)):
raise ValueError("Passed in object of type {}, not tf.Tensor".format(
type(t)))
if not backprop_util.IsTrainable(t):
logging.log_first_n(
logging.WARN, "The dtype of the watched tensor must be "
"floating (e.g. tf.float32), got %r", 5, t.dtype)
if hasattr(t, "handle"):
# There are many variable-like objects, all of them currently have
# `handle` attribute that points to a tensor. If this changes, internals
# of watch_variable need to change as well.
tape.watch_variable(self._tape, t)
else:
tape.watch(self._tape, t)
@tf_contextlib.contextmanager
def stop_recording(self):
"""Temporarily stops recording operations on this tape.
Operations executed while this context manager is active will not be
recorded on the tape. This is useful for reducing the memory used by tracing
all computations.
For example:
>>> x = tf.constant(4.0)
>>> with tf.GradientTape() as tape:
... with tape.stop_recording():
... y = x ** 2
>>> dy_dx = tape.gradient(y, x)
>>> print(dy_dx)
None
Yields:
None
Raises:
RuntimeError: if the tape is not currently recording.
"""
if self._tape is None:
raise RuntimeError(
"Trying to stop recording a tape which is not recording.")
self._pop_tape()
try:
yield
finally:
self._push_tape()
def reset(self):
"""Clears all information stored in this tape.
Equivalent to exiting and reentering the tape context manager with a new
tape. For example, the two following code blocks are equivalent:
```
with tf.GradientTape() as t:
loss = loss_fn()
with tf.GradientTape() as t:
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
# The following is equivalent to the above
with tf.GradientTape() as t:
loss = loss_fn()
t.reset()
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
```
This is useful if you don't want to exit the context manager for the tape,
or can't because the desired reset point is inside a control flow construct:
```
with tf.GradientTape() as t:
loss = ...
if loss > k:
t.reset()
```
"""
self._pop_tape()
self._tape = None
self._push_tape()
def watched_variables(self):
"""Returns variables watched by this tape in order of construction."""
if self._tape is not None:
self._watched_variables = self._tape.watched_variables()
return self._watched_variables
def gradient(self,
target,
sources,
output_gradients=None,
unconnected_gradients=UnconnectedGradients.NONE):
"""Computes the gradient using operations recorded in context of this tape.
Note: Unless you set `persistent=True` a GradientTape can only be used to
compute one set of gradients (or jacobians).
In addition to Tensors, gradient also supports RaggedTensors. For example,
>>> x = tf.ragged.constant([[1.0, 2.0], [3.0]])
>>> with tf.GradientTape() as g:
... g.watch(x)
... y = x * x
>>> g.gradient(y, x)
<tf.RaggedTensor [[2.0, 4.0], [6.0]]>
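    As a minimal sketch of `unconnected_gradients`, a source that is not
    connected to the target yields `None` by default, or zeros when
    `tf.UnconnectedGradients.ZERO` is requested,
    >>> x = tf.constant(1.0)
    >>> z = tf.constant(2.0)
    >>> with tf.GradientTape() as g:
    ...   g.watch(x)
    ...   y = x * x
    >>> g.gradient(y, z, unconnected_gradients=tf.UnconnectedGradients.ZERO)
    <tf.Tensor: shape=(), dtype=float32, numpy=0.0>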
Args:
target: a list or nested structure of Tensors or Variables or
CompositeTensors to be differentiated.
sources: a list or nested structure of Tensors or Variables or
CompositeTensors. `target` will be differentiated against elements in
`sources`.
output_gradients: a list of gradients, one for each differentiable
element of target. Defaults to None.
unconnected_gradients: a value which can either hold 'none' or 'zero' and
alters the value which will be returned if the target and sources are
unconnected. The possible values and effects are detailed in
'UnconnectedGradients' and it defaults to 'none'.
Returns:
a list or nested structure of Tensors (or IndexedSlices, or None, or
CompositeTensor), one for each element in `sources`. Returned structure
is the same as the structure of `sources`.
Raises:
RuntimeError: If called on a used, non-persistent tape.
RuntimeError: If called inside the context of the tape.
TypeError: If the target is a None object.
ValueError: If the target is a variable or if unconnected gradients is
called with an unknown value.
"""
if self._tape is None:
raise RuntimeError("A non-persistent GradientTape can only be used to "
"compute one set of gradients (or jacobians)")
if self._recording:
if not self._persistent:
self._pop_tape()
else:
logging.log_first_n(
logging.WARN, "Calling GradientTape.gradient on a persistent "
"tape inside its context is significantly less "
"efficient than calling it outside the context (it "
"causes the gradient ops to be recorded on the "
"tape, leading to increased CPU and memory usage). "
"Only call GradientTape.gradient inside the "
"context if you actually want to trace the "
"gradient in order to compute higher order "
"derivatives.", 1)
if target is None:
raise TypeError("Argument `target` should be a list or nested structure"
" of Tensors, Variables or CompositeTensors to be "
"differentiated, but received None.")
flat_targets = []
for t in nest.flatten(target):
if not backprop_util.IsTrainable(t):
logging.vlog(
logging.WARN, "The dtype of the target tensor must be "
"floating (e.g. tf.float32) when calling GradientTape.gradient, "
"got %r", t.dtype)
if resource_variable_ops.is_resource_variable(t):
with self:
t = ops.convert_to_tensor(t)
flat_targets.append(t)
flat_targets = composite_tensor_gradient.get_flat_tensors_for_gradients(
flat_targets)
flat_sources = nest.flatten(sources)
for t in flat_sources:
if not backprop_util.IsTrainable(t):
logging.vlog(
logging.WARN, "The dtype of the source tensor must be "
"floating (e.g. tf.float32) when calling GradientTape.gradient, "
"got %r", t.dtype)
if getattr(t, "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors yet."
)
flat_sources_raw = flat_sources
flat_sources = composite_tensor_gradient.get_flat_tensors_for_gradients(
flat_sources)
flat_sources = [_handle_or_self(x) for x in flat_sources]
if output_gradients is not None:
output_gradients = nest.flatten(output_gradients)
output_gradients = (
composite_tensor_gradient.get_flat_tensors_for_gradients(
output_gradients))
output_gradients = [None if x is None else ops.convert_to_tensor(x)
for x in output_gradients]
flat_grad = imperative_grad.imperative_grad(
self._tape,
flat_targets,
flat_sources,
output_gradients=output_gradients,
sources_raw=flat_sources_raw,
unconnected_gradients=unconnected_gradients)
if not self._persistent:
# Keep track of watched variables before setting tape to None
self._watched_variables = self._tape.watched_variables()
self._tape = None
flat_grad = composite_tensor_gradient.replace_flat_tensors_for_gradients(
flat_sources_raw, flat_grad)
grad = nest.pack_sequence_as(sources, flat_grad)
return grad
def jacobian(self,
target,
sources,
unconnected_gradients=UnconnectedGradients.NONE,
parallel_iterations=None,
experimental_use_pfor=True):
"""Computes the jacobian using operations recorded in context of this tape.
Note: Unless you set `persistent=True` a GradientTape can only be used to
compute one set of gradients (or jacobians).
Note: By default the jacobian implementation uses parallel for (pfor), which
creates a tf.function under the hood for each jacobian call. For better
performance, and to avoid recompilation and vectorization rewrites on each
call, enclose GradientTape code in @tf.function.
    See [wikipedia
article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant)
for the definition of a Jacobian.
Example usage:
```python
with tf.GradientTape() as g:
x = tf.constant([1.0, 2.0])
g.watch(x)
y = x * x
jacobian = g.jacobian(y, x)
# jacobian value is [[2., 0.], [0., 4.]]
```
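    A minimal sketch of the `@tf.function` wrapping suggested above (the
    function name and input here are illustrative):
    ```python
    @tf.function
    def compute_jacobian(x):
      with tf.GradientTape() as g:
        g.watch(x)
        y = tf.sin(x)
      return g.jacobian(y, x)
    jacobian = compute_jacobian(tf.constant([1.0, 2.0]))
    # jacobian value is approximately [[0.54, 0.], [0., -0.42]]
    ```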
Args:
target: Tensor to be differentiated.
sources: a list or nested structure of Tensors or Variables. `target`
will be differentiated against elements in `sources`.
unconnected_gradients: a value which can either hold 'none' or 'zero' and
alters the value which will be returned if the target and sources are
unconnected. The possible values and effects are detailed in
'UnconnectedGradients' and it defaults to 'none'.
parallel_iterations: A knob to control how many iterations are dispatched
in parallel. This knob can be used to control the total memory usage.
experimental_use_pfor: If true, vectorizes the jacobian computation. Else
falls back to a sequential while_loop. Vectorization can sometimes fail
or lead to excessive memory usage. This option can be used to disable
vectorization in such cases.
Returns:
A list or nested structure of Tensors (or None), one for each element in
`sources`. Returned structure is the same as the structure of `sources`.
Note if any gradient is sparse (IndexedSlices), jacobian function
currently makes it dense and returns a Tensor instead. This may change in
the future.
Raises:
RuntimeError: If called on a used, non-persistent tape.
RuntimeError: If called on a non-persistent tape with eager execution
enabled and without enabling experimental_use_pfor.
ValueError: If vectorization of jacobian computation fails.
"""
if self._tape is None:
raise RuntimeError("A non-persistent GradientTape can only be used to "
"compute one set of gradients (or jacobians)")
flat_sources = nest.flatten(sources)
target_static_shape = target.shape
target_shape = array_ops.shape(target)
# Note that we push and pop the tape here and below. This is needed since we
# need gradients through the enclosed operations.
with self._ensure_recording():
target = array_ops.reshape(target, [-1])
def loop_fn(i):
with self._ensure_recording():
y = array_ops.gather(target, i)
return self.gradient(y, flat_sources,
unconnected_gradients=unconnected_gradients)
try:
target_size = int(target.shape[0])
except TypeError:
target_size = array_ops.shape(target)[0]
if experimental_use_pfor:
try:
output = pfor_ops.pfor(loop_fn, target_size,
parallel_iterations=parallel_iterations)
except ValueError as err:
six.reraise(
ValueError,
ValueError(
str(err) + "\nEncountered an exception while vectorizing the "
"jacobian computation. Vectorization can be disabled by setting"
" experimental_use_pfor to False."),
sys.exc_info()[2])
else:
if context.executing_eagerly() and not self._persistent:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the jacobian with eager execution enabled and with "
" experimental_use_pfor set to False.")
output = pfor_ops.for_loop(
loop_fn, [target.dtype] * len(flat_sources), target_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(output):
if out is not None:
new_shape = array_ops.concat(
[target_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
if context.executing_eagerly():
out.set_shape(target_static_shape.concatenate(flat_sources[i].shape))
output[i] = out
return nest.pack_sequence_as(sources, output)
def batch_jacobian(self,
target,
source,
unconnected_gradients=UnconnectedGradients.NONE,
parallel_iterations=None,
experimental_use_pfor=True):
"""Computes and stacks per-example jacobians.
See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant)
for the definition of a Jacobian. This function is essentially an efficient
implementation of the following:
`tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`.
Note that compared to `GradientTape.jacobian` which computes gradient of
each output value w.r.t each input value, this function is useful when
`target[i,...]` is independent of `source[j,...]` for `j != i`. This
assumption allows more efficient computation as compared to
    `GradientTape.jacobian`. The output, as well as intermediate activations,
    is lower dimensional and avoids the redundant zeros that the full jacobian
    computation would produce under the independence assumption.
Note: Unless you set `persistent=True` a GradientTape can only be used to
compute one set of gradients (or jacobians).
Note: By default the batch_jacobian implementation uses parallel for (pfor),
which creates a tf.function under the hood for each batch_jacobian call.
For better performance, and to avoid recompilation and vectorization
rewrites on each call, enclose GradientTape code in @tf.function.
Example usage:
```python
with tf.GradientTape() as g:
x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)
g.watch(x)
y = x * x
batch_jacobian = g.batch_jacobian(y, x)
# batch_jacobian is [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
```
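    A minimal sketch (with illustrative shapes) contrasting the result shape
    with `GradientTape.jacobian`:
    ```python
    x = tf.random.normal([8, 5])
    with tf.GradientTape(persistent=True) as g:
      g.watch(x)
      y = tf.tanh(x)
    g.jacobian(y, x).shape        # TensorShape([8, 5, 8, 5])
    g.batch_jacobian(y, x).shape  # TensorShape([8, 5, 5])
    ```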
Args:
target: A tensor with rank 2 or higher and with shape [b, y1, ..., y_n].
`target[i,...]` should only depend on `source[i,...]`.
source: A tensor with rank 2 or higher and with shape [b, x1, ..., x_m].
unconnected_gradients: a value which can either hold 'none' or 'zero' and
alters the value which will be returned if the target and sources are
unconnected. The possible values and effects are detailed in
'UnconnectedGradients' and it defaults to 'none'.
parallel_iterations: A knob to control how many iterations are dispatched
in parallel. This knob can be used to control the total memory usage.
experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else
uses a tf.while_loop.
Returns:
A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
RuntimeError: If called on a used, non-persistent tape.
RuntimeError: If called on a non-persistent tape with eager execution
enabled and without enabling experimental_use_pfor.
ValueError: If vectorization of jacobian computation fails or if first
dimension of `target` and `source` do not match.
"""
if self._tape is None:
raise RuntimeError("A non-persistent GradientTape can only be used to"
"compute one set of gradients (or jacobians)")
target_shape = target.shape
if target_shape.rank is None:
dim = tensor_shape.Dimension(None)
else:
dim = target_shape.dims[0]
if not (target_shape.with_rank_at_least(2) and
source.shape.with_rank_at_least(2) and
dim.is_compatible_with(source.shape[0])):
raise ValueError(
"Need first dimension of target shape (%s) and "
"source shape (%s) to match." % (target.shape, source.shape))
if target_shape.is_fully_defined():
batch_size = int(target_shape[0])
target_row_size = target_shape.num_elements() // batch_size
else:
target_shape = array_ops.shape(target)
batch_size = target_shape[0]
target_row_size = array_ops.size(target) // batch_size
source_shape = array_ops.shape(source)
# Flatten target to 2-D.
# Note that we push and pop the tape here and below. This is needed since we
# need gradients through the enclosed operations.
with self._ensure_recording():
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, source_shape[0])]):
target = array_ops.reshape(target, [batch_size, target_row_size])
run_once = False
def loop_fn(i):
nonlocal run_once
if run_once and not self._persistent:
if parallel_iterations is not None:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian with parallel_iterations.")
else:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian.")
run_once = True
with self._ensure_recording():
y = array_ops.gather(target, i, axis=1)
return self.gradient(y, source,
unconnected_gradients=unconnected_gradients)
if experimental_use_pfor:
try:
output = pfor_ops.pfor(loop_fn, target_row_size,
parallel_iterations=parallel_iterations)
except ValueError as err:
six.reraise(
ValueError,
ValueError(
str(err) + "\nEncountered an exception while vectorizing the "
"batch_jacobian computation. Vectorization can be disabled by "
"setting experimental_use_pfor to False."),
sys.exc_info()[2])
else:
if context.executing_eagerly() and not self._persistent:
raise RuntimeError(
"GradientTape must be created with persistent=True"
" to compute the batch_jacobian with eager execution enabled and "
" with experimental_use_pfor set to False.")
output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size,
parallel_iterations=parallel_iterations)
new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0)
if output is None:
# Note that this block is returning zeros when it could use `None` to
# represent unconnected gradients. This is to maintain compatibility with
# the previous behavior, which ignored `unconnected_gradients`.
output = array_ops.zeros(new_shape, target.dtype)
return output
else:
output = array_ops.reshape(output,
[target_row_size, batch_size, -1])
output = array_ops.transpose(output, [1, 0, 2])
output = array_ops.reshape(output, new_shape)
return output
|
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
    from fx.test_gradual_type import AnnotationsTest  # noqa: F401
    from fx.test_gradual_type import TypeCheckerTest  # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
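# Used by test_wrap_with_submodule below: a free function wrapped by name that
# takes an nn.Module (BatchNorm1d) as one of its arguments.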
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
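# Keep references to the original function objects so the wrap tests can assert
# that wrap()/@wrap do not permanently replace or patch them.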
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
            for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
        output_shape : Optional[torch.Size] = None
        output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
        # Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
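    # Interpreter executes a GraphModule node-by-node in Python, so a plain
    # run() should reproduce the module's own output exactly.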
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
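    # Overriding call_function/call_method on an Interpreter swaps op
    # implementations at execution time without modifying the underlying graph.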
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
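    # Passing initial_env to Interpreter.run() pre-seeds results for specific
    # nodes; those nodes are not re-executed, enabling partial evaluation.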
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
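    # With garbage_collect_values enabled (the default), the Interpreter frees
    # intermediate values after their last use, so only the output remains in env.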
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
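    # Transformer is an Interpreter that emits a new GraphModule instead of
    # running ops, so the default transform() is a semantics-preserving copy.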
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
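    # Node.replace_all_uses_with() rewires every user of a node to point at the
    # replacement, after which the original node can be erased from the graph.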
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
                        the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
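    # Node.update_arg()/update_kwarg() replace a single positional or keyword
    # argument while keeping the graph's use-def information consistent.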
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
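    # Graph.graph_copy() copies another graph's nodes into this one, using the
    # provided val_map to remap values; seeding the map lets two graphs be
    # stitched together end-to-end.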
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
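    # Tracing with concrete_args specializes the graph on the given values and
    # inserts torch._assert guards that check them at runtime.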
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
        assert any(i.target == torch._assert for i in mod_true.graph.nodes)
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
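    # RewritingTracer rewrites Python `assert` statements into torch._assert
    # calls at the AST level so they show up as traceable nodes.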
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
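    # GraphModule.add_submodule()/delete_submodule() edit the module hierarchy by
    # qualified path; get_submodule()/get_parameter()/get_buffer() look entries up
    # the same way, and delete_all_unused_submodules() drops modules no longer
    # referenced by the graph.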
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
            def __call__(self, *args):
                self.called = True
                self.calling = True
                try:
                    return super(type(self), self).__call__(*args)
                finally:
                    # Reset the flag once the call finishes; the original code placed
                    # this line after `return`, where it could never execute.
                    self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
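    # For pytree inputs, concrete_args containing PH placeholders makes the
    # generated code flatten the containers via tree_flatten_spec, producing one
    # graph placeholder per flattened leaf.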
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
            # Every flattened leaf (PH placeholder or concrete value) becomes one
            # placeholder in the specialized graph.
            num_flat_args = len(pytree.tree_flatten(inp)[0])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
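    # A custom CodeGen can change the calling convention of the generated
    # forward(): gen_fn_def controls the signature, additional_globals injects
    # names into the generated code's namespace, and process_inputs/
    # process_outputs adapt values at the boundary.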
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
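# get_signature_for_torch_op returns the inspect.Signature objects derived from
# an op's registered schemas (one per overload); the test below checks that every
# OpInfo sample binds to at least one of them.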
class TestOperatorSignatures(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
                default_repr = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
                maybe_default = f' = {default_repr}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
        raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}. '
                           f'Please add support for this type and confirm with the '
                           f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
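# A minimal sketch of the decoration the check above asks for. The decorator path
# (`torch.fx._compatibility.compatibility`) is taken from the error message; the
# decorated function below is hypothetical and is kept inside a comment so that it
# does not register itself with the back-compat bookkeeping these tests rely on:
#
#     from torch.fx._compatibility import compatibility
#
#     @compatibility(is_backward_compatible=True)
#     def my_public_fx_helper(gm):
#         ...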
class TestFunctionalTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": CONTROL_FLOW,
"max_pool2d_with_indices": CONTROL_FLOW,
"max_pool3d_with_indices": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# No signature or Object is not supported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
                exc, err = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(exc, err):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
                    exc, err = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(exc, err):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
# See LICENSE for licensing information.
#
# Copyright (c) 2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
from policy import replacement_policy as rp
from globals import OPTS
class sim_sram:
"""
This is a simulation module for SRAMs.
It is used in sim_cache to read and write data.
"""
def __init__(self, num_words, num_ways, num_rows):
self.num_words = num_words
self.num_ways = num_ways
self.num_rows = num_rows
def reset(self):
""" Reset all arrays of the SRAM. """
self.valid_array = [[0] * self.num_ways for _ in range(self.num_rows)]
self.dirty_array = [[0] * self.num_ways for _ in range(self.num_rows)]
self.tag_array = [[0] * self.num_ways for _ in range(self.num_rows)]
self.data_array = [[[0] * self.num_words for _ in range(self.num_ways)] for _ in range(self.num_rows)]
if OPTS.replacement_policy == rp.FIFO:
self.fifo_array = [0] * self.num_rows
if OPTS.replacement_policy == rp.LRU:
self.lru_array = [[0] * self.num_ways for _ in range(self.num_rows)]
def read_valid(self, set, way):
""" Return the valid bit of given set and way. """
return self.valid_array[set][way]
def read_dirty(self, set, way):
""" Return the dirty bit of given set and way. """
return self.dirty_array[set][way]
def read_tag(self, set, way):
""" Return the tag of given set and way. """
return self.tag_array[set][way]
    def read_fifo(self, set):
        """ Return the FIFO bits of given set. """
return self.fifo_array[set]
def read_lru(self, set, way):
""" Return the LRU bits of given set and way. """
return self.lru_array[set][way]
def read_word(self, set, way, offset):
""" Return the data word of given set, way, and offset. """
return self.data_array[set][way][offset]
def read_line(self, set, way):
""" Return the data line of given set and way. """
return self.data_array[set][way].copy()
def write_valid(self, set, way, data):
""" Write the valid bit of given set and way. """
self.valid_array[set][way] = data
def write_dirty(self, set, way, data):
""" Write the dirty bit of given set and way. """
self.dirty_array[set][way] = data
def write_tag(self, set, way, data):
""" Write the tag of given set and way. """
self.tag_array[set][way] = data
    def write_fifo(self, set, data):
        """ Write the FIFO bits of given set. """
self.fifo_array[set] = data % self.num_ways
def write_lru(self, set, way, data):
""" Write the LRU bits of given set and way. """
self.lru_array[set][way] = data
def write_word(self, set, way, offset, data):
""" Write the data word of given set, way, and offset. """
self.data_array[set][way][offset] = data
def write_line(self, set, way, data):
""" Write the data line of given set and way. """
self.data_array[set][way] = data
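# A minimal usage sketch (kept in a comment so nothing runs on import). It assumes
# OPTS.replacement_policy has already been configured by the surrounding project;
# the sizes and values below are hypothetical:
#
#     sram = sim_sram(num_words=4, num_ways=2, num_rows=16)
#     sram.reset()
#     sram.write_tag(0, 1, 0x3A)
#     sram.write_word(0, 1, 2, 0xDEADBEEF)
#     assert sram.read_word(0, 1, 2) == 0xDEADBEEF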
|
# coding: utf-8
# maposmatic, the web front-end of the MapOSMatic city map generation system
# Copyright (C) 2009 David Decotigny
# Copyright (C) 2009 Frédéric Lehobey
# Copyright (C) 2009 David Mentré
# Copyright (C) 2009 Maxime Petazzoni
# Copyright (C) 2009 Thomas Petazzoni
# Copyright (C) 2009 Gaël Utard
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, sys
sys.path.append("/home/maposmatic/maposmatic")
sys.path.append("/home/maposmatic/ocitysmap")
os.environ["DJANGO_SETTINGS_MODULE"] = 'www.settings'
os.environ["MAPOSMATIC_LOG_FILE"] = "/home/maposmatic/maposmatic/logs/maposmatic-www.log"
os.environ["PGCONNECT_TIMEOUT"] = "1"
import django.core.handlers.wsgi
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import csv
import os
import tensorflow.compat.v1 as tf
class Example(object):
__metaclass__ = abc.ABCMeta
def __init__(self, task_name):
self.task_name = task_name
class Task(object):
"""Override this class to add a new task."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, long_sequences=False):
self.config = config
self.name = name
self.long_sequences = long_sequences
def get_examples(self, split):
return self.load_data(split + ".tsv", split)
def get_test_splits(self):
return ["test"]
def load_data(self, fname, split):
examples = self._create_examples(
read_tsv(os.path.join(self.config.raw_data_dir(self.name), fname),
max_lines=50 if self.config.debug else None),
split)
return examples
@abc.abstractmethod
def _create_examples(self, lines, split):
pass
@abc.abstractmethod
def get_scorer(self):
pass
@abc.abstractmethod
def get_feature_specs(self):
pass
@abc.abstractmethod
def featurize(self, example, is_training):
pass
@abc.abstractmethod
def get_prediction_module(self, bert_model, features, is_training,
percent_done):
pass
def __repr__(self):
return "Task(" + self.name + ")"
def read_tsv(input_file, quotechar=None, max_lines=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for i, line in enumerate(reader):
if max_lines and i >= max_lines:
break
lines.append(line)
return lines
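# A minimal sketch of a concrete subclass (hypothetical names; it only illustrates
# which hooks a new task must override and is not part of the original code).
class _SketchClassificationTask(Task):
  """Hypothetical task showing the abstract methods that need overriding."""
  def _create_examples(self, lines, split):
    # Assumes each TSV line holds at least one text column.
    return [Example(self.name) for _ in lines]
  def get_scorer(self):
    raise NotImplementedError("Return a project-specific scorer here.")
  def get_feature_specs(self):
    raise NotImplementedError("Return the tf.Example feature specs here.")
  def featurize(self, example, is_training):
    raise NotImplementedError("Convert an Example into model features here.")
  def get_prediction_module(self, bert_model, features, is_training,
                            percent_done):
    raise NotImplementedError("Build the output head on top of BERT here.")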
|
# -*- coding: utf-8 -*-
# Scrapy settings for amazon_scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'amazon_scraper'
SPIDER_MODULES = ['amazon_scraper.spiders']
NEWSPIDER_MODULE = 'amazon_scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'amazon_scraper_3 (+your@email.here)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 2
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'amazon_scraper.middlewares.AmazonScraperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'amazon_scraper.middlewares.AmazonScraperDownloaderMiddleware': 543,
#}
# DOWNLOADER_MIDDLEWARES = {
# 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
# 'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 400,
# }
# RANDOM_UA_TYPE = "desktop"
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'amazon_scraper.pipelines.AmazonScraperPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
class AveragePooling2D(pooling_2d.Pooling2D):
"""Average pooling over a set of 2d planes."""
# TODO(beam2d): Support cover_all mode.
def forward_cpu(self, x):
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
col = conv.im2col_cpu(x[0], self.kh, self.kw, self.sy, self.sx,
self.ph, self.pw)
y = col.mean(axis=(2, 3))
return y,
def forward_gpu(self, x):
if chainer.should_use_cudnn('>=auto'):
self.retain_inputs((0,))
return super(AveragePooling2D, self).forward_gpu(x)
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
n, c, h, w = x[0].shape
y_h = conv.get_conv_outsize(h, self.kh, self.sy, self.ph)
y_w = conv.get_conv_outsize(w, self.kw, self.sx, self.pw)
y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x[0].dtype)
coeff = 1. / (self.kh * self.kw)
kern = cuda.elementwise(
'raw T in, int32 h, int32 w,'
'int32 out_h, int32 out_w, int32 kh, int32 kw,'
'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',
'T out', '''
int c0 = i / (out_h * out_w);
int out_y = i / out_w % out_h;
int out_x = i % out_w;
int in_y_0 = max(0, out_y * sy - ph);
int in_y_1 = min(h, out_y * sy + kh - ph);
int in_x_0 = max(0, out_x * sx - pw);
int in_x_1 = min(w, out_x * sx + kw - pw);
T val = 0;
for (int y = in_y_0; y < in_y_1; ++y) {
int offset_y = w * (y + h * c0);
for (int x = in_x_0; x < in_x_1; ++x) {
val = val + in[x + offset_y];
}
}
out = val * coeff;
''', 'avg_pool_fwd')
kern(x[0].reduced_view(), h, w, y_h, y_w, self.kh, self.kw,
self.sy, self.sx, self.ph, self.pw, coeff, y)
return y,
def backward(self, indexes, gy):
return AveragePooling2DGrad(self).apply(gy)
def create_pool_desc(self):
return cuda.cudnn.create_pooling_descriptor(
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING)
class AveragePooling2DGrad(function_node.FunctionNode):
def __init__(self, apool2d):
self.kh = apool2d.kh
self.kw = apool2d.kw
self.sy = apool2d.sy
self.sx = apool2d.sx
self.ph = apool2d.ph
self.pw = apool2d.pw
self._used_cudnn = apool2d._used_cudnn
if not self._used_cudnn:
self._in_shape = apool2d._in_shape
self._in_dtype = apool2d._in_dtype
self.apool2d = apool2d
def forward_cpu(self, gy):
h, w = self._in_shape[2:]
gcol = numpy.tile(gy[0][:, :, None, None],
(1, 1, self.kh, self.kw, 1, 1))
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
gx /= self.kh * self.kw
return gx,
def forward_gpu(self, gy):
if self._used_cudnn:
x, = self.apool2d.get_retained_inputs()
return self.apool2d.backward_gpu((x.data,), gy)
n, c, h, w = self._in_shape
y_h, y_w = gy[0].shape[2:]
gx = cuda.cupy.empty(self._in_shape, self._in_dtype)
coeff = 1. / (self.kh * self.kw)
cuda.elementwise(
'raw T gy, int32 h, int32 w,'
'int32 out_h, int32 out_w, int32 kh, int32 kw,'
'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',
'T gx',
'''
int c0 = i / (h * w);
int y = i / w % h + ph;
int x = i % w + pw;
int out_y_0 = max(0, (y - kh + sy) / sy);
int out_y_1 = min(out_h, (y + sy) / sy);
int out_x_0 = max(0, (x - kw + sx) / sx);
int out_x_1 = min(out_w, (x + sx) / sx);
int hc0 = out_h * c0;
T val = 0;
for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {
for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {
val = val + gy[out_x + out_w * (out_y + hc0)];
}
}
gx = val * coeff;
''', 'avg_pool_bwd')(gy[0].reduced_view(),
h, w, y_h, y_w, self.kh, self.kw,
self.sy, self.sx, self.ph, self.pw, coeff,
gx)
return gx,
def backward(self, indexes, grad_outputs):
return AveragePooling2D(
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
False).apply(grad_outputs)
def average_pooling_2d(x, ksize, stride=None, pad=0):
"""Spatial average pooling function.
This function acts similarly to :class:`~functions.Convolution2D`, but
it computes the average of input spatial patch for each channel
without any parameter instead of computing the inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k)`` are equivalent.
stride (int or pair of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
            specified, then it uses the same stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
.. note::
This function currently does not support ``cover_all`` mode as
:func:`max_pooling_2d`. Average pooling runs in non-cover-all mode.
"""
return AveragePooling2D(ksize, stride, pad, False).apply((x,))[0]
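# A quick self-check of the CPU path above (a sketch; guarded so it only runs when
# this file is executed directly, and the input shape is arbitrary).
if __name__ == '__main__':
    _x = numpy.random.rand(1, 3, 8, 8).astype(numpy.float32)
    _y = average_pooling_2d(_x, ksize=2, stride=2)
    # Each 2x2 patch is averaged, halving the spatial dimensions.
    assert _y.shape == (1, 3, 4, 4)
    print('average_pooling_2d output shape:', _y.shape)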
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/5 19:35
# @Author : cendeavor
# @File : forms.py
# @Software: PyCharm
from flask_wtf import Form, FlaskForm
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import Required, DataRequired, EqualTo
class NameForm(Form):
name = StringField('What is your name?', validators=[Required()])
submit = SubmitField('注册')
class LoginForm(FlaskForm):
    """Login form class."""
username = StringField('用户名', validators=[DataRequired()])
password = PasswordField('密码', validators=[DataRequired()])
submit = SubmitField('登录')
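# A minimal usage sketch for LoginForm inside a Flask view (kept in a comment; it
# assumes an existing `app` with SECRET_KEY configured and a hypothetical
# login.html template, none of which are defined in this module):
#
#     @app.route('/login', methods=['GET', 'POST'])
#     def login():
#         form = LoginForm()
#         if form.validate_on_submit():
#             return redirect(url_for('index'))  # credentials checked elsewhere
#         return render_template('login.html', form=form)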
|
from __future__ import division
from tkinter import *
from tkinter import messagebox as tkMessageBox
from PIL import Image, ImageTk
import os
import glob
import random
w0 = 1  # original image width
h0 = 1  # original image height
# colors for the bboxes
COLORS = ['red','blue','yellow','pink','cyan','green','black']
#image size
SIZE = 256,256
# target image size after scaling
DEST_SIZE = 500,500
class LabelTool():
def __init__(self,master):
#set up the main frame
self.parent = master
self.parent.title('LabelTool')
self.frame = Frame(self.parent)
self.frame.pack(fill=BOTH,expand=1)
self.parent.resizable(width=TRUE,height=TRUE)
#initialize global state
self.imageDir = ''
self.imageList = []
self.egDir = ''
self.egList = []
self.outDir =''
self.cur = 0
self.total = 0
self.category =0
self.imagename=''
self.labelfilename=''
self.tkimg = None
# initialize mouse state
self.STATE={}
self.STATE['click']=0
self.STATE['x'],self.STATE['y']=0,0
#reference to bbox
self.bboxIdList = []
self.bboxId = None
self.bboxList = []
self.hl=None
self.vl=None
# ----------------- GUI stuff ---------------------
# dir entry & load
self.label = Label(self.frame,text='Image Dir:')
self.label.grid(row=0,column=0,sticky=E)
self.entry=Entry(self.frame)
self.entry.grid(row=0, column=1, sticky=W + E)
self.ldBtn = Button(self.frame, text="Load", command=self.loadDir)
self.ldBtn.grid(row=0, column=2, sticky=W + E)
# main panel for labeling
self.mainPanel = Canvas(self.frame, cursor='tcross')
self.mainPanel.bind("<Button-1>", self.mouseClick)
self.mainPanel.bind("<Motion>", self.mouseMove)
        self.parent.bind("<Escape>", self.cancelBBox)  # press <Escape> to cancel current bbox
        self.parent.bind("s", self.cancelBBox)
        self.parent.bind("a", self.prevImage)  # press 'a' to go backward
self.parent.bind("d", self.nextImage) # press 'd' to go forward
self.mainPanel.grid(row=1, column=1, rowspan=4, sticky=W + N)
# showing bbox info & delete bbox
self.lb1 = Label(self.frame, text='Bounding boxes:')
self.lb1.grid(row=1, column=2, sticky=W + N)
self.listbox = Listbox(self.frame, width=28, height=12)
self.listbox.grid(row=2, column=2, sticky=N)
self.btnDel = Button(self.frame, text='Delete', command=self.delBBox)
self.btnDel.grid(row=3, column=2, sticky=W + E + N)
self.btnClear = Button(self.frame, text='ClearAll', command=self.clearBBox)
self.btnClear.grid(row=4, column=2, sticky=W + E + N)
# control panel for image navigation
self.ctrPanel = Frame(self.frame)
self.ctrPanel.grid(row=5, column=1, columnspan=2, sticky=W + E)
self.prevBtn = Button(self.ctrPanel, text='<< Prev', width=10, command=self.prevImage)
self.prevBtn.pack(side=LEFT, padx=5, pady=3)
self.nextBtn = Button(self.ctrPanel, text='Next >>', width=10, command=self.nextImage)
self.nextBtn.pack(side=LEFT, padx=5, pady=3)
self.progLabel = Label(self.ctrPanel, text="Progress: / ")
self.progLabel.pack(side=LEFT, padx=5)
self.tmpLabel = Label(self.ctrPanel, text="Go to Image No.")
self.tmpLabel.pack(side=LEFT, padx=5)
self.idxEntry = Entry(self.ctrPanel, width=5)
self.idxEntry.pack(side=LEFT)
self.goBtn = Button(self.ctrPanel, text='Go', command=self.gotoImage)
self.goBtn.pack(side=LEFT)
        # example panel for illustration
self.egPanel = Frame(self.frame, border=10)
self.egPanel.grid(row=1, column=0, rowspan=5, sticky=N)
self.tmpLabel2 = Label(self.egPanel, text="Examples:")
self.tmpLabel2.pack(side=TOP, pady=5)
self.egLabels = []
for i in range(3):
self.egLabels.append(Label(self.egPanel))
self.egLabels[-1].pack(side=TOP)
# display mouse position
self.disp = Label(self.ctrPanel, text='')
self.disp.pack(side=RIGHT)
self.frame.columnconfigure(1, weight=1)
self.frame.rowconfigure(4, weight=1)
def loadDir(self,dbg=False):
if not dbg:
s = self.entry.get()
self.parent.focus()
self.category=int(s)
else:
s = r'D:\Data store file\labelGUI'
print('self.category =%d' % (self.category))
self.imageDir = os.path.join(r'./images', '%03d' % (self.category))
print(self.imageDir)
self.imageList = glob.glob(os.path.join(self.imageDir, '*.jpg'))
if len(self.imageList) == 0:
            print('No .jpg images found in the specified dir!')
return
else:
            print('num=%d' % (len(self.imageList)))
# default to the 1st image in the collection
self.cur = 1
self.total = len(self.imageList)
# set up output dir
self.outDir = os.path.join(r'./labels', '%03d' % (self.category))
if not os.path.exists(self.outDir):
os.mkdir(self.outDir)
# load example bboxes
self.egDir = os.path.join(r'./Examples', '%03d' % (self.category))
# if not os.path.exists(self.egDir):
# return
filelist = glob.glob(os.path.join(self.egDir, '*.jpg'))
self.tmp = []
self.egList = []
random.shuffle(filelist)
for (i, f) in enumerate(filelist):
if i == 3:
break
im = Image.open(f)
r = min(SIZE[0] / im.size[0], SIZE[1] / im.size[1])
new_size = int(r * im.size[0]), int(r * im.size[1])
self.tmp.append(im.resize(new_size, Image.ANTIALIAS))
self.egList.append(ImageTk.PhotoImage(self.tmp[-1]))
self.egLabels[i].config(image=self.egList[-1], width=SIZE[0], height=SIZE[1])
self.loadImage()
        print('%d images loaded from %s' % (self.total, s))
def loadImage(self):
# load image
imagepath = self.imageList[self.cur - 1]
pil_image = Image.open(imagepath)
global w0,h0
w0,h0=pil_image.size
        # scale to the target size
pil_image = pil_image.resize((DEST_SIZE[0], DEST_SIZE[1]), Image.ANTIALIAS)
# pil_image = imgresize(w, h, w_box, h_box, pil_image)
self.img = pil_image
self.tkimg = ImageTk.PhotoImage(pil_image)
self.mainPanel.config(width=max(self.tkimg.width(), 400), height=max(self.tkimg.height(), 400))
self.mainPanel.create_image(0, 0, image=self.tkimg, anchor=NW)
self.progLabel.config(text="%04d/%04d" % (self.cur, self.total))
# load labels
self.clearBBox()
self.imagename = os.path.split(imagepath)[-1].split('.')[0]
labelname = self.imagename + '.txt'
self.labelfilename = os.path.join(self.outDir, labelname)
bbox_cnt = 0
if os.path.exists(self.labelfilename):
with open(self.labelfilename) as f:
for (i, line) in enumerate(f):
if i == 0:
bbox_cnt = int(line.strip())
continue
                    print(line)
                    tmp = [(t.strip()) for t in line.split()]
                    print("********************")
                    print(DEST_SIZE)
                    # tmp = (0.1, 0.3, 0.5, 0.5)
                    print("tmp[0,1,2,3]===%.2f, %.2f, %.2f, %.2f" % (float(tmp[0]), float(tmp[1]), float(tmp[2]), float(tmp[3])))
                    # print("%.2f,%.2f,%.2f,%.2f" % (tmp[0], tmp[1], tmp[2], tmp[3]))
                    print("********************")
# tx = (10, 20, 30, 40)
# self.bboxList.append(tuple(tx))
self.bboxList.append(tuple(tmp))
tmp[0] = float(tmp[0])
tmp[1] = float(tmp[1])
tmp[2] = float(tmp[2])
tmp[3] = float(tmp[3])
tx0 = int(tmp[0] * DEST_SIZE[0])
ty0 = int(tmp[1] * DEST_SIZE[1])
tx1 = int(tmp[2] * DEST_SIZE[0])
ty1 = int(tmp[3] * DEST_SIZE[1])
                    print("tx0, ty0, tx1, ty1")
                    print(tx0, ty0, tx1, ty1)
tmpId = self.mainPanel.create_rectangle(tx0, ty0, tx1, ty1, \
width=2, \
outline=COLORS[(len(self.bboxList) - 1) % len(COLORS)])
self.bboxIdList.append(tmpId)
self.listbox.insert(END, '(%.2f,%.2f)-(%.2f,%.2f)' % (tmp[0], tmp[1], tmp[2], tmp[3]))
# self.listbox.insert(END, '(%d, %d) -> (%d, %d)' %(tmp[0], tmp[1], tmp[2], tmp[3]))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg=COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
def saveImage(self):
# print "-----1--self.bboxList---------"
        print(self.bboxList)
# print "-----2--self.bboxList---------"
with open(self.labelfilename, 'w') as f:
f.write('%d\n' % len(self.bboxList))
for bbox in self.bboxList:
f.write(' '.join(map(str, bbox)) + '\n')
print('Image No. %d saved' % (self.cur))
def mouseClick(self, event):
if self.STATE['click'] == 0:
self.STATE['x'], self.STATE['y'] = event.x, event.y
else:
x1, x2 = min(self.STATE['x'], event.x), max(self.STATE['x'], event.x)
y1, y2 = min(self.STATE['y'], event.y), max(self.STATE['y'], event.y)
x1, x2 = x1 / DEST_SIZE[0], x2 / DEST_SIZE[0];
y1, y2 = y1 / DEST_SIZE[1], y2 / DEST_SIZE[1];
self.bboxList.append((x1, y1, x2, y2))
self.bboxIdList.append(self.bboxId)
self.bboxId = None
self.listbox.insert(END, '(%.2f, %.2f)-(%.2f, %.2f)' % (x1, y1, x2, y2))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg=COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
self.STATE['click'] = 1 - self.STATE['click']
def mouseMove(self, event):
self.disp.config(text='x: %.2f, y: %.2f' % (event.x / DEST_SIZE[0], event.y / DEST_SIZE[1]))
if self.tkimg:
if self.hl:
self.mainPanel.delete(self.hl)
self.hl = self.mainPanel.create_line(0, event.y, self.tkimg.width(), event.y, width=2)
if self.vl:
self.mainPanel.delete(self.vl)
self.vl = self.mainPanel.create_line(event.x, 0, event.x, self.tkimg.height(), width=2)
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
self.bboxId = self.mainPanel.create_rectangle(self.STATE['x'], self.STATE['y'], \
event.x, event.y, \
width=2, \
outline=COLORS[len(self.bboxList) % len(COLORS)])
def cancelBBox(self, event):
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
self.bboxId = None
self.STATE['click'] = 0
def delBBox(self):
sel = self.listbox.curselection()
if len(sel) != 1:
return
idx = int(sel[0])
self.mainPanel.delete(self.bboxIdList[idx])
self.bboxIdList.pop(idx)
self.bboxList.pop(idx)
self.listbox.delete(idx)
def clearBBox(self):
for idx in range(len(self.bboxIdList)):
self.mainPanel.delete(self.bboxIdList[idx])
self.listbox.delete(0, len(self.bboxList))
self.bboxIdList = []
self.bboxList = []
def prevImage(self, event=None):
self.saveImage()
if self.cur > 1:
self.cur -= 1
self.loadImage()
def nextImage(self, event=None):
self.saveImage()
if self.cur < self.total:
self.cur += 1
self.loadImage()
def gotoImage(self):
idx = int(self.idxEntry.get())
if 1 <= idx and idx <= self.total:
self.saveImage()
self.cur = idx
self.loadImage()
## def setImage(self, imagepath = r'test2.png'):
## self.img = Image.open(imagepath)
## self.tkimg = ImageTk.PhotoImage(self.img)
## self.mainPanel.config(width = self.tkimg.width())
## self.mainPanel.config(height = self.tkimg.height())
## self.mainPanel.create_image(0, 0, image = self.tkimg, anchor=NW)
def imgresize(w, h, w_box, h_box, pil_image):
'''
resize a pil_image object so it will fit into
a box of size w_box times h_box, but retain aspect ratio
'''
f1 = 1.0 * w_box / w # 1.0 forces float division in Python2
f2 = 1.0 * h_box / h
factor = min([f1, f2])
# print(f1, f2, factor) # test
# use best down-sizing filter
width = int(w * factor)
height = int(h * factor)
return pil_image.resize((width, height), Image.ANTIALIAS)
if __name__ == '__main__':
root = Tk()
tool = LabelTool(root)
root.mainloop()
|
# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
`adafruit_pyportal.network`
================================================================================
CircuitPython driver for Adafruit PyPortal.
* Author(s): Limor Fried, Kevin J. Walters, Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Hardware:**
* `Adafruit PyPortal <https://www.adafruit.com/product/4116>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import gc
# pylint: disable=unused-import
from adafruit_portalbase.network import (
NetworkBase,
CONTENT_JSON,
CONTENT_TEXT,
)
# pylint: enable=unused-import
from adafruit_pyportal.wifi import WiFi
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PyPortal.git"
# you'll need to pass in an io username, io key, width, height, format (bit depth), and then url!
IMAGE_CONVERTER_SERVICE = (
"https://io.adafruit.com/api/v2/%s/integrations/image-formatter?"
"x-aio-key=%s&width=%d&height=%d&output=BMP%d&url=%s"
)
class Network(NetworkBase):
"""Class representing the Adafruit PyPortal.
:param status_neopixel: The pin for the status NeoPixel. Use ``board.NEOPIXEL`` for the on-board
NeoPixel. Defaults to ``None``, not the status LED
:param esp: A passed ESP32 object, Can be used in cases where the ESP32 chip needs to be used
before calling the pyportal class. Defaults to ``None``.
:param busio.SPI external_spi: A previously declared spi object. Defaults to ``None``.
:param bool extract_values: If true, single-length fetched values are automatically extracted
from lists and tuples. Defaults to ``True``.
:param debug: Turn on debug print outs. Defaults to False.
:param convert_image: Determine whether or not to use the AdafruitIO image converter service.
Set as False if your image is already resized. Defaults to True.
:param image_url_path: The HTTP traversal path for a background image to display.
Defaults to ``None``.
:param image_json_path: The JSON traversal path for a background image to display. Defaults to
``None``.
:param image_resize: What size to resize the image we got from the json_path, make this a tuple
of the width and height you want. Defaults to ``None``.
:param image_position: The position of the image on the display as an (x, y) tuple. Defaults to
``None``.
:param image_dim_json_path: The JSON traversal path for the original dimensions of image tuple.
Used with fetch(). Defaults to ``None``.
"""
def __init__(
self,
*,
status_neopixel=None,
esp=None,
external_spi=None,
extract_values=True,
debug=False,
convert_image=True,
image_url_path=None,
image_json_path=None,
image_resize=None,
image_position=None,
image_dim_json_path=None,
secrets_data=None,
):
wifi = WiFi(status_neopixel=status_neopixel, esp=esp, external_spi=external_spi)
super().__init__(
wifi,
extract_values=extract_values,
debug=debug,
secrets_data=secrets_data,
)
self._convert_image = convert_image
self._image_json_path = image_json_path
self._image_url_path = image_url_path
self._image_resize = image_resize
self._image_position = image_position
self._image_dim_json_path = image_dim_json_path
gc.collect()
@property
def ip_address(self):
"""Return the IP Address nicely formatted"""
return self._wifi.esp.pretty_ip(self._wifi.esp.ip_address)
def image_converter_url(self, image_url, width, height, color_depth=16):
"""Generate a converted image url from the url passed in,
with the given width and height. aio_username and aio_key must be
set in secrets."""
try:
aio_username = self._secrets["aio_username"]
aio_key = self._secrets["aio_key"]
except KeyError as error:
            raise KeyError(
                "\n\nOur image converter service requires a login/password to rate-limit. Please register for a free adafruit.io account and place the user/key in your secrets file under 'aio_username' and 'aio_key'"  # pylint: disable=line-too-long
) from error
return IMAGE_CONVERTER_SERVICE % (
aio_username,
aio_key,
width,
height,
color_depth,
image_url,
)
# pylint: disable=too-many-branches, too-many-statements
def process_image(self, json_data, sd_card=False):
"""
Process image content
:param json_data: The JSON data that we can pluck values from
:param bool sd_card: Whether or not we have an SD card inserted
"""
filename = None
position = None
image_url = None
if self._image_url_path:
image_url = self._image_url_path
if self._image_json_path:
image_url = self.json_traverse(json_data, self._image_json_path)
iwidth = 0
iheight = 0
if self._image_dim_json_path:
iwidth = int(self.json_traverse(json_data, self._image_dim_json_path[0]))
iheight = int(self.json_traverse(json_data, self._image_dim_json_path[1]))
print("image dim:", iwidth, iheight)
if image_url:
print("original URL:", image_url)
if self._convert_image:
if iwidth < iheight:
image_url = self.image_converter_url(
image_url,
int(
self._image_resize[1]
* self._image_resize[1]
/ self._image_resize[0]
),
self._image_resize[1],
)
else:
image_url = self.image_converter_url(
image_url, self._image_resize[0], self._image_resize[1]
)
print("convert URL:", image_url)
# convert image to bitmap and cache
# print("**not actually wgetting**")
filename = "/cache.bmp"
chunk_size = 4096 # default chunk size is 12K (for QSPI)
if sd_card:
filename = "/sd" + filename
chunk_size = 512 # current bug in big SD writes -> stick to 1 block
try:
self.wget(image_url, filename, chunk_size=chunk_size)
except OSError as error:
raise OSError(
"""\n\nNo writable filesystem found for saving datastream. Insert an SD card or set internal filesystem to be unsafe by setting 'disable_concurrent_write_protection' in the mount options in boot.py""" # pylint: disable=line-too-long
) from error
except RuntimeError as error:
raise RuntimeError("wget didn't write a complete file") from error
if iwidth < iheight:
pwidth = int(
self._image_resize[1]
* self._image_resize[1]
/ self._image_resize[0]
)
position = (
self._image_position[0] + int((self._image_resize[0] - pwidth) / 2),
self._image_position[1],
)
else:
position = self._image_position
image_url = None
gc.collect()
return filename, position
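# A minimal construction sketch (kept in a comment because it needs PyPortal
# hardware and a secrets file; the resize and position values are hypothetical):
#
#     import board
#     network = Network(
#         status_neopixel=board.NEOPIXEL,
#         image_json_path=["url"],
#         image_resize=(320, 240),
#         image_position=(0, 0),
#     )
#     # filename, position = network.process_image(json_data, sd_card=True)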
|
import logging
import pandas as pd
from ast import literal_eval
from .constants import NUMBERING_RESULTS
from sadie.numbering.scheme_numbering import scheme_numbering
logger = logging.getLogger("NUMBERING")
class NumberingResults(pd.DataFrame):
def __init__(self, *args, scheme="", region_definition="", allowed_chains=[], allowed_species=[], **kwargs):
# use the __init__ method from DataFrame to ensure
# that we're inheriting the correct behavior
super(NumberingResults, self).__init__(*args, **kwargs)
# self["scheme"] = scheme
# self["region_definition"] = region_definition
# self["allowed_species"] = ",".join(allowed_species)
# self["allowed_chains"] = ",".join(allowed_chains)
# self._add_segment_regions()
@property
def _constructor(self):
return NumberingResults
def get_alignment_table(self) -> pd.DataFrame:
"""Get a numbered alignment table from the numbering and insertions
Returns
-------
pd.DataFrame
A dataframe with Id, chain_type, scheme and numbering. Values are the amino acid sequences
"""
all_dataframes = []
        # I'm not sure if there is a more efficient way to do this other than iterating through the df and pivoting each row
for index in range(len(self)):
all_dataframes.append(self._pivot_alignment(self.iloc[index]))
all_dataframes = pd.concat(all_dataframes)
all_dataframes = all_dataframes.sort_index(axis=1, level=[0, 1])
all_dataframes.columns = list(map(lambda x: str(x[0]) + x[1], all_dataframes.columns.values))
all_dataframes = all_dataframes.reset_index()
return self[["Id", "chain_type", "scheme"]].merge(all_dataframes, on="Id").copy()
def _get_region(self, row, start, end, segment_name):
with_segment = "".join(
list(
map(
lambda x: x[-1],
list(
filter(
lambda x: x[0] >= start and x[0] <= end,
list(
zip(
row["Numbering"],
row["Insertion"],
row["Numbered_Sequence"],
)
),
)
),
)
)
)
without_segment = with_segment.replace("-", "")
return pd.Series(
{
f"{segment_name}_gaps": with_segment,
f"{segment_name}_no_gaps": without_segment,
}
)
def _add_segment_regions(self) -> "NumberingResults":
"""Private method to delineate the framework and cdr boundaries from the numbering
Returns
-------
NumberingResults
Instance of NumberingResults
"""
return_frames = []
for group, sub_df in self.groupby(["scheme", "region_definition", "Chain"]):
numbering = group[0]
chain = {"H": "heavy", "KL": "light"}[group[-1]]
boundaries = group[1]
numbering_lookup = scheme_numbering[numbering][chain][boundaries]
for region in [
"fwr1_aa",
"cdr1_aa",
"fwr2_aa",
"cdr2_aa",
"fwr3_aa",
"cdr3_aa",
"fwr4_aa",
]:
_start = numbering_lookup[f"{region}_start"]
_end = numbering_lookup[f"{region}_end"]
sub_df = sub_df.join(self.apply(lambda x: self._get_region(x, _start, _end, region), axis=1))
return_frames.append(sub_df)
segmented_df = pd.concat(return_frames).reset_index(drop=True)
# everything preceding the antibody
segmented_df["leader"] = segmented_df[["sequence", "seqstart_index"]].apply(lambda x: x[0][: x[1]], axis=1)
# everything following the antibody. keyword tail will clash with pandas
segmented_df["follow"] = segmented_df[["sequence", "seqend_index"]].apply(lambda x: x[0][x[1] + 1 :], axis=1)
return segmented_df
def _pivot_alignment(self, row: pd.Series) -> pd.DataFrame:
"""Private method to pivot a segmented row into an alignment series
Parameters
----------
row : pd.Series
            individual Numbering result row
Returns
-------
pivoted dataframe
"""
pivoted_df = (
pd.DataFrame(
zip(row["Numbering"], row["Insertion"], row["Numbered_Sequence"]),
columns=["numbering", "insertion", "sequence"],
)
.assign(Id=row["Id"])
.pivot("Id", ["numbering", "insertion"], "sequence")
)
return pivoted_df
def get_sanatized_antibodies(self):
        # drop sequences that don't start at the first amino acid and don't end at the last amino acid.
return self[(self["seqstart_index"] == 0) & (self["seqend_index"] == self["sequence"].str.len() - 1)]
@staticmethod
def read_csv(*args, **kwargs):
return NumberingResults(
pd.read_csv(
*args,
index_col=0,
dtype=NUMBERING_RESULTS,
converters={"Numbering": literal_eval, "Insertion": literal_eval, "Numbered_Sequence": literal_eval},
**kwargs,
)
)
def drop_bad_numbering(self) -> "NumberingResults":
return self[(self["seqstart_index"] == 0) & (self["seqend_index"] == self["sequence"].str.len() - 1)]
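# A minimal usage sketch (kept in a comment; "numbering_results.csv" is a
# hypothetical file produced elsewhere in the sadie pipeline):
#
#     results = NumberingResults.read_csv("numbering_results.csv")
#     alignment = results.get_alignment_table()
#     clean = results.drop_bad_numbering()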
|
# -*- coding: utf-8 -*-
# pylint: disable=no-member
"""
This file contains tests with base functionality.
"""
def test_merge(merged):
"""
Test that all values from settings are present.
"""
assert hasattr(merged, 'SECRET_KEY')
assert hasattr(merged, 'STATIC_ROOT')
def test_override(merged, monkeypatch):
"""
This setting must be overridden in the testing.py
"""
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tests.settings.merged')
from django.conf import settings
# noinspection PyUnresolvedReferences
assert merged.STATIC_ROOT == settings.STATIC_ROOT
def test_recursion_inclusion(recursion):
"""
    Tests that the `include` function pulls each settings file in only once,
    which protects against infinite recursion.
"""
assert hasattr(recursion, 'RECURSION_OK')
def test_stacked_settings(stacked):
"""
    Tests that settings stacked through `include` are all loaded,
    so values from every layered file are present.
"""
assert hasattr(stacked, 'STACKED_BASE_LOADED')
assert hasattr(stacked, 'STACKED_DB_PERSISTENT')
|
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "share-34244.botics.co"
site_params = {
"name": "SHARE",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
|
"""
Django settings for do_it_django_prj project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^il&w&37030%c0kbg@9(h+k(jsps53_)brjyw)mksmj=*c^5vf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'single_pages',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'do_it_django_prj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'do_it_django_prj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, '_media')
|
import logging
import datetime
import os
def config_logs():
# Logfile
logfolder = "logs/"
logdate = datetime.datetime.now().strftime("%y-%m-%d_%H:%M") + "_"
logfile = "aerodust.log"
logpath = logfolder + logfile
#logpath = logfolder + logdate + logfile
if not os.path.exists(logfolder):
os.makedirs(logfolder)
# Format
logformat = '%(asctime)s %(levelname)s: %(message)s'
datefmt='%m/%d/%Y %I:%M:%S %p'
    # Get the root logger
rootLogger = logging.getLogger()
# Create a formatter
logFormatter = logging.Formatter(logformat, datefmt)
# Create and add the file stream handler to the logger
fileHandler = logging.FileHandler(logpath)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
# Create and add the console stream handler to the logger
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.INFO)
#rootLogger.setLevel(logging.DEBUG)
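# A small guarded demo of the configuration above (runs only when this file is
# executed directly; it writes to logs/aerodust.log as set up in config_logs).
if __name__ == "__main__":
    config_logs()
    logging.info("Logging configured; messages go to the console and %s", "logs/aerodust.log")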
|
# Run with Python 3
# Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause
"""\
HTTP server that can be used as a back end to Captive Web View applications.
The server is based around a Python3 Simple HTTP Server extended to pick files
from one of a number of directories.
The server will change directory to the common parent of all directories
specified.
"""
#
# Standard library imports, in alphabetic order.
#
# Module for command line switches.
# Tutorial: https://docs.python.org/3/howto/argparse.html
# Reference: https://docs.python.org/3/library/argparse.html
import argparse
#
# Module for HTTP server
# https://docs.python.org/3/library/http.server.html
from http.server import HTTPServer, SimpleHTTPRequestHandler
#
# JSON module.
# https://docs.python.org/3/library/json.html
import json
#
# Module for changing the current directory.
# https://docs.python.org/3/library/os.html#os.chdir
from os import chdir
#
# File path module.
# https://docs.python.org/3/library/os.path.html
import os.path
#
# Module for OO path handling.
# https://docs.python.org/3/library/pathlib.html
from pathlib import Path
#
# Module for recursive copy.
# https://docs.python.org/3/library/shutil.html
import shutil
#
# Module to create an HTTP server that spawns a thread for each request.
# https://docs.python.org/3/library/socketserver.html#module-socketserver
# The ThreadingMixIn is needed because of an apparent defect in Python, see:
# https://github.com/Microsoft/WSL/issues/1906
# https://bugs.python.org/issue31639
# The defect is fixed in 3.7 Python.
# TOTH: https://github.com/sjjhsjjh/blender-driver/blob/master/blender_driver/application/http.py#L45
from socketserver import ThreadingMixIn
#
# Module for manipulation of the import path.
# https://docs.python.org/3/library/sys.html#sys.path
import sys
#
# Module for text dedentation.
# Only used for --help description.
# https://docs.python.org/3/library/textwrap.html
import textwrap
def project_path(*segments):
return Path(__file__).resolve().parents[1].joinpath(*segments)
class Server(ThreadingMixIn, HTTPServer):
@property
def directories(self):
return self._directories
@directories.setter
def directories(self, directories):
self._directories = tuple(directories)
@property
def relativePaths(self):
return self._relativePaths
def path_for_file(self, filename):
filename = os.path.basename(filename)
if filename == "":
filename = "index.html"
for index, directory in enumerate(self.directories):
if directory.joinpath(filename).is_file():
return self.relativePaths[index].joinpath(filename)
raise ValueError('File "{}" not found.'.format(filename))
def handle_command(self, commandObject, httpHandler):
raise NotImplementedError(
"Server method `handle_command` must be set by Main subclass.")
@property
def start_message(self):
"""Message suitable for logging when the server is started."""
def directory_lines(width=80, indent=2):
# This array accumulates diagnostic logs. It is yield'd after
# everything, unless the final yield is commented out.
transcript = ["\n"]
for directory in self.directories:
first = True
lineLen = 0
for index, leg in enumerate(directory.parts):
if leg == os.path.sep and index == 0:
continue
append = ''.join(("" if index == 0 else os.path.sep, leg))
appendLen = len(append)
while True:
lineStart = False
transcript.extend('{:2d} {:2d} "{}"\n'.format(
lineLen, appendLen, append))
if lineLen == 0:
line = "{:<{indent}}".format(
">" if first else "", indent=indent)
lineLen += len(line)
yield "\n"
yield line
lineStart = True
if lineLen + appendLen > width:
if lineStart:
yield append
first = False
lineLen = 0
if lineStart:
break
else:
lineLen += appendLen
yield append
break
# Uncomment the following line to get diagnostic logs.
# yield "".join(transcript)
#
# Get the actual port number and server address. The port number could
# be different, if zero was specified.
address = self.server_address
return 'Starting HTTP server at http://{}:{} for:{}\ncd {}'.format(
'localhost' if address[0] == '127.0.0.1' else address[0]
, int(address[1])
, "".join(tuple(directory_lines()))
, os.path.commonpath(self.directories))
def serve_forever(self):
chdir(os.path.commonpath(self.directories))
fromDir = Path.cwd()
self._relativePaths = tuple(
directory.relative_to(fromDir) for directory in self.directories)
return super().serve_forever()
class Handler(SimpleHTTPRequestHandler):
def do_GET(self):
responsePath = None
# Check for resources that are allowed to be requested from root. Chrome
# seems to request everything other than the favicon with a path though.
try:
parted = self.path.rpartition("/")
if parted[0] == "" and (parted[1] == "/" or parted[1] == ""):
self.log_message("%s", 'Root resource "{}".'.format(self.path))
responsePath = self.server.path_for_file(self.path)
except ValueError as error:
self.send_error(404, str(error))
return
# Check for other resources in allowed directories.
directoryIndex = None
if responsePath is None:
effectivePath = (
self.path[1:] if self.path.startswith("/") else self.path)
for index, prefix in enumerate(self.server.relativePaths):
if effectivePath.startswith(str(prefix)):
directoryIndex = index
break
if directoryIndex is None:
self.send_error(403)
return
# By now, it's determined that the path in the request is one that
# is allowed by the server. It might have been requested from a
# resource in one directory but be in another. The path_for_file()
# method takes care of that.
try:
responsePath = self.server.path_for_file(self.path)
except ValueError as error:
self.send_error(404, str(error))
return
self.log_message("%s", 'Response path "{}" "{}" {}.'.format(
self.path, responsePath, directoryIndex))
if responsePath is not None:
self.path = str(responsePath)
super().do_GET()
def _send_object(self, responseObject):
responseBytes = json.dumps(responseObject).encode()
self.log_message("%s", 'Response object {} {}.'.format(
responseObject, responseBytes))
self.send_response(200)
self.end_headers()
self.wfile.write(responseBytes)
def do_POST(self):
# TOTH: https://github.com/sjjhsjjh/blender-driver/blob/master/blender_driver/application/http.py#L263
contentLengthHeader = self.headers.get('Content-Length')
contentLength = (
0 if contentLengthHeader is None else int(contentLengthHeader))
contentJSON = (
self.rfile.read(contentLength).decode('utf-8') if contentLength > 0
else None)
content = None if contentJSON is None else json.loads(contentJSON)
self.log_message("%s", "POST object {}.".format(
json.dumps(content, indent=2)))
if content is None:
self.send_error(400)
else:
try:
response = self.server.handle_command(content, self)
if response is not None:
self._send_object(response)
except:
self.send_error(501)
raise
# self.path is ignored.
class Main:
def __init__(self, argv):
argumentParser = argparse.ArgumentParser(
# formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(__doc__))
argumentParser.add_argument(
'-p', '--port', type=int, default=8001, help=
'Port number. Default: 8001.')
argumentParser.add_argument(
dest='directories', metavar='directory', type=str, nargs='+', help=
            'Directory from which to serve web content.')
self.arguments = argumentParser.parse_args(argv[1:])
self.server = Server(('localhost', self.arguments.port), Handler)
self.server.handle_command = self.handle_command
def __call__(self):
self.server.directories = (
*(
Path(directory).resolve()
for directory in self.arguments.directories
), project_path(
'forAndroid', 'captivewebview', 'src', 'main', 'assets',
'library')
)
for directory in self.server.directories:
if not directory.is_dir():
raise ValueError(f'Not a directory "{directory}".')
print(self.server.start_message)
self.server.serve_forever()
def handle_command(self, commandObject, httpHandler):
raise NotImplementedError(
"Method `handle_command` must be implemented by Main subclass.")
class CaptivityMain(Main):
def __init__(self, argv):
argv = (*argv, str(project_path(
'forAndroid', 'Captivity', 'src', 'main', 'assets', 'UserInterface'
)))
return super().__init__(argv)
# Override.
def handle_command(self, commandObject, httpHandler):
        # The following code would send a redirect to the client. Unfortunately,
        # that causes the client to resubmit the POST to the new location,
        # instead of simply loading another page.
#
# if "load" in commandObject:
# responseBytes = json.dumps({}).encode()
# httpHandler.log_message("%s", 'Redirect {}.'.format(
# responseBytes))
# httpHandler.send_response(303, json.dumps(commandObject))
# httpHandler.send_header('Location', commandObject["load"])
# httpHandler.end_headers()
# httpHandler.wfile.write(responseBytes)
# return None
# TOTH for ** syntax: https://stackoverflow.com/a/26853961
return {
**commandObject,
"confirm": " ".join((self.__class__.__name__,
httpHandler.server_version,
httpHandler.sys_version))
}
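    # Illustrative only (assumed payload, not part of the project): posting
    # {"load": "index.html"} to this handler would come back echoed with a
    # "confirm" field, roughly
    # {"load": "index.html", "confirm": "CaptivityMain SimpleHTTP/... Python/..."}.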
if __name__ == '__main__':
sys.exit(CaptivityMain(sys.argv)())
|
import sys
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal
from quadrotor_env import quad, render, animation
from model import ActorCritic
"""
MECHANICAL ENGINEERING POST-GRADUATE PROGRAM
UNIVERSIDADE FEDERAL DO ABC - SANTO ANDRÉ, BRASIL
NOME: RAFAEL COSTA FERNANDES
RA: 21201920754
E−MAIL: COSTA.FERNANDES@UFABC.EDU.BR
DESCRIPTION:
PPO testing algorithm (no training, only forward passes)
"""
time_int_step = 0.01
max_timesteps = 1000
T = 5
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
env = quad(time_int_step, max_timesteps, euler=0, direct_control=1, deep_learning=1, T=T, debug=0)
state_dim = env.deep_learning_in_size
policy = ActorCritic(state_dim, action_dim=4, action_std=0).to(device)
#LOAD TRAINED POLICY
try:
policy.load_state_dict(torch.load('PPO_continuous_solved_drone.pth',map_location=device))
print('Saved policy loaded')
except:
print('Could not load policy')
sys.exit(1)
#PLOTTER SETUP
print_states = [0, 2, 4, 6, 7, 8, 9, 10, 11, 12]
plot_labels = ['x', 'y', 'z', 'phi', 'theta', 'psi', 'f1', 'f2', 'f3', 'f4']
line_styles = ['-', '-', '-', '--', '--', '--', ':', ':', ':', ':',]
plotter = render(print_states, plot_labels, line_styles, depth_plot_list=0, animate=0)
# DO ONE RANDOM EPISODE
plotter.clear()
state = env.reset()
first_state = np.concatenate((env.previous_state[0:6],env.ang,np.zeros(4)))
plotter.add(0,first_state)
done = False
t=0
while not done:
t+=time_int_step
action = policy.actor(torch.FloatTensor(state).to(device)).cpu().detach().numpy()
state, _, done = env.step(action)
plot_state = np.concatenate((env.state[0:6],env.ang,action))
plotter.add(t,plot_state)
print('Env Solved, printing...')
plotter.plot()
# plotter.depth_plot()
an = animation()
an.animate(plotter.states)
plotter.clear()
|
from flask import Flask, escape, request
from flask import send_file
from Graph.plot import Plot
app = Flask(__name__)
@app.route('/', methods=["POST"])
def hello():
print(request.method)
    req_data = request.get_json()
print(req_data)
name = request.args.get("name", "World")
return f'Hello, {escape(name)}!'
@app.route('/get_image',methods=["POST"])
def get_image():
    req_data = request.get_json()
    plot = Plot()
    plot.labels_x = list(req_data["labels_x"])
    plot.labels_y = req_data["label_y"]
    plot.title = req_data["title"]
    plot.legend = list(req_data["legend"])
    plot.valueGroup1 = list(req_data["valueGroup"][0])
    plot.valueGroup2 = list(req_data["valueGroup"][1])
    plot.filename = req_data["filename"]
    if req_data["type"] == "1":
        plot.createGroupBarPlot()
    elif req_data["type"] == "2":
        plot.createPieChart()
return send_file(req_data["filename"], mimetype='image/png')
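# Illustrative request body for /get_image (field values are made up; only the
# field names are taken from the code above):
# {
#   "labels_x": ["Q1", "Q2"], "label_y": "Sales", "title": "Quarterly sales",
#   "legend": ["2019", "2020"], "valueGroup": [[1, 2], [3, 4]],
#   "filename": "chart.png", "type": "1"
# }
# where "type" "1" selects the grouped bar plot and "2" the pie chart.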
|
DUMMY_MENU_ID = 999999
DUMMY_MENU_SLUG = 'SLUGGOESHERE'
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
import unittest
import numpy as np
from datasets import load_dataset
from transformers import Wav2Vec2Config, is_flax_available
from transformers.testing_utils import (
is_librosa_available,
is_pyctcdecode_available,
require_flax,
require_librosa,
require_pyctcdecode,
require_soundfile,
slow,
)
from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
import optax
from flax.traverse_util import flatten_dict
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_flax_wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2GumbelVectorQuantizer,
FlaxWav2Vec2Model,
_compute_mask_indices,
_sample_negative_indices,
)
if is_pyctcdecode_available():
from transformers import Wav2Vec2ProcessorWithLM
if is_librosa_available():
import librosa
class FlaxWav2Vec2ModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024, # speech is longer
is_training=False,
hidden_size=24,
feat_extract_norm="layer",
feat_extract_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(32, 32, 32),
conv_stride=(4, 4, 4),
conv_kernel=(8, 8, 8),
conv_bias=False,
num_conv_pos_embeddings=16,
num_conv_pos_embedding_groups=2,
num_hidden_layers=4,
num_attention_heads=2,
hidden_dropout_prob=0.1, # this is most likely not correctly set yet
intermediate_size=20,
layer_norm_eps=1e-5,
hidden_act="gelu",
initializer_range=0.02,
vocab_size=32,
do_stable_layer_norm=True,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_dropout = feat_extract_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = conv_dim
self.conv_stride = conv_stride
self.conv_kernel = conv_kernel
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.intermediate_size = intermediate_size
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.scope = scope
output_seq_length = self.seq_length
for kernel, stride in zip(self.conv_kernel, self.conv_stride):
output_seq_length = (output_seq_length - (kernel - 1)) / stride
self.output_seq_length = int(math.ceil(output_seq_length))
self.encoder_seq_length = self.output_seq_length
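        # Worked example with the defaults above, for orientation only:
        # seq_length=1024 through three conv layers of kernel 8 / stride 4
        # gives (1024-7)/4 = 254.25, then 61.8125, then 13.70, so
        # output_seq_length = ceil(13.70) = 14 frames per example.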
def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = Wav2Vec2Config(
do_stable_layer_norm=self.do_stable_layer_norm,
hidden_size=self.hidden_size,
feat_extract_norm=self.feat_extract_norm,
feat_extract_dropout=self.feat_extract_dropout,
feat_extract_activation=self.feat_extract_activation,
conv_dim=self.conv_dim,
conv_stride=self.conv_stride,
conv_kernel=self.conv_kernel,
conv_bias=self.conv_bias,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
hidden_dropout_prob=self.hidden_dropout_prob,
intermediate_size=self.intermediate_size,
layer_norm_eps=self.layer_norm_eps,
hidden_act=self.hidden_act,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
)
return config, input_values, attention_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_values, attention_mask = config_and_inputs
inputs_dict = {"input_values": input_values, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxWav2Vec2ModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (
(FlaxWav2Vec2Model, FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining) if is_flax_available() else ()
)
def setUp(self):
self.model_tester = FlaxWav2Vec2ModelTester(self)
def test_train(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_values = inputs_dict["input_values"]
attention_mask = inputs_dict["attention_mask"]
model = FlaxWav2Vec2ForPreTraining(config)
features_shape = (
input_values.shape[0],
model._get_feat_extract_output_lengths(np.array(input_values.shape[1])),
)
batch_size, sequence_length = features_shape[:2]
mask_prob = 0.5
mask_length = 4
mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
dropout_rng, gumbel_rng = jax.random.split(jax.random.PRNGKey(0))
output = model(
input_values,
attention_mask=attention_mask,
mask_time_indices=mask_time_indices,
train=True,
dropout_rng=dropout_rng,
gumbel_rng=gumbel_rng,
)[0]
self.assertTrue(output.shape == (batch_size, sequence_length, model.config.proj_codevector_dim))
# overwrite because of `input_values`
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_values", "attention_mask"]
self.assertListEqual(arg_names[:2], expected_arg_names)
# overwrite because of `input_values`
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def model_jitted(input_values, attention_mask=None, **kwargs):
return model(input_values=input_values, attention_mask=attention_mask, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_freeze_feature_encoder(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_values = inputs_dict["input_values"]
attention_mask = inputs_dict["attention_mask"]
model = FlaxWav2Vec2ForPreTraining(config)
params = model.params
# dummy loss function
def compute_loss(
params, input_values, attention_mask, freeze_feature_encoder: bool = False, epsilon: float = 1e-8
):
outputs = model(
input_values,
attention_mask=attention_mask,
freeze_feature_encoder=freeze_feature_encoder,
params=params,
)
# compute cosine similarity of projected and projected_quantized states
cosine_sim = optax.cosine_similarity(
outputs.projected_states, outputs.projected_quantized_states, epsilon=epsilon
)
loss = cosine_sim.sum()
return loss, outputs.to_tuple()
# transform the loss function to get the gradients
grad_fn = jax.value_and_grad(compute_loss, has_aux=True)
# compute loss, outputs and gradients for unfrozen model
(loss, outputs), grads = grad_fn(params, input_values, attention_mask, freeze_feature_encoder=False)
# compare to loss, outputs and gradients for frozen model
(loss_frozen, outputs_frozen), grads_frozen = grad_fn(
params, input_values, attention_mask, freeze_feature_encoder=True
)
# ensure that the outputs and losses remain precisely equal
for output, output_frozen in zip(outputs, outputs_frozen):
self.assertTrue((output == output_frozen).all())
self.assertEqual(loss, loss_frozen)
grads = flatten_dict(grads)
grads_frozen = flatten_dict(grads_frozen)
# ensure that the dicts of gradients contain the same keys
self.assertEqual(grads.keys(), grads_frozen.keys())
# ensure that the gradients of the feature extractor layers are precisely zero when frozen and contain non-zero entries when unfrozen
feature_extractor_grads = tuple(grads[k] for k in grads if "feature_extractor" in k)
feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" in k)
for feature_extractor_grad, feature_extractor_grad_frozen in zip(
feature_extractor_grads, feature_extractor_grads_frozen
):
self.assertTrue((feature_extractor_grad_frozen == 0.0).all())
self.assertTrue((feature_extractor_grad > 0.0).any())
# ensure that the gradients of all unfrozen layers remain equal, i.e. all layers excluding the frozen 'feature_extractor'
grads = tuple(grads[k] for k in grads if "feature_extractor" not in k)
grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if "feature_extractor" not in k)
for grad, grad_frozen in zip(grads, grads_frozen):
self.assertTrue((grad == grad_frozen).all())
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True)
outputs = model(np.ones((1, 1024), dtype="f4"))
self.assertIsNotNone(outputs)
@require_flax
class FlaxWav2Vec2UtilsTest(unittest.TestCase):
def test_compute_mask_indices(self):
batch_size = 4
sequence_length = 60
mask_prob = 0.5
mask_length = 1
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])
def test_compute_mask_indices_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)
        # because of overlap, the masks don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller than or equal to it
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
def test_compute_mask_indices_attn_mask_overlap(self):
batch_size = 4
sequence_length = 80
mask_prob = 0.5
mask_length = 4
attention_mask = np.ones((batch_size, sequence_length), dtype=np.int32)
attention_mask[:2, sequence_length // 2 :] = 0
mask = _compute_mask_indices(
(batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask
)
for batch_sum in mask.sum(axis=-1):
self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)
self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)
def test_compute_perplexity(self):
probs = np.arange(100).reshape(2, 5, 10) / 100
ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs)
self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3)
# mask half of the input
        mask = np.ones((2,), dtype=bool)  # plain bool: np.bool is removed in recent NumPy
mask[0] = 0
ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask)
self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3)
def test_sample_negatives(self):
batch_size = 2
sequence_length = 10
hidden_size = 4
num_negatives = 3
features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(
sequence_length, hidden_size
        )  # each value in the vector consists of the same value
features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))
negative_indices = _sample_negative_indices(features.shape, num_negatives)
features = features.reshape(-1, hidden_size) # BTC => (BxT)C
# take negative vectors from sampled indices
sampled_negatives = features[negative_indices.reshape(-1)]
negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(
2, 0, 1, 3
)
self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))
# make sure no negatively sampled vector is actually a positive one
for negative in negatives:
self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)
# make sure that full vectors are sampled and not values of vectors
# => this means that `unique()` yields a single value for `hidden_size` dim
self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))
def test_sample_negatives_with_attn_mask(self):
batch_size = 2
sequence_length = 10
hidden_size = 4
num_negatives = 3
features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(
sequence_length, hidden_size
        )  # each value in the vector consists of the same value
# second half of last input tensor is padded
attention_mask = np.ones((batch_size, sequence_length), dtype=np.int8)
attention_mask[-1, sequence_length // 2 :] = 0
forbidden_indices = (
np.arange(sequence_length // 2, sequence_length, dtype=np.int32) + (batch_size - 1) * sequence_length
).tolist()
features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))
negative_indices = _sample_negative_indices(features.shape, num_negatives, attention_mask=attention_mask)
# make sure that no padding tokens are sampled
self.assertTrue(all([idx not in negative_indices for idx in forbidden_indices]))
features = features.reshape(-1, hidden_size) # BTC => (BxT)C
# take negative vectors from sampled indices
sampled_negatives = features[negative_indices.reshape(-1)]
negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(
2, 0, 1, 3
)
self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))
# make sure no negatively sampled vector is actually a positive one
for negative in negatives:
self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)
# make sure that full vectors are sampled and not just slices of vectors
# => this means that `unique()` yields a single value for `hidden_size` dim
self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))
@require_flax
@require_soundfile
@slow
class FlaxWav2Vec2ModelIntegrationTest(unittest.TestCase):
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id").filter(
lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]
)[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_inference_ctc_robust_batched(self):
model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", from_pt=True)
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True)
input_speech = self._load_datasamples(4)
inputs = processor(input_speech, return_tensors="np", padding=True)
input_values = inputs.input_values
attention_mask = inputs.attention_mask
logits = model(input_values, attention_mask=attention_mask).logits
predicted_ids = jnp.argmax(logits, axis=-1)
predicted_trans = processor.batch_decode(predicted_ids)
EXPECTED_TRANSCRIPTIONS = [
"a man said to the universe sir i exist",
"sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore",
"the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about",
"his instant panic was followed by a small sharp blow high on his chest",
]
self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
def test_inference_pretrained(self):
model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60", from_pt=True)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-large-lv60", return_attention_mask=True
)
input_speech = self._load_datasamples(2)
inputs_dict = feature_extractor(input_speech, return_tensors="np", padding=True)
features_shape = (
inputs_dict["input_values"].shape[0],
model._get_feat_extract_output_lengths(np.array(inputs_dict["input_values"].shape[1])),
)
mask_time_indices = _compute_mask_indices(
features_shape,
model.config.mask_time_prob,
model.config.mask_time_length,
min_masks=2,
)
outputs = model(
inputs_dict.input_values,
attention_mask=inputs_dict.attention_mask,
mask_time_indices=mask_time_indices,
)
# compute cosine similarity
cosine_sim = optax.cosine_similarity(
outputs.projected_states, outputs.projected_quantized_states, epsilon=1e-8
)
# retrieve cosine sim of masked features
cosine_sim_masked = cosine_sim[mask_time_indices]
# ... now compare to randomly initialized model
config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-large-lv60")
model_rand = FlaxWav2Vec2ForPreTraining(config)
outputs_rand = model_rand(
inputs_dict.input_values,
attention_mask=inputs_dict.attention_mask,
mask_time_indices=mask_time_indices,
)
# compute cosine similarity
cosine_sim_rand = optax.cosine_similarity(
outputs_rand.projected_states, outputs_rand.projected_quantized_states
)
# retrieve cosine sim of masked features
cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices]
# a pretrained wav2vec2 model has learned to predict the quantized latent states
# => the cosine similarity between quantized states and predicted states > 0.5
# a random wav2vec2 model has not learned to predict the quantized latent states
# => the cosine similarity between quantized states and predicted states is very likely < 0.1
self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0)
@require_pyctcdecode
@require_librosa
def test_wav2vec2_with_lm(self):
ds = load_dataset("common_voice", "es", split="test", streaming=True)
sample = next(iter(ds))
resampled_audio = librosa.resample(sample["audio"]["array"], 48_000, 16_000)
model = FlaxWav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")
processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm")
input_values = processor(resampled_audio, return_tensors="np").input_values
logits = model(input_values).logits
transcription = processor.batch_decode(np.array(logits)).text
self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero")
|
import os
def require(file, *args):
with open(os.path.join(os.path.dirname(file), *args), "r") as fh:
source = fh.read()
return source
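# Hypothetical usage (the path segments are illustrative only):
#   source = require(__file__, "templates", "snippet.js")
# which reads ./templates/snippet.js relative to the calling module's file.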
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model function definition, including both architecture and loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import coco_metric
import efficientdet_arch
import hparams_config
import iou_utils
import nms_np
import retinanet_arch
import utils
from keras import anchors
from keras import postprocess
_DEFAULT_BATCH_SIZE = 64
def update_learning_rate_schedule_parameters(params):
"""Updates params that are related to the learning rate schedule."""
# params['batch_size'] is per-shard within model_fn if strategy=tpu.
batch_size = (
params['batch_size'] * params['num_shards']
if params['strategy'] == 'tpu' else params['batch_size'])
# Learning rate is proportional to the batch size
params['adjusted_learning_rate'] = (
params['learning_rate'] * batch_size / _DEFAULT_BATCH_SIZE)
steps_per_epoch = params['num_examples_per_epoch'] / batch_size
params['lr_warmup_step'] = int(params['lr_warmup_epoch'] * steps_per_epoch)
params['first_lr_drop_step'] = int(params['first_lr_drop_epoch'] *
steps_per_epoch)
params['second_lr_drop_step'] = int(params['second_lr_drop_epoch'] *
steps_per_epoch)
params['total_steps'] = int(params['num_epochs'] * steps_per_epoch)
params['steps_per_epoch'] = steps_per_epoch
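  # Worked example (numbers are illustrative only): with strategy='tpu',
  # a per-shard batch_size of 8 and num_shards=16, the global batch is 128,
  # so adjusted_learning_rate = learning_rate * 128 / 64 = 2x the configured
  # rate, and steps_per_epoch = num_examples_per_epoch / 128.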
def stepwise_lr_schedule(adjusted_learning_rate, lr_warmup_init, lr_warmup_step,
first_lr_drop_step, second_lr_drop_step, global_step):
"""Handles linear scaling rule, gradual warmup, and LR decay."""
# lr_warmup_init is the starting learning rate; the learning rate is linearly
# scaled up to the full learning rate after `lr_warmup_step` before decaying.
logging.info('LR schedule method: stepwise')
linear_warmup = (
lr_warmup_init +
(tf.cast(global_step, dtype=tf.float32) / lr_warmup_step *
(adjusted_learning_rate - lr_warmup_init)))
learning_rate = tf.where(global_step < lr_warmup_step, linear_warmup,
adjusted_learning_rate)
lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step],
[0.01, second_lr_drop_step]]
for mult, start_global_step in lr_schedule:
learning_rate = tf.where(global_step < start_global_step, learning_rate,
adjusted_learning_rate * mult)
return learning_rate
def cosine_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, total_steps,
step):
logging.info('LR schedule method: cosine')
linear_warmup = (
lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *
(adjusted_lr - lr_warmup_init)))
decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)
cosine_lr = 0.5 * adjusted_lr * (
1 + tf.cos(np.pi * tf.cast(step, tf.float32) / decay_steps))
return tf.where(step < lr_warmup_step, linear_warmup, cosine_lr)
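  # Shape of this schedule with assumed numbers (illustrative only): for
  # adjusted_lr=0.08, lr_warmup_step=1000 and total_steps=10000,
  # decay_steps=9000; the LR warms up linearly to 0.08 by step 1000, equals
  # 0.5*0.08*(1+cos(pi*4500/9000)) = 0.04 at step 4500, and reaches 0 at
  # step 9000 because the cosine argument uses the raw step count.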
def polynomial_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, power,
total_steps, step):
logging.info('LR schedule method: polynomial')
linear_warmup = (
lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *
(adjusted_lr - lr_warmup_init)))
polynomial_lr = adjusted_lr * tf.pow(
1 - (tf.cast(step, tf.float32) / total_steps), power)
return tf.where(step < lr_warmup_step, linear_warmup, polynomial_lr)
def learning_rate_schedule(params, global_step):
"""Learning rate schedule based on global step."""
lr_decay_method = params['lr_decay_method']
if lr_decay_method == 'stepwise':
return stepwise_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'],
params['first_lr_drop_step'],
params['second_lr_drop_step'], global_step)
if lr_decay_method == 'cosine':
return cosine_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'], params['total_steps'],
global_step)
if lr_decay_method == 'polynomial':
return polynomial_lr_schedule(params['adjusted_learning_rate'],
params['lr_warmup_init'],
params['lr_warmup_step'],
params['poly_lr_power'],
params['total_steps'], global_step)
if lr_decay_method == 'constant':
return params['adjusted_learning_rate']
raise ValueError('unknown lr_decay_method: {}'.format(lr_decay_method))
def focal_loss(y_pred, y_true, alpha, gamma, normalizer, label_smoothing=0.0):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Args:
y_pred: A float32 tensor of size [batch, height_in, width_in,
num_predictions].
y_true: A float32 tensor of size [batch, height_in, width_in,
num_predictions].
alpha: A float32 scalar multiplying alpha to the loss from positive examples
and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
normalizer: Divide loss by this value.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
Returns:
loss: A float32 scalar representing normalized total loss.
"""
with tf.name_scope('focal_loss'):
alpha = tf.convert_to_tensor(alpha, dtype=y_pred.dtype)
gamma = tf.convert_to_tensor(gamma, dtype=y_pred.dtype)
# compute focal loss multipliers before label smoothing, such that it will
# not blow up the loss.
pred_prob = tf.sigmoid(y_pred)
p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
modulating_factor = (1.0 - p_t) ** gamma
# apply label smoothing for cross_entropy for each entry.
y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
# compute the final loss and return
return alpha_factor * modulating_factor * ce / normalizer
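# A minimal NumPy sketch of the same per-element focal loss, for intuition
# only; it mirrors the TF graph version above but is not used by the model.
def _focal_loss_reference_np(y_pred, y_true, alpha, gamma, normalizer,
                             label_smoothing=0.0):
  """Reference (non-TF) focal loss on plain arrays of logits and 0/1 labels."""
  pred_prob = 1.0 / (1.0 + np.exp(-y_pred))  # sigmoid(logits)
  p_t = y_true * pred_prob + (1 - y_true) * (1 - pred_prob)
  alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
  modulating_factor = (1.0 - p_t) ** gamma
  y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
  # Numerically stable sigmoid cross entropy:
  #   max(x, 0) - x * z + log(1 + exp(-|x|))
  ce = (np.maximum(y_pred, 0) - y_pred * y_true
        + np.log1p(np.exp(-np.abs(y_pred))))
  return alpha_factor * modulating_factor * ce / normalizer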
def _box_loss(box_outputs, box_targets, num_positives, delta=0.1):
"""Computes box regression loss."""
# delta is typically around the mean value of regression target.
  # for instance, the regression targets of a 512x512 input with 6 anchors on
# P3-P7 pyramid is about [0.1, 0.1, 0.2, 0.2].
normalizer = num_positives * 4.0
mask = tf.not_equal(box_targets, 0.0)
box_loss = tf.losses.huber_loss(
box_targets,
box_outputs,
weights=mask,
delta=delta,
reduction=tf.losses.Reduction.SUM)
box_loss /= normalizer
return box_loss
def _box_iou_loss(box_outputs, box_targets, num_positives, iou_loss_type):
"""Computes box iou loss."""
normalizer = num_positives * 4.0
box_iou_loss = iou_utils.iou_loss(box_outputs, box_targets, iou_loss_type)
box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer
return box_iou_loss
def detection_loss(cls_outputs, box_outputs, labels, params):
"""Computes total detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
cls_outputs: an OrderDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
labels: the dictionary that returned from dataloader that includes
groundtruth targets.
params: the dictionary including training parameters specified in
      default_hparams function in this file.
Returns:
    total_loss: a float tensor representing the total loss, reduced from the
      class and box losses of all levels.
    cls_loss: a float tensor representing the total class loss.
    box_loss: a float tensor representing the total box regression loss.
    box_iou_loss: a float tensor representing the total box iou loss.
"""
# Sum all positives in a batch for normalization and avoid zero
# num_positives_sum, which would lead to inf loss during training
num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0
levels = cls_outputs.keys()
cls_losses = []
box_losses = []
for level in levels:
# Onehot encoding for classification labels.
cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % level],
params['num_classes'])
if params['data_format'] == 'channels_first':
bs, _, width, height, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, -1, width, height])
else:
bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list()
cls_targets_at_level = tf.reshape(cls_targets_at_level,
[bs, width, height, -1])
box_targets_at_level = labels['box_targets_%d' % level]
cls_loss = focal_loss(
cls_outputs[level],
cls_targets_at_level,
params['alpha'],
params['gamma'],
normalizer=num_positives_sum,
label_smoothing=params['label_smoothing'])
if params['data_format'] == 'channels_first':
cls_loss = tf.reshape(cls_loss,
[bs, -1, width, height, params['num_classes']])
else:
cls_loss = tf.reshape(cls_loss,
[bs, width, height, -1, params['num_classes']])
cls_loss *= tf.cast(
tf.expand_dims(tf.not_equal(labels['cls_targets_%d' % level], -2), -1),
tf.float32)
cls_losses.append(tf.reduce_sum(cls_loss))
if params['box_loss_weight']:
box_losses.append(
_box_loss(
box_outputs[level],
box_targets_at_level,
num_positives_sum,
delta=params['delta']))
if params['iou_loss_type']:
input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
box_output_list = [tf.reshape(box_outputs[i], [-1, 4]) for i in levels]
box_outputs = tf.concat(box_output_list, axis=0)
box_target_list = [
tf.reshape(labels['box_targets_%d' % level], [-1, 4])
for level in levels
]
box_targets = tf.concat(box_target_list, axis=0)
anchor_boxes = tf.tile(input_anchors.boxes, [params['batch_size'], 1])
box_outputs = anchors.decode_box_outputs(box_outputs, anchor_boxes)
box_targets = anchors.decode_box_outputs(box_targets, anchor_boxes)
box_iou_loss = _box_iou_loss(box_outputs, box_targets, num_positives_sum,
params['iou_loss_type'])
else:
box_iou_loss = 0
# Sum per level losses to total loss.
cls_loss = tf.add_n(cls_losses)
box_loss = tf.add_n(box_losses) if box_losses else 0
total_loss = (
cls_loss +
params['box_loss_weight'] * box_loss +
params['iou_loss_weight'] * box_iou_loss)
return total_loss, cls_loss, box_loss, box_iou_loss
def reg_l2_loss(weight_decay, regex=r'.*(kernel|weight):0$'):
"""Return regularization l2 loss loss."""
var_match = re.compile(regex)
return weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if var_match.match(v.name)
])
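# For example (variable names are illustrative only): a variable named
# 'efficientnet-b0/stem/conv2d/kernel:0' matches the default regex and is
# regularized, while names ending in 'bias:0', 'beta:0' or 'gamma:0' are not.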
def _model_fn(features, labels, mode, params, model, variable_filter_fn=None):
"""Model definition entry.
Args:
features: the input image tensor with shape [batch_size, height, width, 3].
The height and width are fixed and equal.
labels: the input labels in a dictionary. The labels include class targets
and box targets which are dense label maps. The labels are generated from
get_input_fn function in data/dataloader.py
mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT.
params: the dictionary defines hyperparameters of model. The default
settings are in default_hparams function in this file.
model: the model outputs class logits and box regression outputs.
variable_filter_fn: the filter function that takes trainable_variables and
returns the variable list after applying the filter rule.
Returns:
tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.
Raises:
RuntimeError: if both ckpt and backbone_ckpt are set.
"""
utils.image('input_image', features)
training_hooks = []
def _model_outputs(inputs):
# Convert params (dict) to Config for easier access.
return model(inputs, config=hparams_config.Config(params))
precision = utils.get_precision(params['strategy'], params['mixed_precision'])
cls_outputs, box_outputs = utils.build_model_with_precision(
precision, _model_outputs, features, params['is_training_bn'])
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
# First check if it is in PREDICT mode.
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'image': features,
}
for level in levels:
predictions['cls_outputs_%d' % level] = cls_outputs[level]
predictions['box_outputs_%d' % level] = box_outputs[level]
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Set up training loss and learning rate.
update_learning_rate_schedule_parameters(params)
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rate_schedule(params, global_step)
# cls_loss and box_loss are for logging. only total_loss is optimized.
det_loss, cls_loss, box_loss, box_iou_loss = detection_loss(
cls_outputs, box_outputs, labels, params)
reg_l2loss = reg_l2_loss(params['weight_decay'])
total_loss = det_loss + reg_l2loss
if mode == tf.estimator.ModeKeys.TRAIN:
utils.scalar('lrn_rate', learning_rate)
utils.scalar('trainloss/cls_loss', cls_loss)
utils.scalar('trainloss/box_loss', box_loss)
utils.scalar('trainloss/det_loss', det_loss)
utils.scalar('trainloss/reg_l2_loss', reg_l2loss)
utils.scalar('trainloss/loss', total_loss)
if params['iou_loss_type']:
utils.scalar('trainloss/box_iou_loss', box_iou_loss)
train_epochs = tf.cast(global_step, tf.float32) / params['steps_per_epoch']
utils.scalar('train_epochs', train_epochs)
moving_average_decay = params['moving_average_decay']
if moving_average_decay:
ema = tf.train.ExponentialMovingAverage(
decay=moving_average_decay, num_updates=global_step)
ema_vars = utils.get_ema_vars()
if params['strategy'] == 'horovod':
import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top
learning_rate = learning_rate * hvd.size()
if mode == tf.estimator.ModeKeys.TRAIN:
if params['optimizer'].lower() == 'sgd':
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=params['momentum'])
elif params['optimizer'].lower() == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
else:
raise ValueError('optimizers should be adam or sgd')
if params['strategy'] == 'tpu':
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
elif params['strategy'] == 'horovod':
optimizer = hvd.DistributedOptimizer(optimizer)
training_hooks = [hvd.BroadcastGlobalVariablesHook(0)]
# Batch norm requires update_ops to be added as a train_op dependency.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
var_list = tf.trainable_variables()
if variable_filter_fn:
var_list = variable_filter_fn(var_list)
if params.get('clip_gradients_norm', 0) > 0:
logging.info('clip gradients norm by %f', params['clip_gradients_norm'])
grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
with tf.name_scope('clip'):
grads = [gv[0] for gv in grads_and_vars]
tvars = [gv[1] for gv in grads_and_vars]
clipped_grads, gnorm = tf.clip_by_global_norm(
grads, params['clip_gradients_norm'])
utils.scalar('gnorm', gnorm)
grads_and_vars = list(zip(clipped_grads, tvars))
with tf.control_dependencies(update_ops):
train_op = optimizer.apply_gradients(grads_and_vars, global_step)
else:
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
total_loss, global_step, var_list=var_list)
if moving_average_decay:
with tf.control_dependencies([train_op]):
train_op = ema.apply(ema_vars)
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(**kwargs):
"""Returns a dictionary that has the evaluation metrics."""
if params['nms_configs'].get('pyfunc', True):
detections_bs = []
for index in range(kwargs['boxes'].shape[0]):
nms_configs = params['nms_configs']
detections = tf.numpy_function(
functools.partial(nms_np.per_class_nms, nms_configs=nms_configs),
[
kwargs['boxes'][index],
kwargs['scores'][index],
kwargs['classes'][index],
tf.slice(kwargs['image_ids'], [index], [1]),
tf.slice(kwargs['image_scales'], [index], [1]),
params['num_classes'],
nms_configs['max_output_size'],
], tf.float32)
detections_bs.append(detections)
else:
# These two branches should be equivalent, but currently they are not.
# TODO(tanmingxing): enable the non_pyfun path after bug fix.
nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(
params, kwargs['boxes'], kwargs['scores'], kwargs['classes'],
kwargs['image_scales'])
img_ids = tf.cast(
tf.expand_dims(kwargs['image_ids'], -1), nms_scores.dtype)
detections_bs = [
img_ids * tf.ones_like(nms_scores),
nms_boxes[:, :, 1],
nms_boxes[:, :, 0],
nms_boxes[:, :, 3] - nms_boxes[:, :, 1],
nms_boxes[:, :, 2] - nms_boxes[:, :, 0],
nms_scores,
nms_classes,
]
        detections_bs = tf.stack(detections_bs, axis=-1, name='detections')
if params.get('testdev_dir', None):
logging.info('Eval testdev_dir %s', params['testdev_dir'])
eval_metric = coco_metric.EvaluationMetric(
testdev_dir=params['testdev_dir'])
coco_metrics = eval_metric.estimator_metric_fn(detections_bs,
tf.zeros([1]))
else:
        logging.info('Eval val with groundtruths %s.', params['val_json_file'])
eval_metric = coco_metric.EvaluationMetric(
filename=params['val_json_file'])
coco_metrics = eval_metric.estimator_metric_fn(
detections_bs, kwargs['groundtruth_data'])
# Add metrics to output.
cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
output_metrics = {
'cls_loss': cls_loss,
'box_loss': box_loss,
}
output_metrics.update(coco_metrics)
return output_metrics
cls_loss_repeat = tf.reshape(
tf.tile(tf.expand_dims(cls_loss, 0), [
params['batch_size'],
]), [params['batch_size'], 1])
box_loss_repeat = tf.reshape(
tf.tile(tf.expand_dims(box_loss, 0), [
params['batch_size'],
]), [params['batch_size'], 1])
cls_outputs = postprocess.to_list(cls_outputs)
box_outputs = postprocess.to_list(box_outputs)
params['nms_configs']['max_nms_inputs'] = anchors.MAX_DETECTION_POINTS
boxes, scores, classes = postprocess.pre_nms(params, cls_outputs,
box_outputs)
metric_fn_inputs = {
'cls_loss_repeat': cls_loss_repeat,
'box_loss_repeat': box_loss_repeat,
'image_ids': labels['source_ids'],
'groundtruth_data': labels['groundtruth_data'],
'image_scales': labels['image_scales'],
'boxes': boxes,
'scores': scores,
'classes': classes,
}
eval_metrics = (metric_fn, metric_fn_inputs)
checkpoint = params.get('ckpt') or params.get('backbone_ckpt')
if checkpoint and mode == tf.estimator.ModeKeys.TRAIN:
# Initialize the model from an EfficientDet or backbone checkpoint.
if params.get('ckpt') and params.get('backbone_ckpt'):
raise RuntimeError(
'--backbone_ckpt and --checkpoint are mutually exclusive')
if params.get('backbone_ckpt'):
var_scope = params['backbone_name'] + '/'
if params['ckpt_var_scope'] is None:
# Use backbone name as default checkpoint scope.
ckpt_scope = params['backbone_name'] + '/'
else:
ckpt_scope = params['ckpt_var_scope'] + '/'
else:
# Load every var in the given checkpoint
var_scope = ckpt_scope = '/'
def scaffold_fn():
"""Loads pretrained model through scaffold function."""
logging.info('restore variables from %s', checkpoint)
var_map = utils.get_ckpt_var_map(
ckpt_path=checkpoint,
ckpt_scope=ckpt_scope,
var_scope=var_scope,
skip_mismatch=params['skip_mismatch'])
tf.train.init_from_checkpoint(checkpoint, var_map)
return tf.train.Scaffold()
elif mode == tf.estimator.ModeKeys.EVAL and moving_average_decay:
def scaffold_fn():
"""Load moving average variables for eval."""
logging.info('Load EMA vars with ema_decay=%f', moving_average_decay)
restore_vars_dict = ema.variables_to_restore(ema_vars)
saver = tf.train.Saver(restore_vars_dict)
return tf.train.Scaffold(saver=saver)
else:
scaffold_fn = None
if params['strategy'] != 'tpu':
# Profile every 1K steps.
profile_hook = tf.train.ProfilerHook(
save_steps=1000, output_dir=params['model_dir'])
training_hooks.append(profile_hook)
# Report memory allocation if OOM
class OomReportingHook(tf.estimator.SessionRunHook):
def before_run(self, run_context):
return tf.estimator.SessionRunArgs(
fetches=[],
options=tf.RunOptions(report_tensor_allocations_upon_oom=True))
training_hooks.append(OomReportingHook())
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metrics,
host_call=utils.get_tpu_host_call(global_step, params),
scaffold_fn=scaffold_fn,
training_hooks=training_hooks)
def retinanet_model_fn(features, labels, mode, params):
"""RetinaNet model."""
variable_filter_fn = functools.partial(
retinanet_arch.remove_variables, resnet_depth=params['resnet_depth'])
return _model_fn(
features,
labels,
mode,
params,
model=retinanet_arch.retinanet,
variable_filter_fn=variable_filter_fn)
def efficientdet_model_fn(features, labels, mode, params):
"""EfficientDet model."""
variable_filter_fn = functools.partial(
efficientdet_arch.freeze_vars, pattern=params['var_freeze_expr'])
return _model_fn(
features,
labels,
mode,
params,
model=efficientdet_arch.efficientdet,
variable_filter_fn=variable_filter_fn)
def get_model_arch(model_name='efficientdet-d0'):
"""Get model architecture for a given model name."""
if 'retinanet' in model_name:
return retinanet_arch.retinanet
if 'efficientdet' in model_name:
return efficientdet_arch.efficientdet
  raise ValueError('Invalid model name {}'.format(model_name))
def get_model_fn(model_name='efficientdet-d0'):
"""Get model fn for a given model name."""
if 'retinanet' in model_name:
return retinanet_model_fn
if 'efficientdet' in model_name:
return efficientdet_model_fn
  raise ValueError('Invalid model name {}'.format(model_name))
|
"""Module for Testing the Meetup Endpoint."""
import json
# Local Import
from .basecase import TestBaseCase as base
class TestMeetup(base):
"""Testing the Meetup Endpoints with valid input."""
def setUp(self):
base.setUp(self)
def test_create_meetup(self):
"""Testing Creation of a Meetup."""
response = self.client.post(
"/api/v1/meetups",
data=json.dumps(self.meetup_payload),
content_type=self.content_type,
)
response_data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 201)
self.assertEqual(response_data["message"], "Meetup was created successfully.")
def test_fetching_all_meetups(self):
"""Testing Fetching of all meetups."""
post_response = self.client.post(
"/api/v1/meetups",
data=json.dumps(self.meetup_payload),
content_type=self.content_type
)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(
post_response_data["message"], "Meetup was created successfully."
)
response = self.client.get("/api/v1/meetups/upcoming", content_type=self.content_type)
self.assertEqual(response.status_code, 200)
def test_fetch_single_meetup(self):
"""Test fetching a single meetup."""
post_response = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup_payload), content_type=self.content_type)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(post_response_data["message"], "Meetup was created successfully.")
        # Fetching a single meetup.
        response = self.client.get('/api/v1/meetups/{}'.format(post_response_data["data"]["id"]), content_type=self.content_type)
self.assertEqual(response.status_code, 200)
def test_rsvp_to_meetup(self):
"""Test RSVPing to a meetup."""
"""Test fetching a single meetup."""
post_response = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup_payload), content_type=self.content_type)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(post_response_data["message"], "Meetup was created successfully.")
# Posting RSVP.
response = self.client.post('/api/v1/meetups/{}/rsvps'.format(post_response_data["data"]["id"]), data=json.dumps(self.rsvp_payload), content_type=self.content_type)
self.assertEqual(response.status_code, 201)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""YOLO_v3 ResNet50 Model Defined in Keras."""
from tensorflow.keras.layers import UpSampling2D, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.applications.resnet import ResNet50
from yolo3.models.layers import yolo3_predictions, yolo3lite_predictions, tiny_yolo3_predictions, tiny_yolo3lite_predictions
def yolo3_resnet50_body(inputs, num_anchors, num_classes):
"""Create YOLO_V3 ResNet50 model CNN body in Keras."""
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
# f3 : 52 x 52 x 512
f3 = resnet50.get_layer('conv3_block4_out').output
f1_channel_num = 1024
f2_channel_num = 512
f3_channel_num = 256
y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs = inputs, outputs=[y1,y2,y3])
def yolo3lite_resnet50_body(inputs, num_anchors, num_classes):
'''Create YOLO_v3 Lite ResNet50 model CNN body in keras.'''
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
# f3 : 52 x 52 x 512
f3 = resnet50.get_layer('conv3_block4_out').output
f1_channel_num = 1024
f2_channel_num = 512
f3_channel_num = 256
y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs = inputs, outputs=[y1,y2,y3])
def yolo3lite_spp_resnet50_body(inputs, num_anchors, num_classes):
'''Create YOLO_v3 Lite SPP ResNet50 model CNN body in keras.'''
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
# f3 : 52 x 52 x 512
f3 = resnet50.get_layer('conv3_block4_out').output
f1_channel_num = 1024
f2_channel_num = 512
f3_channel_num = 256
y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes, use_spp=True)
return Model(inputs = inputs, outputs=[y1,y2,y3])
def tiny_yolo3_resnet50_body(inputs, num_anchors, num_classes):
'''Create Tiny YOLO_v3 ResNet50 model CNN body in keras.'''
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
f1_channel_num = 1024
f2_channel_num = 512
y1, y2 = tiny_yolo3_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)
return Model(inputs, [y1,y2])
def tiny_yolo3lite_resnet50_body(inputs, num_anchors, num_classes):
'''Create Tiny YOLO_v3 Lite ResNet50 model CNN body in keras.'''
resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)
print('backbone layers number: {}'.format(len(resnet50.layers)))
# input: 416 x 416 x 3
# conv5_block3_out: 13 x 13 x 2048
# conv4_block6_out: 26 x 26 x 1024
# conv3_block4_out: 52 x 52 x 512
# f1 :13 x 13 x 2048
f1 = resnet50.get_layer('conv5_block3_out').output
# f2: 26 x 26 x 1024
f2 = resnet50.get_layer('conv4_block6_out').output
f1_channel_num = 1024
f2_channel_num = 512
y1, y2 = tiny_yolo3lite_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)
return Model(inputs, [y1,y2])
|
"""
AWR + SAC from demo experiment
"""
from railrl.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from railrl.demos.source.mdp_path_loader import MDPPathLoader
from railrl.launchers.experiments.ashvin.awr_sac_rl import experiment
import railrl.misc.hyperparameter as hyp
from railrl.launchers.arglauncher import run_variants
from railrl.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=5000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
'env': ["relocate-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(5),
'trainer_kwargs.beta': [0.1, 0.3, 1, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", "shared"],
'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
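# Illustrative sketch only (not railrl's implementation): the sweep above conceptually
# enumerates the Cartesian product of the value lists in search_space, overriding the
# default variant at each dotted key (e.g. 'trainer_kwargs.beta'). railrl resolves the
# dotted keys into the nested dicts of the variant; for brevity this sketch leaves
# them flat.
def _example_expand_search_space(search_space, default_variant):
    import itertools
    keys, value_lists = zip(*sorted(search_space.items()))
    for combo in itertools.product(*value_lists):
        candidate = dict(default_variant)  # shallow copy, for illustration only
        candidate.update(dict(zip(keys, combo)))
        yield candidate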
|
import numpy as np
from math import *
import pymultinest
import sys
sys.path.insert(0, '/home/kochenma/pysb')
from pysb.integrate import Solver
import csv
import datetime
import time as tm
from model_778 import model
from pysb.pathfinder import set_path
set_path('bng', '/home/kochenma/BioNetGen')
data_object = []
with open('earm_data.csv') as data_file:
reader = csv.reader(data_file)
line = list(reader)
for each in line:
data_object.append(each)
for i, each in enumerate(data_object):
if i > 0:
for j, item in enumerate(each):
data_object[i][j] = float(data_object[i][j])
data_object = data_object[1:]
time = []
for each in data_object:
time.append(float(each[0]))
model_solver = Solver(model, time, integrator='vode', integrator_options={'atol': 1e-12, 'rtol': 1e-12})
def prior(cube, ndim, nparams):
for k, every in enumerate(model.parameters):
if every.name[-3:] == '1kf':
cube[k] = cube[k]*4 - 4
if every.name[-3:] == '2kf':
cube[k] = cube[k]*4 - 8
if every.name[-3:] == '1kr':
cube[k] = cube[k]*4 - 4
if every.name[-3:] == '1kc':
cube[k] = cube[k]*4 - 1
postfixes = ['1kf', '2kf', '1kr', '1kc']
def loglike(cube, ndim, nparams):
point = []
cube_index = 0
for k, every in enumerate(model.parameters):
if every.name[-3:] in postfixes:
point.append(10**cube[cube_index])
cube_index += 1
else:
point.append(model.parameters[k].value)
model_solver.run(point)
failed = False
for every in model_solver.yobs:
for thing in every:
if thing <= -0.00000001 or np.isnan(thing):
failed = True
if failed:
return ['fail', -10000.0]
else:
parpc = model_solver.yobs[-1][6]/(model_solver.yobs[-1][1] + model_solver.yobs[-1][6])
if (parpc > 0.0) and (parpc < 1.00000001):
print log(parpc), point
return ['sim', log(parpc)]
else:
return ['fail', -10000.0]
n_params = 0
for m, lotsa in enumerate(model.parameters):
if lotsa.name[-3:] == '1kf':
n_params += 1
if lotsa.name[-3:] == '2kf':
n_params += 1
if lotsa.name[-3:] == '1kr':
n_params += 1
if lotsa.name[-3:] == '1kc':
n_params += 1
start_time = tm.clock()
counts = [0, 0]
pymultinest.run(loglike, prior, n_params, evidence_tolerance=0.0001, n_live_points=16000, log_zero=-1e3, sampling_efficiency=0.3, outputfiles_basename='/scratch/kochenma/log_casp_act/778/', resume = False, verbose = False, counts=counts)
print counts
print 'start time', start_time
print 'end time', tm.clock()
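# Illustration (descriptive comment, not part of the original run script): pymultinest
# passes unit-cube samples in [0, 1]; prior() rescales them to log10 ranges and
# loglike() exponentiates with 10**cube[...]. The implied sampling ranges are:
#   '1kf': cube*4 - 4  -> log10(k) in [-4, 0],  k in [1e-4, 1e0]
#   '2kf': cube*4 - 8  -> log10(k) in [-8, -4], k in [1e-8, 1e-4]
#   '1kr': cube*4 - 4  -> log10(k) in [-4, 0],  k in [1e-4, 1e0]
#   '1kc': cube*4 - 1  -> log10(k) in [-1, 3],  k in [1e-1, 1e3]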
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import os
import random
import re
from collections import defaultdict
from typing import List, Optional, Dict, Tuple
from parlai.core.opt import Opt
from parlai.core.teachers import ParlAIDialogTeacher, create_task_agent_from_taskname
from parlai.tasks.convai2.agents import BothTeacher
from parlai.tasks.empathetic_dialogues.agents import EmpatheticDialoguesTeacher
from parlai.tasks.wizard_of_wikipedia.agents import WizardDialogKnowledgeTeacher
from parlai.utils.misc import warn_once
from parlai.utils.io import PathManager
from parlai.utils.concepts import split_concepts
from .build import build
##################################################
#### Teacher for the BlendedSkillTalk Dataset ####
##################################################
def raw_data_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
return os.path.join(opt['datapath'], 'blended_skill_talk', dt + '.json')
def _processed_data_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
return os.path.join(opt['datapath'], 'blended_skill_talk', dt + '.txt')
def _persona_list_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
return os.path.join(opt['datapath'], 'blended_skill_talk', 'persona_list.txt')
def _topic_to_persona_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
return os.path.join(
opt['datapath'], 'blended_skill_talk', 'topic_to_persona_list.txt'
)
def _cached_data_path(opt: Opt, experiencer_side_only: bool) -> str:
"""
Build the data if it doesn't exist.
See EDPersonaTopicifierTeacher in ParlAI v1.5.1 and earlier for the code to add
persona strings to the base EmpatheticDialogues dataset.
"""
build(opt)
dt = opt['datatype'].split(':')[0]
side_string = 'experiencer_only' if experiencer_side_only else 'both_sides'
return os.path.join(
opt['datapath'],
'blended_skill_talk',
f'ed_persona_topicifier__{dt}__{side_string}.json',
)
def safe_personas_path(opt: Opt) -> str:
# Build the data if it doesn't exist.
build(opt)
return os.path.join(opt['datapath'], 'blended_skill_talk', 'safe_personas.txt')
class BlendedSkillTalkTeacher(ParlAIDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['parlaidialogteacher_datafile'] = _processed_data_path(opt)
super().__init__(opt, shared)
class InteractiveTeacher(BlendedSkillTalkTeacher):
# Dummy class to add arguments for interactive world.
pass
class SelfchatTeacher(BlendedSkillTalkTeacher):
# Dummy class to add arguments for interactive world.
pass
class DefaultTeacher(BlendedSkillTalkTeacher):
pass
def create_agents(opt):
if not opt.get('interactive_task', False):
return create_task_agent_from_taskname(opt)
else:
# interactive task has no task agents (they are attached as user agents)
return []
################################################################################
## Teachers for adding ConvAI2 personas and WoW topics to existing datasets ##
################################################################################
class PersonaTopicifier:
def __init__(
self,
opt: Opt,
should_have_personas: bool = False,
should_have_topics: bool = False,
no_persona_is_error: bool = False,
):
self.utterance_to_persona_map = {}
self.should_have_personas = should_have_personas
self.should_have_topics = should_have_topics
self.no_persona_is_error = no_persona_is_error
# Throw an exception if a persona is not found for the input WoW topic
# this returns map of persona line str to WoW topic
self.personas_file_path = _persona_list_path(opt)
self.topic_to_persona_path = _topic_to_persona_path(opt)
(
self.wow_topics_to_persona_strings_map,
self.persona_strings_to_wow_topics_map,
) = self._setup_personas_to_wow_topics()
with PathManager.open(self.personas_file_path, 'r') as f:
self.personas = f.read().strip().split('||')
# There's an extra line at the end of the file which is ''
self.personas = [p for p in self.personas if p]
def _setup_personas_to_wow_topics(
self,
) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]:
persona_strings_to_topics = defaultdict(list)
topics_to_persona_strings = defaultdict(list)
with PathManager.open(self.topic_to_persona_path, 'r') as f:
for line in f:
match = re.fullmatch(r'([^[]+): (\[.+\])\n', line)
topic = match.group(1)
persona_strings = eval(match.group(2))
assert isinstance(persona_strings, list)
topics_to_persona_strings[topic] = persona_strings
for str_ in persona_strings:
persona_strings_to_topics[str_].append(topic)
warn_once(
f'FINISHED MAPPING personas to topics, got: {len(list(persona_strings_to_topics.keys()))} persona strings to map to topics.'
)
return topics_to_persona_strings, persona_strings_to_topics
def __calculate_word_overlap(self, a, b):
"""
Very rudimentary way to calculate word overlap.
"""
score = 0
tokens_a = a.split(' ')
tokens_a = [ta for ta in tokens_a if len(ta) >= 5]
for ta in tokens_a:
if ta in b:
score += 1
tokens_b = b.split(' ')
tokens_b = [tb for tb in tokens_b if len(tb) >= 5]
for tb in tokens_b:
if tb in a:
score += 1
return score
def __choose_persona_from_text(self, utt):
utt = utt.strip()
if utt not in self.utterance_to_persona_map:
best_word_overlap = 0
best_persona = None
for p in self.personas:
word_overlap = self.__calculate_word_overlap(utt, p)
if word_overlap >= best_word_overlap:
best_word_overlap = word_overlap
best_persona = p
if not best_persona:
raise Exception(
f'No persona found for utterance: \"{utt}\". This should not happen.'
)
self.utterance_to_persona_map[utt] = best_persona
# Should have a \n at the end of it already
return best_persona
return self.utterance_to_persona_map[utt]
def __choose_persona_from_topic(self, topic):
topic = topic.strip()
persona_strings = self.wow_topics_to_persona_strings_map[topic]
for p in persona_strings:
for persona in self.personas:
if p in persona:
return persona
if self.no_persona_is_error:
raise ValueError(f'ERROR: Found no persona for topic: {topic}.')
else:
warn_once(f'Found no persona for topic: {topic}. Returning first persona.')
return self.personas[0]
def __choose_topic(self, persona):
persona_lines = persona.strip().split('\n')
for p in persona_lines:
p_str = p.replace('your persona:', '')
p_str = p_str.strip()
if p_str in self.persona_strings_to_wow_topics_map:
topics = self.persona_strings_to_wow_topics_map[p_str]
topic = topics[0] + '\n'
return topic
for utt, topics in self.persona_strings_to_wow_topics_map.items():
utt_words = utt.split()
utt_words_long = [utt for utt in utt_words if len(utt) > 6]
for long_utt in utt_words_long:
if long_utt in persona:
return topics[0] + '\n'
return topics[0] + '\n'
def get_modified_text(self, text):
# Order should be <Persona> \n <Topic> \n <Utterance>
# Should be used for entry_idx == 0 only (for all first
# utterances only)
# has_neither = 'persona:' not in text and '\n' not in text
# has_wow_topic_only = 'persona:' not in text and '\n' in text
# has_persona_only = 'persona:' in text
has_neither = not self.should_have_personas and not self.should_have_topics
has_wow_topic_only = not self.should_have_personas and self.should_have_topics
has_persona_only = not self.should_have_topics and self.should_have_personas
if (self.should_have_personas and (has_neither or has_wow_topic_only)) or (
self.should_have_topics and (has_neither or has_persona_only)
):
raise Exception(
f'Malformed text: {text}, should_have_personas: {self.should_have_personas}, should_have_topics: {self.should_have_topics}, has_neither: {has_neither}, has_wow_topic_only: {has_wow_topic_only}, has_persona_only: {has_persona_only}'
)
if has_neither:
# Will occur with ED
persona = self.__choose_persona_from_text(text)
topic = self.__choose_topic(persona)
utt = text
elif has_wow_topic_only:
# Will occur with Wizard
parts = text.strip().split('\n')
if len(parts) > 1:
topic = parts[0] + '\n'
utt = parts[1]
persona = self.__choose_persona_from_topic(topic)
else:
# Only has a topic, no utterance
topic = parts[0] + '\n'
utt = ''
persona = self.__choose_persona_from_topic(topic)
elif has_persona_only:
# Will occur with Convai2
lines = text.strip().split('\n')
utt = lines[-1]
persona = ''.join(l + '\n' for l in lines[:-1])
topic = self.__choose_topic(persona)
else:
raise Exception(f'Unknown structure of utterance: {text}')
modified_utterance = persona + topic + utt
return modified_utterance
################################################################
## Generator of context for crowdsourcing BST conversations ##
################################################################
class ContextGenerator:
"""
Generates contexts shown to crowdsourced workers when collecting BST conversations.
This generator was used to generate the context information shown to workers at the
beginning of a conversation, when crowdsourcing the conversations that make up the
BST dataset.
"""
def __init__(self, opt, datatype: str = 'train', seed: Optional[int] = None):
"""
Initialize the context generator.
opt: only a 'datapath' key is required, to specify the ParlAI data folder
"""
if seed is not None:
self.rng = random.Random(seed)
else:
self.rng = random.Random()
convai2_opt = Opt({'datapath': opt['datapath'], 'datatype': datatype})
self.convai2_teacher = BothTeacher(convai2_opt)
ed_opt = Opt(
{
'datapath': opt['datapath'],
'datatype': datatype,
'train_experiencer_only': True,
}
)
# Specify train_experiencer_only = True because we want to ensure that the text
# will correspond to a Speaker utterance and the label to a Listener response
self.ed_teacher = EmpatheticDialoguesTeacher(ed_opt)
wow_opt = Opt({'datapath': opt['datapath'], 'datatype': datatype})
self.wow_teacher = WizardDialogKnowledgeTeacher(wow_opt)
self.topic_to_persona_path = _topic_to_persona_path(opt)
self.wow_topics_to_episode_idxes = self._setup_topics_to_episodes()
self.persona_strings_to_wow_topics = self._setup_personas_to_topics()
def get_context(self) -> dict:
"""
Get context information to be shown at the beginning of one conversation.
Values in return dict:
- context_dataset: the dataset (ConvAI2, EmpatheticDialogues, or Wizard of
Wikipedia) used to generate the context information.
- persona_1_strings, persona_2_strings: 2 persona strings each for the two
speakers, chosen randomly from the ConvAI2 dataset. If context_dataset ==
"wizard_of_wikipedia", these persona strings will be matched to the WoW
topic returned in the "additional_context" field.
- additional_context: provides additional bits of information to give context
for the speakers. If context_dataset == "empathetic_dialogues", this is a
situation from the start of an ED conversation. If context_dataset ==
"wizard_of_wikipedia", this is a topic from the WoW dataset that matches the
persona strings. If context_dataset == "convai2", this is None.
- person1_seed_utterance, person2_seed_utterance: two lines of a conversation
from the dataset specified by "context_dataset". They will be shown to the
speakers to "seed" the conversation, and the speakers continue from where
the lines left off.
"""
# Determine which dataset we will show context for
rand_value = self.rng.random()
if rand_value < 1 / 3:
context_dataset = 'convai2'
elif rand_value < 2 / 3:
context_dataset = 'empathetic_dialogues'
else:
context_dataset = 'wizard_of_wikipedia'
if context_dataset == 'convai2':
# Select episode
episode_idx = self.rng.randrange(self.convai2_teacher.num_episodes())
# Extract personas
persona_1_strings, persona_2_strings = self._extract_personas(episode_idx)
# Sample persona strings
selected_persona_1_strings = self.rng.sample(persona_1_strings, 2)
selected_persona_2_strings = self.rng.sample(persona_2_strings, 2)
# Select previous utterances
num_entries = len(self.convai2_teacher.data.data[episode_idx])
entry_idx = self.rng.randrange(1, num_entries)
# Don't select the first entry, which often doesn't include an apprentice
# utterance
chosen_entry = self.convai2_teacher.get(episode_idx, entry_idx=entry_idx)
person1_seed_utterance = chosen_entry['text']
assert len(chosen_entry['labels']) == 1
person2_seed_utterance = chosen_entry['labels'][0]
return {
'context_dataset': context_dataset,
'persona_1_strings': selected_persona_1_strings,
'persona_2_strings': selected_persona_2_strings,
'additional_context': None,
'person1_seed_utterance': person1_seed_utterance,
'person2_seed_utterance': person2_seed_utterance,
}
elif context_dataset == 'empathetic_dialogues':
# Select episode
persona_episode_idx = self.rng.randrange(
self.convai2_teacher.num_episodes()
)
# Extract personas
persona_1_strings, persona_2_strings = self._extract_personas(
persona_episode_idx
)
# Sample persona strings
selected_persona_1_strings = self.rng.sample(persona_1_strings, 2)
selected_persona_2_strings = self.rng.sample(persona_2_strings, 2)
# Select previous utterances
episode_idx = self.rng.randrange(self.ed_teacher.num_episodes())
entry_idx = 0 # We'll only use the first pair of utterances
entry = self.ed_teacher.get(episode_idx, entry_idx=entry_idx)
situation = entry['situation']
speaker_utterance = entry['text']
assert len(entry['labels']) == 1
listener_response = entry['labels'][0]
return {
'context_dataset': context_dataset,
'persona_1_strings': selected_persona_1_strings,
'persona_2_strings': selected_persona_2_strings,
'additional_context': situation,
'person1_seed_utterance': speaker_utterance,
'person2_seed_utterance': listener_response,
}
elif context_dataset == 'wizard_of_wikipedia':
# Pull different personas until you get a pair for which at least one
# sentence has a WoW topic bound to it
num_tries = 0
while True:
num_tries += 1
# Extract a random (matched) pair of personas
persona_episode_idx = self.rng.randrange(
self.convai2_teacher.num_episodes()
)
all_persona_strings = dict()
all_persona_strings[1], all_persona_strings[2] = self._extract_personas(
persona_episode_idx
)
# See if any of the persona strings have a matching WoW topic
matching_persona_string_idxes = []
for persona_idx, persona_strings in all_persona_strings.items():
for str_idx, str_ in enumerate(persona_strings):
wow_topics = self.persona_strings_to_wow_topics[str_]
if len(wow_topics) > 0:
matching_persona_string_idxes.append((persona_idx, str_idx))
if len(matching_persona_string_idxes) > 0:
break
print(
f'{num_tries:d} try/tries needed to find a pair of personas with an '
f'associated WoW topic.'
)
# Pick out the WoW topic and matching persona string
matching_persona_idx, matching_persona_string_idx = self.rng.sample(
matching_persona_string_idxes, k=1
)[0]
matching_persona_string = all_persona_strings[matching_persona_idx][
matching_persona_string_idx
]
wow_topic = self.rng.sample(
self.persona_strings_to_wow_topics[matching_persona_string], k=1
)[0]
# Sample persona strings, making sure that we keep the one connected to the
# WoW topic
if matching_persona_idx == 1:
remaining_persona_1_strings = [
str_
for str_ in all_persona_strings[1]
if str_ != matching_persona_string
]
selected_persona_1_strings = [
matching_persona_string,
self.rng.sample(remaining_persona_1_strings, k=1)[0],
]
self.rng.shuffle(selected_persona_1_strings)
selected_persona_2_strings = self.rng.sample(all_persona_strings[2], 2)
else:
selected_persona_1_strings = self.rng.sample(all_persona_strings[1], 2)
remaining_persona_2_strings = [
str_
for str_ in all_persona_strings[2]
if str_ != matching_persona_string
]
selected_persona_2_strings = [
matching_persona_string,
self.rng.sample(remaining_persona_2_strings, k=1)[0],
]
self.rng.shuffle(selected_persona_2_strings)
# Sample WoW previous utterances, given the topic
episode_idx = self.rng.sample(
self.wow_topics_to_episode_idxes[wow_topic], k=1
)[0]
entry_idx = 1
# Select the second entry, which (unlike the first entry) will always have
# two valid utterances and which will not usually be so far along in the
# conversation that the new Turkers will be confused
entry = self.wow_teacher.get(episode_idx, entry_idx=entry_idx)
apprentice_utterance = entry['text']
assert len(entry['labels']) == 1
wizard_utterance = entry['labels'][0]
return {
'context_dataset': context_dataset,
'persona_1_strings': selected_persona_1_strings,
'persona_2_strings': selected_persona_2_strings,
'additional_context': wow_topic,
'person1_seed_utterance': apprentice_utterance,
'person2_seed_utterance': wizard_utterance,
}
def _setup_personas_to_topics(self) -> Dict[str, List[str]]:
"""
Create a map from ConvAI2 personas to WoW topics that they correspond to.
"""
print('Starting to map personas to topics.')
persona_strings_to_topics = defaultdict(list)
with PathManager.open(self.topic_to_persona_path, 'r') as f:
for line in f:
match = re.fullmatch(r'([^[]+): (\[.+\])\n', line)
topic = match.group(1)
if topic not in self.wow_topics_to_episode_idxes:
continue
persona_strings = eval(match.group(2))
assert isinstance(persona_strings, list)
for str_ in persona_strings:
persona_strings_to_topics[str_].append(topic)
print('Finished mapping personas to topics.')
return persona_strings_to_topics
def _setup_topics_to_episodes(self) -> Dict[str, List[int]]:
"""
Create a map from WoW topics to the indices of the WoW episodes that use them.
"""
print('Starting to map topics to episodes.')
topics_to_episodes = defaultdict(list)
for episode_idx in range(self.wow_teacher.num_episodes()):
topic = self.wow_teacher.get(episode_idx, entry_idx=0)['chosen_topic']
topics_to_episodes[topic].append(episode_idx)
print('Finished mapping topics to episodes.')
return topics_to_episodes
def _extract_personas(self, episode_idx: str) -> Tuple[List[str], List[str]]:
"""
For the given ConvAI2 conversation, return strings of both speakers' personas.
"""
first_entry = self.convai2_teacher.get(episode_idx, entry_idx=0)
first_text_strings = first_entry['text'].split('\n')
persona_1_strings = []
persona_2_strings = []
for str_ in first_text_strings[:-1]: # The last string is the first utterance
if str_.startswith('your persona: '): # Here, "you" are Person 2
persona_2_strings.append(str_[len('your persona: ') :])
elif str_.startswith("partner's persona: "):
persona_1_strings.append(str_[len("partner's persona: ") :])
else:
raise ValueError('Persona string cannot be parsed!')
return persona_1_strings, persona_2_strings
import parlai.utils.logging as logging
from parlai.utils.misc import str_to_msg
TOKEN_KNOWLEDGE = '__knowledge__'
TOKEN_END_KNOWLEDGE = '__endknowledge__'
class ConceptsTeacher(BlendedSkillTalkTeacher):
def _setup_data(self, path):
logging.info(f"Loading ParlAI text data: {path}")
self.episodes = []
self.num_exs = 0
eps = []
with PathManager.open(path, newline='\n', encoding='utf-8') as read:
for line_no, line in enumerate(read, 1):
msg = str_to_msg(line.rstrip('\n'))
if msg and 'eval_labels' in msg:
raise ValueError(
f"It looks like you've written eval_labels as a key in your "
f"data file. This is not appropriate; labels will be converted "
f"for you automatically. This is happening on Line {line_no} "
f"in {path}. The line is:\n\t{line}"
)
if msg and 'text' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "text" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg and 'labels' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "labels" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg and 'concepts' not in msg:
raise ValueError(
f'BlendedSkillTalkConceptsTeacher requires a "concepts" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg:
self.num_exs += 1
# concepts = .replace("|",". ")
concepts = msg["concepts"]
if self.opt.get("dict_tokenizer", "") == "re":
concepts = split_concepts(concepts)
text = msg['text'] + concepts
msg.force_set('text',text)
del msg['concepts']
eps.append(msg)
if msg.get('episode_done', False):
self.episodes.append(eps)
eps = []
if len(eps) > 0:
# add last episode
eps[-1].force_set('episode_done', True)
self.episodes.append(eps)
if len(self.episodes) == 1 and line_no > 100:
logging.error(
f'The data in {path} looks like one very long episode. If this '
f'is intentional, you may ignore this, but you MAY have a bug in '
f'your data.'
)
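# Usage sketch (illustrative only, not part of the original module): draw one context
# for a crowdsourced BST conversation. The data folder below is a placeholder; per the
# ContextGenerator docstring, only a 'datapath' key is required in opt.
def _example_draw_context():
    opt = {'datapath': 'path/to/ParlAI/data'}  # hypothetical path, adjust as needed
    generator = ContextGenerator(opt, datatype='train', seed=0)
    # Returns the keys documented in get_context(): context_dataset, persona_1_strings,
    # persona_2_strings, additional_context, person1_seed_utterance, person2_seed_utterance.
    return generator.get_context()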
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-05-19 15:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('areas', '0001_initial'),
('users', '0002_user_email_active'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('title', models.CharField(max_length=20, verbose_name='地址名称')),
('receiver', models.CharField(max_length=20, verbose_name='收货人')),
('place', models.CharField(max_length=50, verbose_name='地址')),
('mobile', models.CharField(max_length=11, verbose_name='手机')),
('tel', models.CharField(blank=True, default='', max_length=20, null=True, verbose_name='固定电话')),
('email', models.CharField(blank=True, default='', max_length=30, null=True, verbose_name='电子邮箱')),
('is_deleted', models.BooleanField(default=False, verbose_name='逻辑删除')),
('city', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='city_addresses', to='areas.Area', verbose_name='市')),
('district', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='district_addresses', to='areas.Area', verbose_name='区')),
('province', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='province_addresses', to='areas.Area', verbose_name='省')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '用户地址',
'ordering': ['-update_time'],
'db_table': 'tb_address',
'verbose_name_plural': '用户地址',
},
),
migrations.AddField(
model_name='user',
name='default_address',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='users', to='users.Address', verbose_name='默认地址'),
),
]
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# __author__ = "王顶"
# Email: 408542507@qq.com
"""
Loop-and-slice implementation of a star pyramid.
The requirement keeps changing: sometimes a 4-level pyramid, sometimes a 5-level one.
To change the number of levels, just adjust the condition variable of the while loop.
"""
level = 0
line = ''
stars = '*******************************************'
spaces = ' '
while level < 4:
    n = level * 2 + 1  # n is the number of '*' characters on this line
    m = 4 - level      # m is the number of leading spaces
line = spaces[:m] + stars[:n]
print(line)
level = level + 1
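# A parameterized sketch of the same idea (illustrative, not part of the original
# exercise): pass the number of levels instead of editing the while condition. It
# reuses the module-level stars and spaces strings defined above.
def print_pyramid(levels):
    for level in range(levels):
        n = level * 2 + 1   # number of '*' characters on this line
        m = levels - level  # number of leading spaces
        print(spaces[:m] + stars[:n])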
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_broadcast():
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
shape = (4, 5, 2, 3, 4, 5, 6)
x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float32)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (3, 5, 7, 4, 5, 6)
x_np = np.arange(20).reshape((4, 5, 1)).astype(np.int32)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (8, 5, 7, 4, 5, 6)
    x_np = np.arange(24).reshape((1, 4, 1, 6)).astype(np.bool_)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (3, 4, 5, 2, 3, 4, 5, 7)
x_np = np.random.rand(2, 3, 1, 5, 1).astype(np.float16)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (3, 4, 5, 6)
x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)
output = P.BroadcastTo(shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, shape)
assert np.allclose(output.asnumpy(), expect)
x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)
output = P.BroadcastTo(shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (2, 3, 4, 5)
x1_np = np.random.rand(4, 5).astype(np.float32)
output = P.BroadcastTo(shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, shape)
assert np.allclose(output.asnumpy(), expect)
shape = (4, 5)
x1_np = np.ones((1,)).astype(np.bool_)
output = P.BroadcastTo(shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, shape)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_broadcast_dyn_init():
"""
Test running the op with -1's in the init shape to support varied inputs.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
ms_shape = (-1, 4, 5, 6)
np_shape = (3, 4, 5, 6)
x_np = np.random.rand(3, 1, 5, 1).astype(np.float32)
output = P.BroadcastTo(ms_shape)(Tensor(x_np))
expect = np.broadcast_to(x_np, np_shape)
assert np.allclose(output.asnumpy(), expect)
x1_np = np.random.rand(3, 1, 5, 1).astype(np.float16)
output = P.BroadcastTo(ms_shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, np_shape)
assert np.allclose(output.asnumpy(), expect)
ms_shape = (2, 3, -1, 5)
np_shape = (2, 3, 4, 5)
x1_np = np.random.rand(4, 5).astype(np.float32)
output = P.BroadcastTo(ms_shape)(Tensor(x1_np))
expect = np.broadcast_to(x1_np, np_shape)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_broadcast_dyn_invalid_init():
"""
Test running the op with -1's in the init shape in incorrect positions.
Expected to fail.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
ms_shape = (2, -1, 4, 5)
x_np = np.random.rand(4, 5).astype(np.float32)
with pytest.raises(ValueError):
P.BroadcastTo(ms_shape)(Tensor(x_np))
|
# coding: utf-8
import pprint
import re
import six
class UpdateApplicationEndpointRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'enabled': 'str',
'user_data': 'str'
}
attribute_map = {
'enabled': 'enabled',
'user_data': 'user_data'
}
def __init__(self, enabled=None, user_data=None):
"""UpdateApplicationEndpointRequestBody - a model defined in huaweicloud sdk"""
self._enabled = None
self._user_data = None
self.discriminator = None
if enabled is not None:
self.enabled = enabled
if user_data is not None:
self.user_data = user_data
@property
def enabled(self):
"""Gets the enabled of this UpdateApplicationEndpointRequestBody.
        Whether the endpoint is enabled. The value is the string "true" or "false".
:return: The enabled of this UpdateApplicationEndpointRequestBody.
:rtype: str
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this UpdateApplicationEndpointRequestBody.
        Whether the endpoint is enabled. The value is the string "true" or "false".
:param enabled: The enabled of this UpdateApplicationEndpointRequestBody.
:type: str
"""
self._enabled = enabled
@property
def user_data(self):
"""Gets the user_data of this UpdateApplicationEndpointRequestBody.
        User-defined data. The maximum length is 2048 bytes after UTF-8 encoding.
:return: The user_data of this UpdateApplicationEndpointRequestBody.
:rtype: str
"""
return self._user_data
@user_data.setter
def user_data(self, user_data):
"""Sets the user_data of this UpdateApplicationEndpointRequestBody.
        User-defined data. The maximum length is 2048 bytes after UTF-8 encoding.
:param user_data: The user_data of this UpdateApplicationEndpointRequestBody.
:type: str
"""
self._user_data = user_data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateApplicationEndpointRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
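# Usage sketch (illustrative only, not part of the generated SDK file): build a request
# body with placeholder values and inspect its dict form.
def _example_usage():
    body = UpdateApplicationEndpointRequestBody(enabled='true', user_data='example-data')
    return body.to_dict()  # {'enabled': 'true', 'user_data': 'example-data'}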
|
#!/opt/python-3.6/bin/python3
import unittest
import sys
sys.path.append("../src")
from info_ordering import order_info
class TestInfoOrdering(unittest.TestCase):
def test_order_info(self):
# TODO: fix to actually test
value = 5
self.assertEqual(value, 5)
if __name__ == '__main__':
unittest.main()
|
"""Semantic analysis of TypedDict definitions."""
from mypy.backports import OrderedDict
from typing import Optional, List, Set, Tuple
from typing_extensions import Final
from mypy.types import Type, AnyType, TypeOfAny, TypedDictType, TPDICT_NAMES
from mypy.nodes import (
CallExpr, TypedDictExpr, Expression, NameExpr, Context, StrExpr, BytesExpr, UnicodeExpr,
ClassDef, RefExpr, TypeInfo, AssignmentStmt, PassStmt, ExpressionStmt, EllipsisExpr, TempNode,
DictExpr, ARG_POS, ARG_NAMED
)
from mypy.semanal_shared import SemanticAnalyzerInterface
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.options import Options
from mypy.typeanal import check_for_explicit_any, has_any_from_unimported_type
from mypy.messages import MessageBuilder
from mypy.errorcodes import ErrorCode
from mypy import errorcodes as codes
TPDICT_CLASS_ERROR: Final = (
"Invalid statement in TypedDict definition; " 'expected "field_name: field_type"'
)
class TypedDictAnalyzer:
def __init__(self,
options: Options,
api: SemanticAnalyzerInterface,
msg: MessageBuilder) -> None:
self.options = options
self.api = api
self.msg = msg
def analyze_typeddict_classdef(self, defn: ClassDef) -> Tuple[bool, Optional[TypeInfo]]:
"""Analyze a class that may define a TypedDict.
Assume that base classes have been analyzed already.
Note: Unlike normal classes, we won't create a TypeInfo until
        the whole definition of the TypedDict (including the body and all
key names and types) is complete. This is mostly because we
store the corresponding TypedDictType in the TypeInfo.
Return (is this a TypedDict, new TypeInfo). Specifics:
* If we couldn't finish due to incomplete reference anywhere in
the definition, return (True, None).
* If this is not a TypedDict, return (False, None).
"""
possible = False
for base_expr in defn.base_type_exprs:
if isinstance(base_expr, RefExpr):
self.api.accept(base_expr)
if base_expr.fullname in TPDICT_NAMES or self.is_typeddict(base_expr):
possible = True
if possible:
if (len(defn.base_type_exprs) == 1 and
isinstance(defn.base_type_exprs[0], RefExpr) and
defn.base_type_exprs[0].fullname in TPDICT_NAMES):
# Building a new TypedDict
fields, types, required_keys = self.analyze_typeddict_classdef_fields(defn)
if fields is None:
return True, None # Defer
info = self.build_typeddict_typeinfo(defn.name, fields, types, required_keys,
defn.line)
defn.analyzed = TypedDictExpr(info)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
return True, info
# Extending/merging existing TypedDicts
if any(not isinstance(expr, RefExpr) or
expr.fullname not in TPDICT_NAMES and
not self.is_typeddict(expr) for expr in defn.base_type_exprs):
self.fail("All bases of a new TypedDict must be TypedDict types", defn)
typeddict_bases = list(filter(self.is_typeddict, defn.base_type_exprs))
keys: List[str] = []
types = []
required_keys = set()
# Iterate over bases in reverse order so that leftmost base class' keys take precedence
for base in reversed(typeddict_bases):
assert isinstance(base, RefExpr)
assert isinstance(base.node, TypeInfo)
assert isinstance(base.node.typeddict_type, TypedDictType)
base_typed_dict = base.node.typeddict_type
base_items = base_typed_dict.items
valid_items = base_items.copy()
for key in base_items:
if key in keys:
self.fail('Overwriting TypedDict field "{}" while merging'
.format(key), defn)
keys.extend(valid_items.keys())
types.extend(valid_items.values())
required_keys.update(base_typed_dict.required_keys)
new_keys, new_types, new_required_keys = self.analyze_typeddict_classdef_fields(defn,
keys)
if new_keys is None:
return True, None # Defer
keys.extend(new_keys)
types.extend(new_types)
required_keys.update(new_required_keys)
info = self.build_typeddict_typeinfo(defn.name, keys, types, required_keys, defn.line)
defn.analyzed = TypedDictExpr(info)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
return True, info
return False, None
def analyze_typeddict_classdef_fields(
self,
defn: ClassDef,
oldfields: Optional[List[str]] = None) -> Tuple[Optional[List[str]],
List[Type],
Set[str]]:
"""Analyze fields defined in a TypedDict class definition.
This doesn't consider inherited fields (if any). Also consider totality,
if given.
Return tuple with these items:
* List of keys (or None if found an incomplete reference --> deferral)
* List of types for each key
* Set of required keys
"""
fields: List[str] = []
types: List[Type] = []
for stmt in defn.defs.body:
if not isinstance(stmt, AssignmentStmt):
# Still allow pass or ... (for empty TypedDict's).
if (not isinstance(stmt, PassStmt) and
not (isinstance(stmt, ExpressionStmt) and
isinstance(stmt.expr, (EllipsisExpr, StrExpr)))):
self.fail(TPDICT_CLASS_ERROR, stmt)
elif len(stmt.lvalues) > 1 or not isinstance(stmt.lvalues[0], NameExpr):
# An assignment, but an invalid one.
self.fail(TPDICT_CLASS_ERROR, stmt)
else:
name = stmt.lvalues[0].name
if name in (oldfields or []):
self.fail('Overwriting TypedDict field "{}" while extending'
.format(name), stmt)
if name in fields:
self.fail('Duplicate TypedDict key "{}"'.format(name), stmt)
continue
# Append name and type in this case...
fields.append(name)
if stmt.type is None:
types.append(AnyType(TypeOfAny.unannotated))
else:
analyzed = self.api.anal_type(stmt.type)
if analyzed is None:
return None, [], set() # Need to defer
types.append(analyzed)
                # ...despite possible minor failures that allow further analysis.
if stmt.type is None or hasattr(stmt, 'new_syntax') and not stmt.new_syntax:
self.fail(TPDICT_CLASS_ERROR, stmt)
elif not isinstance(stmt.rvalue, TempNode):
# x: int assigns rvalue to TempNode(AnyType())
self.fail('Right hand side values are not supported in TypedDict', stmt)
total: Optional[bool] = True
if 'total' in defn.keywords:
total = self.api.parse_bool(defn.keywords['total'])
if total is None:
self.fail('Value of "total" must be True or False', defn)
total = True
required_keys = set(fields) if total else set()
return fields, types, required_keys
def check_typeddict(self,
node: Expression,
var_name: Optional[str],
is_func_scope: bool) -> Tuple[bool, Optional[TypeInfo]]:
"""Check if a call defines a TypedDict.
The optional var_name argument is the name of the variable to
which this is assigned, if any.
Return a pair (is it a typed dict, corresponding TypeInfo).
If the definition is invalid but looks like a TypedDict,
report errors but return (some) TypeInfo. If some type is not ready,
return (True, None).
"""
if not isinstance(node, CallExpr):
return False, None
call = node
callee = call.callee
if not isinstance(callee, RefExpr):
return False, None
fullname = callee.fullname
if fullname not in TPDICT_NAMES:
return False, None
res = self.parse_typeddict_args(call)
if res is None:
# This is a valid typed dict, but some type is not ready.
# The caller should defer this until next iteration.
return True, None
name, items, types, total, ok = res
if not ok:
# Error. Construct dummy return value.
info = self.build_typeddict_typeinfo('TypedDict', [], [], set(), call.line)
else:
if var_name is not None and name != var_name:
self.fail(
'First argument "{}" to TypedDict() does not match variable name "{}"'.format(
name, var_name), node, code=codes.NAME_MATCH)
if name != var_name or is_func_scope:
# Give it a unique name derived from the line number.
name += '@' + str(call.line)
required_keys = set(items) if total else set()
info = self.build_typeddict_typeinfo(name, items, types, required_keys, call.line)
info.line = node.line
# Store generated TypeInfo under both names, see semanal_namedtuple for more details.
if name != var_name or is_func_scope:
self.api.add_symbol_skip_local(name, info)
if var_name:
self.api.add_symbol(var_name, info, node)
call.analyzed = TypedDictExpr(info)
call.analyzed.set_line(call.line, call.column)
return True, info
def parse_typeddict_args(
self, call: CallExpr) -> Optional[Tuple[str, List[str], List[Type], bool, bool]]:
"""Parse typed dict call expression.
Return names, types, totality, was there an error during parsing.
If some type is not ready, return None.
"""
# TODO: Share code with check_argument_count in checkexpr.py?
args = call.args
if len(args) < 2:
return self.fail_typeddict_arg("Too few arguments for TypedDict()", call)
if len(args) > 3:
return self.fail_typeddict_arg("Too many arguments for TypedDict()", call)
# TODO: Support keyword arguments
if call.arg_kinds not in ([ARG_POS, ARG_POS], [ARG_POS, ARG_POS, ARG_NAMED]):
return self.fail_typeddict_arg("Unexpected arguments to TypedDict()", call)
if len(args) == 3 and call.arg_names[2] != 'total':
return self.fail_typeddict_arg(
'Unexpected keyword argument "{}" for "TypedDict"'.format(call.arg_names[2]), call)
if not isinstance(args[0], (StrExpr, BytesExpr, UnicodeExpr)):
return self.fail_typeddict_arg(
"TypedDict() expects a string literal as the first argument", call)
if not isinstance(args[1], DictExpr):
return self.fail_typeddict_arg(
"TypedDict() expects a dictionary literal as the second argument", call)
total: Optional[bool] = True
if len(args) == 3:
total = self.api.parse_bool(call.args[2])
if total is None:
return self.fail_typeddict_arg(
'TypedDict() "total" argument must be True or False', call)
dictexpr = args[1]
res = self.parse_typeddict_fields_with_types(dictexpr.items, call)
if res is None:
# One of the types is not ready, defer.
return None
items, types, ok = res
for t in types:
check_for_explicit_any(t, self.options, self.api.is_typeshed_stub_file, self.msg,
context=call)
if self.options.disallow_any_unimported:
for t in types:
if has_any_from_unimported_type(t):
self.msg.unimported_type_becomes_any("Type of a TypedDict key", t, dictexpr)
assert total is not None
return args[0].value, items, types, total, ok
def parse_typeddict_fields_with_types(
self,
dict_items: List[Tuple[Optional[Expression], Expression]],
context: Context) -> Optional[Tuple[List[str], List[Type], bool]]:
"""Parse typed dict items passed as pairs (name expression, type expression).
Return names, types, was there an error. If some type is not ready, return None.
"""
seen_keys = set()
items: List[str] = []
types: List[Type] = []
for (field_name_expr, field_type_expr) in dict_items:
if isinstance(field_name_expr, (StrExpr, BytesExpr, UnicodeExpr)):
key = field_name_expr.value
items.append(key)
if key in seen_keys:
self.fail('Duplicate TypedDict key "{}"'.format(key), field_name_expr)
seen_keys.add(key)
else:
name_context = field_name_expr or field_type_expr
self.fail_typeddict_arg("Invalid TypedDict() field name", name_context)
return [], [], False
try:
type = expr_to_unanalyzed_type(field_type_expr, self.options,
self.api.is_stub_file)
except TypeTranslationError:
self.fail_typeddict_arg('Invalid field type', field_type_expr)
return [], [], False
analyzed = self.api.anal_type(type)
if analyzed is None:
return None
types.append(analyzed)
return items, types, True
def fail_typeddict_arg(self, message: str,
context: Context) -> Tuple[str, List[str], List[Type], bool, bool]:
self.fail(message, context)
return '', [], [], True, False
def build_typeddict_typeinfo(self, name: str, items: List[str],
types: List[Type],
required_keys: Set[str],
line: int) -> TypeInfo:
# Prefer typing then typing_extensions if available.
fallback = (self.api.named_type_or_none('typing._TypedDict', []) or
self.api.named_type_or_none('typing_extensions._TypedDict', []) or
self.api.named_type_or_none('mypy_extensions._TypedDict', []))
assert fallback is not None
info = self.api.basic_new_typeinfo(name, fallback, line)
info.typeddict_type = TypedDictType(OrderedDict(zip(items, types)), required_keys,
fallback)
return info
# Helpers
def is_typeddict(self, expr: Expression) -> bool:
return (isinstance(expr, RefExpr) and isinstance(expr.node, TypeInfo) and
expr.node.typeddict_type is not None)
def fail(self, msg: str, ctx: Context, *, code: Optional[ErrorCode] = None) -> None:
self.api.fail(msg, ctx, code=code)
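# For reference (descriptive comment, not part of the mypy module): the two TypedDict
# definition forms handled above are the class-based form, checked by
# analyze_typeddict_classdef, and the call-based form, checked by check_typeddict:
#
#     class Movie(TypedDict, total=False):
#         name: str
#         year: int
#
#     Movie = TypedDict('Movie', {'name': str, 'year': int}, total=False)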
|
from bs4 import BeautifulSoup
import requests
import os
class App:
def __init__(self):
self.userlist = []
self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"}
self.page = 1
os.system("title "+"THT IHBAR OTOMASYONU")
os.system("color F")
self.hashUser = input("'xf_user' Bilgisini giriniz: ").strip()
self.hashTfaTrust = input("'xf_tfa_trust' Bilgisini giriniz: ").strip()
self.cookies = {
'xf_user':f'{self.hashUser}',
'xf_tfa_trust':f'{self.hashTfaTrust}'
}
self.Transactions()
def ControlAccount(self):
request = requests.get("https://www.turkhackteam.org/uye/kaptantr.744109/", cookies=self.cookies, headers = self.headers)
controltext = "Giriş yap"
html = request.text
if controltext in html:
return "Giris Yapılmadı"
else:
return"Giriş Yapıldı"
def Scarping(self):
request = requests.get("https://www.turkhackteam.org/reports/closed?page="+ str(self.page), cookies=self.cookies, headers=self.headers).text
parser = BeautifulSoup(request, 'html.parser')
urls = parser.findAll("a", {"class": "structItem-title"},href=True)
for url in urls:
file = open("rapor.txt","a",encoding='utf-8')
file.write("*"*40)
file.write("\n")
reportedLink = "https://www.turkhackteam.org"+url["href"]
request = requests.get(reportedLink, cookies=self.cookies, headers=self.headers).text
contentParser = BeautifulSoup(request, 'html.parser')
content = contentParser.find_all("header",{"class":"message-attribution message-attribution--plain"})
for item in content:
userLink = item.find('a')["href"]
userLink = "https://www.turkhackteam.org"+userLink
userSituation = item.find("span", {"class": "label label--accent"})
userSituation = userSituation is None
userName = item.find('h4',{"class":"attribution"}).text
                userSituation = {True: "İhbar Yapan", False: "İhbar Eden"}[userSituation]
text = f"{userLink} // {userName} // ({userSituation})"
file.write(reportedLink)
file.write("\n")
file.write(text)
file.write("\n")
file.write("-"*20)
file.write("\n")
file.close()
def Transactions(self):
print("""
///////////////////////////////////////////
// //
// THT Ihbar Otomasyonu //
// 1.0 //
// //
// Created By //
// Ar-Ge Team //
///////////////////////////////////////////
""")
if self.ControlAccount() == "Giris Yapılmadı":
print("Giriş Yapılamadı. Çıkış yapmak için lütfen bir tuşa basınız.")
input()
exit()
else:
print(f"Login Control: {self.ControlAccount()}")
print("İşlem Başladı, Lütfen Bekleyiniz")
self.Scarping()
print("İşlem Tamamlandı, Çıkış Yapmak İçin Bir tuşa Basınız.")
input()
if __name__ == '__main__':
main = App()
|
"""
Diagnostic:
Diagnostic to produce images of the profile over time from a cube.
These plost show cube value (ie temperature) on the x-axis, and depth/height
on the y axis. The colour scale is the annual mean of the cube data.
Note that this diagnostic assumes that the preprocessors do the bulk of the
hard work, and that the cube received by this diagnostic (via the settings.yml
and metadata.yml files) has a time component, and depth component, but no
latitude or longitude coordinates.
An appropriate preprocessor for a 3D+time field would be:
preprocessors:
prep_profile:
extract_volume:
long1: 0.
long2: 20.
lat1: -30.
lat2: 30.
z_min: 0.
z_max: 3000.
average_region:
coord1: longitude
coord2: latitude
This tool is part of the ocean diagnostic tools package in the ESMValTool.
Author: Lee de Mora (PML)
ledm@pml.ac.uk
"""
import logging
import os
import sys
import matplotlib
matplotlib.use('Agg') # noqa
import matplotlib.pyplot as plt
import iris
import iris.quickplot as qplt
import diagnostic_tools as diagtools
from esmvaltool.diag_scripts.shared import run_diagnostic
# This part sends debug statements to stdout
logger = logging.getLogger(os.path.basename(__file__))
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
def determine_profiles_str(cube):
"""
Determine a string from the cube, to describe the profile.
Used in image titles, descriptions and filenames.
"""
options = ['latitude', 'longitude']
for option in options:
coord = cube.coord(option)
if len(coord.points) > 1:
continue
value = coord.points.mean()
if option == 'latitude':
return str(value) + ' N'
if option == 'longitude':
if value > 180.:
return str(value - 360.) + ' W'
return str(value) + ' E'
return ''
def make_profiles_plots(
cfg,
metadata,
filename,
):
"""
Make a simple profile plot for an individual model.
The cfg is the opened global config,
    metadata is the metadata dictionary
filename is the preprocessing model file.
"""
# Load cube and set up units
cube = iris.load_cube(filename)
cube = diagtools.bgc_units(cube, metadata['short_name'])
    # Aggregate the cube into annual means:
cube = cube.aggregated_by('year', iris.analysis.MEAN)
    # Is this data from a multi-model dataset?
multi_model = metadata['dataset'].find('MultiModel') > -1
#
times = cube.coord('time')
times_float = diagtools.timecoord_to_float(times)
time_0 = times_float[0]
cmap = plt.cm.get_cmap('jet')
plot_details = {}
for time_index, time in enumerate(times_float):
color = cmap((time - time_0) / (times_float[-1] - time_0))
qplt.plot(cube[time_index, :], cube[time_index, :].coord('depth'),
c=color)
plot_details[time_index] = {'c': color, 'ls': '-', 'lw': 1,
'label': str(int(time))}
# Add title to plot
title = ' '.join([
metadata['dataset'],
metadata['long_name'],
])
plt.title(title)
# Add Legend outside right.
diagtools.add_legend_outside_right(plot_details, plt.gca())
    # Load the image format extension
image_extention = diagtools.get_image_format(cfg)
# Determine image filename:
if multi_model:
path = diagtools.folder(
cfg['plot_dir']) + os.path.basename(filename).replace(
'.nc', '_profile' + image_extention)
else:
path = diagtools.get_image_path(
cfg,
metadata,
suffix='profile' + image_extention,
)
# Saving files:
if cfg['write_plots']:
logger.info('Saving plots to %s', path)
plt.savefig(path)
plt.close()
def main(cfg):
"""
Load the config file, and send it to the plot maker.
The cfg is the opened global config.
"""
for index, metadata_filename in enumerate(cfg['input_files']):
logger.info(
'metadata filename:\t%s',
metadata_filename
)
metadatas = diagtools.get_input_files(cfg, index=index)
for filename in sorted(metadatas.keys()):
logger.info('-----------------')
logger.info(
'model filenames:\t%s',
filename,
)
######
# Time series of individual model
make_profiles_plots(cfg, metadatas[filename], filename)
logger.info('Success')
if __name__ == '__main__':
with run_diagnostic() as config:
main(config)
|
import unittest
import blitzml
import numpy as np
from common import captured_output
class TestProblemOptions(unittest.TestCase):
def setUp(self):
A = np.arange(20).reshape(5, 4)
b = np.arange(5).astype(np.float64)
self.prob = blitzml.LassoProblem(A, b)
def tearDown(self):
del self.prob
def test_min_time(self):
self.assertLessEqual(self.prob._min_time, 0.)
self.prob._min_time = 2.0
self.assertEqual(self.prob._min_time, 2.0)
def test_max_time(self):
self.assertGreaterEqual(self.prob._max_time, 3600.)
self.prob._max_time = 5.0
self.assertEqual(self.prob._max_time, 5.0)
def test_max_iterations(self):
self.assertGreaterEqual(self.prob._max_iterations, 100)
self.prob._max_iterations = 10
self.assertEqual(self.prob._max_iterations, 10)
def test_tolerance(self):
self.assertGreater(self.prob._stopping_tolerance, 0.)
self.prob._stopping_tolerance = 0.
self.assertEqual(self.prob._stopping_tolerance, 0.)
self.prob._stopping_tolerance = 0.1
self.assertEqual(self.prob._stopping_tolerance, 0.1)
def test_verbose(self):
self.assertEqual(self.prob._verbose, False)
self.prob._verbose = True
self.assertEqual(self.prob._verbose, True)
def test_use_screening(self):
self.assertEqual(self.prob._use_screening, True)
self.prob._use_screening = False
self.assertEqual(self.prob._use_screening, False)
def test_use_working_sets(self):
self.assertEqual(self.prob._use_working_sets, True)
self.prob._use_working_sets = False
self.assertEqual(self.prob._use_working_sets, False)
def test_suppress_warnings(self):
bad_log_dir = "path/to/bad_log/dir/zxc8aj3n"
with captured_output() as out:
self.prob.solve(self.prob.compute_max_l1_penalty(),
log_directory=bad_log_dir)
self.assertIn("Warning", out[0])
blitzml.suppress_warnings()
with captured_output() as out:
self.prob.solve(self.prob.compute_max_l1_penalty(),
log_directory=bad_log_dir)
self.assertNotIn("Warning", out[0])
blitzml.unsuppress_warnings()
|
import nltk
from langdetect import detect
import csv
class Tokenize:
""" Text tokenizer """
def __init__(self):
""" Default constructor """
self.language = "en"
self.workDirectory = "/run/media/jf/Datos/Tourist Text Mining/datasets/colombia_en/"
self.tagFilename = "tags_en.csv"
self.wfFilename = "words_freq_en.csv"
self.structFilename = "structure_en.csv"
# http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
self.tagCategories_en = {
'Adjective' : ['ADJ', 'JJ', 'JJR', 'JJS'],
'Adverb' : ['ADV', 'RB', 'RBR', 'RBS', 'WRB'],
'Conjunction' : ['CONJ', 'CC'],
'Determiner' : ['DET', 'DT', 'EX', 'PDT', 'WDT'],
'Noun' : ['NOUN', 'NN', 'NNP', 'NNPS', 'NNS'],
'Numeral' : ['NUM', 'CD'],
'Particle' : ['PRT', 'POS', 'RP', 'TO'],
'Preposition' : ['ADP', 'IN'],
'Pronoun' : ['PRON', 'PRP', 'PRP$', 'WP', 'WP$'],
'Punctuation' : ['.', '#', '$', "''", '”', '``', ',', '.', ':', "''", '(', ')', '-LRB-', '-RRB-'],
'Verb' : ['VERB', 'MD', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'],
'X' : ['X', 'FW', 'LS', 'SYM', 'UH'],
}
self.reviews = []
self.tokens = []
self.tags = []
self.entities = []
self.other = []
def getCategory(self, tag):
""" Get the tag's category """
for cat in self.tagCategories_en:
if (tag in self.tagCategories_en[cat]):
return(cat)
return("")
def tokenizing(self):
""" Text tokenizer """
self.tokens = []
self.tags = []
self.entities = []
self.other = []
for review in self.reviews:
try:
if (detect(review) == self.language):
token = nltk.word_tokenize(review)
tag = nltk.pos_tag(token)
entity = nltk.chunk.ne_chunk(tag)
self.tokens.append(token)
self.tags.append(tag)
self.entities.append(entity)
else :
self.other.append(review)
except Exception as e:
continue
with open(self.workDirectory + self.tagFilename, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for tag in self.tags:
for value in tag:
writer.writerow(value)
def tagFrequencies(self):
""" Tag Frequencies """
fr = []
for tag in self.tags:
for key, value in tag:
found = False
for i in range(0, len(fr)):
if (fr[i][0] == value):
fr[i][1] += 1
found = True
break
if not found:
fr.append([value, 1])
def wordFrequencies(self):
""" Word Frequencies """
wd = []
for tag in self.tags:
for key, value in tag:
found = False
for i in range(0, len(wd)):
if (wd[i][0].lower() == key.lower()):
wd[i][1] += 1
found = True
break
if not found:
wd.append([key, 1])
with open(self.workDirectory + self.wfFilename, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for w in wd:
writer.writerow(w)
def wordCategory(self):
""" Word - category """
cats = []
for tag in self.tags:
for key, value in tag:
cats.append([key, self.getCategory(value)])
for cat in self.tagCategories_en:
with open(self.workDirectory + "_" + cat + '.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for i in cats:
if (i[1] == cat):
writer.writerow(i)
def getRules(self):
""" Get rules """
rules = []
for tag in self.tags:
s = ""
for w, t in tag:
s += self.getCategory(t) + " "
if (t == '.' or t == ','):
rules.append(s)
s = ""
if (len(s) > 0):
rules.append(s)
with open(self.workDirectory + self.structFilename, 'w') as csvfile:
for rule in rules:
csvfile.write("%s\n" % rule)
#from Tokenize import Tokenize
#tk = Tokenize()
#tk.reviews = reviews
#tk.language = "es"
#tk.workDirectory = "/run/media/jf/Datos/Tourist Text Mining/datasets/colombia_es/"
#tk.tagFilename = "location_tags_es.csv"
#tk.wfFilename = "location_words_freq_es.csv"
#tk.structFilename = "location_structure_es.csv"
#tk.tokenizing()
|
#! /usr/bin/python
from molmod.units import *
from yaff import *
import h5py, numpy as np
#Setting up system and force field
system = System.from_file('system.chk')
ff = ForceField.generate(system, 'pars.txt', rcut=15.0*angstrom, alpha_scale=3.2, gcut_scale=1.5, smooth_ei=True)
#Setting up output
f = h5py.File('output.h5', mode='w')
hdf5 = HDF5Writer(f, step=1)
r = h5py.File('restart.h5', mode='w')
restart = RestartWriter(r, step=10000)
hooks = [hdf5, restart]
#Setting up simulation
energy = ff.compute()
system.to_hdf5(f)
f['system/energy'] = energy
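# Possible continuation (not in the original script): run a short MD simulation
# so that the HDF5 and restart hooks defined above are actually exercised.
# Names follow the standard Yaff API; the timestep and temperature below are
# illustrative assumptions.
# verlet = VerletIntegrator(ff, 0.5*femtosecond, hooks=hooks, temp0=300*kelvin)
# verlet.run(200000)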
|
import os
import numpy as np
import pandas as pd
os.environ['MKL_THREADING_LAYER'] = 'GNU'
# df = pd.DataFrame(columns=['multiprune', 'headstr', 'pluslayer', 'plushead', 'acc1'])
# df.to_csv("multiprune_plusone.csv",index=False)
prevheadlist = [set([7]),set([11]),set([0]),set([7]),set([9]),set([9])]
plusheadlist = [set(range(12))-{7},set(range(12))-{11},set(range(12))-{0},set(range(12))-{7},set(range(12))-{9},set(range(12))-{9}]
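# Greedy head-pruning search (as read from the loop below): prevheadlist holds
# the heads already selected for pruning in each of the 6 layers, and
# plusheadlist holds the remaining candidates per layer. In each round
# (multiprune), every remaining candidate head of a layer is evaluated via
# main.py, then the candidate with the highest acc1 recorded in
# multiprune_plusone.csv is moved from plusheadlist into prevheadlist.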
for multiprune in range(1,12):
headstr = []
for oneset in prevheadlist:
setstr = [str(int(s)) for s in oneset]
setstr = '+'.join(setstr)
headstr.append(setstr)
headstr = '.'.join(headstr)
for pluslayer in range(6):
for plushead in plusheadlist[pluslayer]:
os.system(f'python -m torch.distributed.launch --nproc_per_node 1 --master_port 12345 main.py --eval --cfg configs/swin_tiny_patch4_window7_224.yaml --resume swin_tiny_patch4_window7_224.pth --data-path data/imagenet/ --prune {multiprune}_{headstr}_{pluslayer}_{plushead}')
df = pd.read_csv("multiprune_plusone.csv")
df = df[(df.multiprune == multiprune) & (df.pluslayer == pluslayer)]
df = df.apply(pd.to_numeric, errors = 'coerce')
max_acc1_idx = df['acc1'].idxmax()
plusheadlist[pluslayer].remove(df.loc[max_acc1_idx].plushead)
prevheadlist[pluslayer].add(df.loc[max_acc1_idx].plushead)
|
import wx
from .misc.helpers import deg_to_rad, rad_to_deg
from .misc.vector import Vector
# Size of the turtle canvas. We assume no user will have a screen
# so big that the canvas will be bigger than this.
BITMAP_SIZE = Vector((2000, 1200))
# Center of the canvas.
origin = BITMAP_SIZE / 2.0
def to_my_angle(angle):
"""
Convert an angle from the wxPython reference frame (radians)
to the reference frame that this module prefers (degrees)
"""
return rad_to_deg(-angle) - 180
def from_my_angle(angle):
"""
Convert an angle from the reference frame that this module prefers (degrees)
to the wxPython reference frame (radians)
"""
return deg_to_rad(-angle + 180)
def from_my_pos(pos):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return -pos + origin
def to_my_pos(pos):
"""
Transform between the reference frame that we prefer
and the reference frame that wxPython prefers
"""
return -pos + origin
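# Note: both position transforms are the same map pos -> origin - pos, which is
# its own inverse, so from_my_pos(to_my_pos(p)) == p. The angle pair above
# converts between wxPython radians and this module's degree convention
# (negated and shifted by 180 degrees).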
class Turtle:
"""
A Turtle object defines a turtle by its attributes, such as
position, orientation, color, etc. See source of __init__ for
a complete list.
"""
def __init__(self):
self.pos = Vector((0, 0))
self.orientation = 180
self.color = "red"
self.width = 3
self.visible = True
self.pen_down = True
# the `clear` attribute is only made True momentarily when
# the `clear()` function is called by the user to clear the screen.
self.clear = False
self.SPEED = 400.0 # Pixels per second
self.ANGULAR_SPEED = 360.0 # Degrees per second
def give_pen(self):
"""
Gives a wxPython pen that corresponds to the color, width,
and pen_downity of the Turtle instance.
"""
return wx.Pen(self.color,
self.width,
wx.SOLID if self.pen_down else wx.TRANSPARENT)
|
import click
from cmsis_svd.parser import SVDParser
MCU_OPTIONS = [
'STM32F0xx',
]
MCU2VENDOR_FILE = {
'STM32F0xx': ('STMicro', 'STM32F0xx.svd'),
}
ALL = 'show_all'
def show_register(register):
fields = []
for field in register.fields:
upper_index = field.bit_offset + field.bit_width - 1
lower_index = field.bit_offset
if upper_index == lower_index:
index_s = str(upper_index)
else:
index_s = f'{upper_index}:{lower_index}'
fields.append(f'{field.name}[{index_s}]')
print(f'{register.name: <5} 0x{register.address_offset:04x}: {",".join(fields)}')
def show_peripheral(peripheral):
print(peripheral.name)
for register in peripheral.registers:
show_register(register)
print()
@click.command()
@click.option('--mcu', type=click.Choice(MCU_OPTIONS), required=True,
help='MCU Name')
@click.option('--mcu-peripheral', help='Peripheral Specified')
def main(mcu, mcu_peripheral=None):
"""Given a chip and peripheral, prints the registers.
"""
parser = SVDParser.for_packaged_svd(*MCU2VENDOR_FILE[mcu])
address2peripheral = {}
for peripheral in parser.get_device().peripherals:
address2peripheral[peripheral.base_address] = peripheral
for _, peripheral in sorted(address2peripheral.items()):
print(f'{peripheral.name: <16} @ 0x{peripheral.base_address:08x} ({peripheral.address_block.size: >4})')
if mcu_peripheral:
for peripheral in parser.get_device().peripherals:
if peripheral.name == mcu_peripheral or mcu_peripheral == ALL:
show_peripheral(peripheral)
if __name__ == '__main__':
main()
|
from mycv.utils.general import disable_multithreads
disable_multithreads()
import os
from pathlib import Path
import argparse
from tqdm import tqdm
import math
import torch
import torch.cuda.amp as amp
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.parallel import DistributedDataParallel as DDP
import wandb
from mycv.utils.general import increment_dir
from mycv.utils.torch_utils import set_random_seeds, ModelEMA
from mycv.datasets.imagenet import ImageNetCls, imagenet_val
def cal_acc(p: torch.Tensor, labels: torch.LongTensor):
assert not p.requires_grad and p.device == labels.device
assert p.dim() == 2 and p.shape[0] == labels.shape[0]
_, p_cls = torch.max(p, dim=1)
tp = (p_cls == labels)
acc = tp.sum() / len(tp)
return acc
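# Illustrative usage: for p of shape (N, num_classes) and integer labels of
# shape (N,), cal_acc returns the fraction of rows whose argmax matches the
# label, e.g. cal_acc(torch.eye(3), torch.tensor([0, 1, 2])) gives 1.0.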
def train():
# ====== set the run settings ======
parser = argparse.ArgumentParser()
parser.add_argument('--project', type=str, default='imagenet')
parser.add_argument('--group', type=str, default='mini200')
parser.add_argument('--model', type=str, default='csp_s')
parser.add_argument('--resume', type=str, default='')
parser.add_argument('--batch_size', type=int, default=128)
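# Note: argparse's type=bool treats any non-empty string (including 'False') as
# True, so the two flags below effectively stay at their defaults unless the
# code is changed to use action='store_true' or explicit string parsing.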
parser.add_argument('--amp', type=bool, default=True)
parser.add_argument('--ema', type=bool, default=True)
parser.add_argument('--optimizer', type=str, default='SGD', choices=['Adam', 'SGD'])
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--metric', type=str, default='top1', choices=['top1'])
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--local_rank', type=int, default=-1, help='DDP arg, do not modify')
parser.add_argument('--wbmode', action='store_true')
cfg = parser.parse_args()
# model
cfg.img_size = 224
cfg.input_norm = False
cfg.sync_bn = False
# optimizer
cfg.lr = 0.01
cfg.momentum = 0.9
cfg.weight_decay = 0.0001
cfg.nesterov = False
# lr scheduler
cfg.lrf = 0.2 # min lr factor
cfg.lr_warmup_epochs = 1
# EMA
# cfg.ema_decay = 0.999
cfg.ema_warmup_epochs = 4
# Main process
IS_MAIN = (cfg.local_rank in [-1, 0])
# check arguments
metric: str = cfg.metric.lower()
epochs: int = cfg.epochs
local_rank: int = cfg.local_rank
world_size: int = int(os.environ.get('WORLD_SIZE', 1))
assert local_rank == int(os.environ.get('RANK', -1)), 'Only support single node'
assert cfg.batch_size % world_size == 0, 'batch_size must be multiple of device count'
batch_size: int = cfg.batch_size // world_size
if IS_MAIN:
print(cfg, '\n')
print('Batch size on each single GPU =', batch_size, '\n')
# fix random seeds for reproducibility
set_random_seeds(1)
torch.backends.cudnn.benchmark = True
# device setting
assert torch.cuda.is_available()
if local_rank == -1: # Single GPU
device = torch.device(f'cuda:{cfg.device}')
else: # DDP mode
assert torch.cuda.device_count() > local_rank and torch.distributed.is_available()
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
torch.distributed.init_process_group(
backend='nccl', init_method='env://', world_size=world_size, rank=local_rank
)
print(f'Local rank: {local_rank}, using device {device}:', 'device property:',
torch.cuda.get_device_properties(device))
# Dataset
if IS_MAIN:
print('Initializing Datasets and Dataloaders...')
if cfg.group == 'default':
train_split = 'train'
val_split = 'val'
cfg.num_class = 1000
elif cfg.group == 'mini200':
train_split = 'train200_600'
val_split = 'val200_600'
cfg.num_class = 200
else:
raise ValueError()
# training set
trainset = ImageNetCls(train_split, img_size=cfg.img_size, input_norm=cfg.input_norm)
sampler = torch.utils.data.distributed.DistributedSampler(
trainset, num_replicas=world_size, rank=local_rank, shuffle=True
) if local_rank != -1 else None
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=(sampler is None), sampler=sampler,
num_workers=cfg.workers, pin_memory=True
)
# test set
testloader = torch.utils.data.DataLoader(
ImageNetCls(split=val_split, img_size=cfg.img_size, input_norm=cfg.input_norm),
batch_size=batch_size, shuffle=False, num_workers=cfg.workers//2,
pin_memory=True, drop_last=False
)
# Initialize model
if cfg.model == 'res50':
from mycv.models.cls.resnet import resnet50
model = resnet50(num_classes=cfg.num_class)
elif cfg.model == 'res101':
from mycv.models.cls.resnet import resnet101
model = resnet101(num_classes=cfg.num_class)
elif cfg.model.startswith('yolov5'):
from mycv.models.yolov5.cls import YOLOv5Cls
assert cfg.model[-1] in ['s', 'm', 'l']
model = YOLOv5Cls(model=cfg.model[-1], num_class=cfg.num_class)
elif cfg.model.startswith('csp'):
from mycv.models.yolov5.cls import CSP
assert cfg.model[-1] in ['s', 'm', 'l']
model = CSP(model=cfg.model[-1], num_class=cfg.num_class)
else:
raise NotImplementedError()
model = model.to(device)
# loss function
loss_func = torch.nn.CrossEntropyLoss(reduction='mean')
# different optimization setting for different layers
pgb, pgw = [], []
for k, v in model.named_parameters():
if ('.bn' in k) or ('.bias' in k): # batchnorm or bias
pgb.append(v)
else: # conv weights
assert '.weight' in k
pgw.append(v)
parameters = [
{'params': pgb, 'lr': cfg.lr, 'weight_decay': 0.0},
{'params': pgw, 'lr': cfg.lr, 'weight_decay': cfg.weight_decay}
]
if IS_MAIN:
print('Parameter groups:', [len(pg['params']) for pg in parameters])
del pgb, pgw
# optimizer
if cfg.optimizer == 'SGD':
optimizer = torch.optim.SGD(parameters, lr=cfg.lr,
momentum=cfg.momentum, nesterov=cfg.nesterov)
elif cfg.optimizer == 'Adam':
optimizer = torch.optim.Adam(parameters, lr=cfg.lr)
else:
raise ValueError()
# AMP
scaler = amp.GradScaler(enabled=cfg.amp)
log_parent = Path(f'runs/{cfg.project}')
wb_id = None
results = {metric: 0}
if cfg.resume:
# resume
run_name = cfg.resume
log_dir = log_parent / run_name
assert log_dir.is_dir()
checkpoint = torch.load(log_dir / 'last.pt')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
scaler.load_state_dict(checkpoint['scaler'])
start_epoch = checkpoint['epoch'] + 1
cur_fitness = best_fitness = checkpoint.get(metric, 0)
if IS_MAIN:
wb_id = open(log_dir / 'wandb_id.txt', 'r').read()
else:
# new experiment
run_name = increment_dir(dir_root=log_parent, name=cfg.model)
log_dir = log_parent / run_name # wandb logging dir
if IS_MAIN:
os.makedirs(log_dir, exist_ok=False)
print(str(model), file=open(log_dir / 'model.txt', 'w'))
start_epoch = 0
cur_fitness = best_fitness = 0
# initialize wandb
if IS_MAIN:
wbrun = wandb.init(project=cfg.project, group=cfg.group, name=run_name, config=cfg,
dir='runs/', resume='allow', id=wb_id, mode=cfg.wbmode)
cfg = wbrun.config
cfg.log_dir = log_dir
cfg.wandb_id = wbrun.id
if not (log_dir / 'wandb_id.txt').exists():
with open(log_dir / 'wandb_id.txt', 'w') as f:
f.write(wbrun.id)
else:
wbrun = None
# lr scheduler
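# warmup_cosine returns a multiplicative factor applied to cfg.lr at every
# iteration: a linear ramp from 0 to 1 over cfg.lr_warmup_epochs worth of
# iterations, followed by a cosine decay from 1 down to cfg.lrf over
# epochs * len(trainloader) iterations.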
def warmup_cosine(x):
warmup_iter = cfg.lr_warmup_epochs * len(trainloader)
if x < warmup_iter:
factor = x / warmup_iter
else:
_cur = x - warmup_iter + 1
_total = epochs * len(trainloader)
factor = cfg.lrf + 0.5 * (1 - cfg.lrf) * (1 + math.cos(_cur * math.pi / _total))
return factor
scheduler = LambdaLR(optimizer, lr_lambda=warmup_cosine, last_epoch=start_epoch - 1)
# SyncBatchNorm
if local_rank != -1 and cfg.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# Exponential moving average
if IS_MAIN and cfg.ema:
emas = [
ModelEMA(model, decay=0.99),
ModelEMA(model, decay=0.999),
ModelEMA(model, decay=0.9999)
]
for ema in emas:
ema.updates = start_epoch * len(trainloader) # set EMA updates
ema.warmup = cfg.ema_warmup_epochs * len(trainloader) # set EMA warmup
else:
emas = None
# DDP mode
if local_rank != -1:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
# ======================== start training ========================
niter = s = None
for epoch in range(start_epoch, epochs):
model.train()
if local_rank != -1:
trainloader.sampler.set_epoch(epoch)
optimizer.zero_grad()
pbar = enumerate(trainloader)
train_loss, train_acc = 0.0, 0.0
if IS_MAIN:
pbar_title = ('%-10s' * 6) % (
'Epoch', 'GPU_mem', 'lr', 'tr_loss', 'tr_acc', metric
)
print('\n' + pbar_title) # title
pbar = tqdm(pbar, total=len(trainloader))
for i, (imgs, labels) in pbar:
# debugging
# if True:
# import matplotlib.pyplot as plt
# from mycv.datasets.food101 import CLASS_NAMES
# for im, lbl in zip(imgs, labels):
# im = im * trainset._input_std + trainset._input_mean
# im = im.permute(1,2,0).numpy()
# print(CLASS_NAMES[lbl])
# plt.imshow(im); plt.show()
imgs = imgs.to(device=device)
labels = labels.to(device=device)
# forward
with amp.autocast(enabled=cfg.amp):
p = model(imgs)
loss = loss_func(p, labels) * imgs.shape[0]
if local_rank != -1:
loss = loss * world_size
# loss is averaged within each image, summed over the batch, and summed over GPUs
# backward, update
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
if emas:
for ema in emas:
ema.update(model)
# Scheduler
scheduler.step()
# logging
if IS_MAIN:
niter = epoch * len(trainloader) + i
cur_lr = optimizer.param_groups[0]['lr']
loss = loss.detach().cpu().item()
acc = cal_acc(p.detach(), labels)
train_loss = (train_loss*i + loss) / (i+1)
train_acc = (train_acc*i + acc) / (i+1)
mem = torch.cuda.max_memory_allocated(device) / 1e9
s = ('%-10s' * 2 + '%-10.4g' * 4) % (
f'{epoch}/{epochs-1}', f'{mem:.3g}G',
cur_lr, train_loss, 100*train_acc, 100*cur_fitness
)
pbar.set_description(s)
torch.cuda.reset_peak_memory_stats()
# Weights & Biases logging
if niter % 100 == 0:
wbrun.log({
'general/lr': cur_lr,
'metric/train_loss': train_loss,
'metric/train_acc': train_acc,
'ema/n_updates': emas[0].updates if emas is not None else 0,
'ema0/decay': emas[0].get_decay() if emas is not None else 0,
'ema1/decay': emas[1].get_decay() if emas is not None else 0,
'ema2/decay': emas[2].get_decay() if emas is not None else 0,
}, step=niter)
# logging end
# ----Mini batch end
# ----Epoch end
# If DDP mode, synchronize model parameters on all gpus
if local_rank != -1:
model._sync_params_and_buffers(authoritative_rank=0)
# Evaluation
if IS_MAIN:
# results is like {'top1': xxx, 'top5': xxx}
_log_dic = {'general/epoch': epoch}
results = imagenet_val(model, split=val_split, testloader=testloader)
_log_dic.update({'metric/plain_val_'+k: v for k,v in results.items()})
res_emas = torch.zeros(len(emas)) if emas is not None else torch.zeros(0)  # guard: emas is None when EMA is disabled
if emas is not None:
for ei, ema in enumerate(emas):
results = imagenet_val(ema.ema, split=val_split, testloader=testloader)
_log_dic.update({f'metric/ema{ei}_val_'+k: v for k,v in results.items()})
res_emas[ei] = results[metric]
# select best result among all emas
_idx = torch.argmax(res_emas)
cur_fitness = res_emas[_idx]
_save_model = emas[_idx].ema
best_decay = emas[_idx].final_decay
else:
cur_fitness = results[metric]
_save_model = model
best_decay = 0
# wandb log
wbrun.log(_log_dic, step=niter)
# Write evaluation results
res = s + '||' + '%10.4g' % results[metric]
with open(log_dir / 'results.txt', 'a') as f:
f.write(res + '\n')
# save last checkpoint
checkpoint = {
'model' : _save_model.state_dict(),
'optimizer' : optimizer.state_dict(),
'scaler' : scaler.state_dict(),
'epoch' : epoch,
metric : cur_fitness,
'best_decay': best_decay
}
torch.save(checkpoint, log_dir / 'last.pt')
# save best checkpoint
if cur_fitness > best_fitness:
best_fitness = cur_fitness
torch.save(checkpoint, log_dir / 'best.pt')
del checkpoint
# ----Epoch end
# ----Training end
if __name__ == '__main__':
train()
# from mycv.models.cls.resnet import resnet50
# model = resnet50(num_classes=1000)
# weights = torch.load('weights/resnet50-19c8e357.pth')
# model.load_state_dict(weights)
# model = model.cuda()
# model.eval()
# results = imagenet_val(model, img_size=224, batch_size=64, workers=4)
# print(results['top1'])
|
"""
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# smcrm/
APPS_DIR = ROOT_DIR / "smcrm"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"django_celery_beat",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
]
LOCAL_APPS = [
"smcrm.users.apps.UsersConfig",
"smcrm.projects.apps.ProjectsConfig",
"smcrm.developers.apps.DevelopersConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "smcrm.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"smcrm.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = False
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Konstantin Moiseenko""", "moiseenko.k.s@gmail.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# Celery
# ------------------------------------------------------------------------------
if USE_TZ:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env("CELERY_BROKER_URL")
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ["json"]
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_SOFT_TIME_LIMIT = 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#beat-scheduler
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "smcrm.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "smcrm.users.adapters.SocialAccountAdapter"
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
INSTALLED_APPS += ["compressor"]
STATICFILES_FINDERS += ["compressor.finders.CompressorFinder"]
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
CORS_URLS_REGEX = r"^/api/.*$"
# Your stuff...
# ------------------------------------------------------------------------------
|
# -*- coding: utf-8 -*-
"""
Fetch YCY (Yang Chaoyue) images
"""
import json
import os
import requests
from settings import PROJECT_PATH
class YCYImage(object):
def __init__(self):
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
# "Content-Type": "application/x-www-form-urlencoded",
}
def get_img(self):
"""获取100页的图片链接"""
url = "https://www.duitang.com/napi/blog/list/by_search/"
result = []
for page in range(0, 240, 24):
data = {
'kw': '杨超越',
'type': 'feed',
'include_fields': 'top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id',
'_type': '',
'start': str(page),
}
r = requests.get(url, headers=self.headers, params=data, verify=False)
d = json.loads(r.text)
if d.get('data').get('object_list'):
d = d['data']['object_list']
result.extend(d)
return result
def download_img_and_save(self, result):
"""下载图片并保存"""
if not result:
return
for index, d in enumerate(result):
r = requests.get(url=d['photo']['path'])
file_name = os.path.join(PROJECT_PATH, "pics", "ycy_{}.jpg".format(index))
with open(file_name, 'wb') as f:
f.write(r.content)
def run(self):
result = self.get_img()
self.download_img_and_save(result)
if __name__ == '__main__':
ycy = YCYImage()
ycy.run()
|
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import copy
from io import StringIO
import os
import sys
import traceback
from dace.sdfg import SDFG
from dace.transformation.optimizer import Optimizer
class TransformationTester(Optimizer):
""" An SDFG optimizer that consecutively applies available transformations
up to a fixed depth. """
def __init__(self,
sdfg: SDFG,
depth=1,
validate=True,
generate_code=True,
compile=False,
print_exception=True,
halt_on_exception=False):
""" Creates a new Transformation tester, which brute-forces applying the
available transformations up to a certain level.
:param sdfg: The SDFG to transform.
:param depth: The number of levels to run transformations. For
instance, depth=1 means to only run immediate
transformations, whereas depth=2 would run
transformations resulting from those transformations.
:param validate: If True, the SDFG is validated after applying.
:param generate_code: If True, the SDFG will generate code after
transformation.
:param compile: If True, the SDFG will be compiled after applying.
:param print_exception: If True, prints exception when it is raised.
:param halt_on_exception: If True, stops when a transformation
raises an exception.
"""
super().__init__(sdfg)
self.depth = depth
self.validate = validate
self.generate_code = generate_code
self.compile = compile
self.print_exception = print_exception
self.halt_on_exception = halt_on_exception
self.passed_tests = 0
self.failed_tests = 0
self.stdout = sys.stdout
self.stderr = sys.stderr
def _optimize_recursive(self, sdfg: SDFG, depth: int):
if depth == self.depth:
return
matches = list(self.get_pattern_matches(sdfg=sdfg))
# Apply each transformation
for match in matches:
# Copy the SDFG
new_sdfg: SDFG = copy.deepcopy(sdfg)
# Try to apply, handle any exception
try:
# Redirect outputs
output = StringIO()
sys.stdout = output
sys.stderr = output
print(' ' * depth,
type(match).__name__,
'- ',
end='',
file=self.stdout)
tsdfg: SDFG = new_sdfg.sdfg_list[match.sdfg_id]
match.apply(tsdfg)
sdfg.save(os.path.join('_dacegraphs', 'program.sdfg'))
# Validate
if self.validate:
new_sdfg.validate()
# Expand library nodes
new_sdfg.expand_library_nodes()
# Generate code
if self.generate_code:
new_sdfg.generate_code()
if self.compile:
compiled = new_sdfg.compile()
del compiled
print('PASS', file=self.stdout)
self.passed_tests += 1
# Recursively optimize as necessary
self._optimize_recursive(sdfg, depth + 1)
except: # Literally anything can happen here
print('FAIL', file=self.stdout)
self.failed_tests += 1
if self.halt_on_exception:
print(output.getvalue(), file=self.stderr)
raise
if self.print_exception:
print(output.getvalue(), file=self.stderr)
traceback.print_exc(file=self.stderr)
continue
finally:
# Restore redirected outputs
sys.stdout = self.stdout
sys.stderr = self.stderr
def optimize(self):
self._optimize_recursive(self.sdfg, 0)
if self.failed_tests > 0:
raise RuntimeError(
'%d / %d transformations passed' %
(self.passed_tests, self.passed_tests + self.failed_tests))
return self.sdfg
if __name__ == '__main__':
import dace
@dace.program
def example(A: dace.float32[2]):
A *= 2
sdfg = example.to_sdfg()
tt = TransformationTester(sdfg, 2, halt_on_exception=True)
tt.optimize()
print('SUMMARY: %d / %d tests passed' %
(tt.passed_tests, tt.passed_tests + tt.failed_tests))
|
import pygame
from snake.resources.constants import BLOCK_SIZE, WIDTH, HEIGHT
from snake.resources.directions import Direction
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
self.f = 0
self.g = 0
self.h = 0
self.neighbors = []
self.origin = None
def __eq__(self, point):
return self.__class__ == point.__class__ and self.x == point.x and self.y == point.y
def plot(self, display, color):
'''Plots the point with given color and fixed size'''
pygame.draw.rect(display, color, pygame.Rect(self.x, self.y, BLOCK_SIZE, BLOCK_SIZE))
def get_direction(self):
'''Determine the direction of movement from the origin point to this point'''
if self.x == self.origin.x and self.y < self.origin.y:
return Direction.UP
elif self.x == self.origin.x and self.y > self.origin.y:
return Direction.DOWN
elif self.x < self.origin.x and self.y == self.origin.y:
return Direction.LEFT
elif self.x > self.origin.x and self.y == self.origin.y:
return Direction.RIGHT
def generate_neighbors(self):
'''Generates neighbors for point object'''
if self.x > 0:
self.neighbors.append(Point(self.x - BLOCK_SIZE, self.y))
if self.y > 0:
self.neighbors.append(Point(self.x, self.y - BLOCK_SIZE))
if self.x < WIDTH - BLOCK_SIZE:
self.neighbors.append(Point(self.x + BLOCK_SIZE, self.y))
if self.y < HEIGHT - BLOCK_SIZE:
self.neighbors.append(Point(self.x, self.y + BLOCK_SIZE))
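# Illustrative example (assuming BLOCK_SIZE == 20 and a board larger than a few
# blocks): Point(20, 20).generate_neighbors() fills point.neighbors with the
# four axis-aligned neighbours (0, 20), (20, 0), (40, 20) and (20, 40),
# omitting any that would fall outside the board.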
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generating a 4D synthetic data set with noise.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A 2D space, time and frequency data set is generated for testing purposes in
reda.
"""
###############################################################################
# imports
import os
from glob import glob
import numpy as np
import crtomo
import reda
###############################################################################
# Generate the forward models
frequencies = np.logspace(-3, 3, 5)
grid = crtomo.crt_grid(
'data_synthetic_4d/elem.dat', 'data_synthetic_4d/elec.dat'
)
# this context manager makes sure that all output is relative to the given
# directory
with reda.CreateEnterDirectory('output_synthetic_4d'):
for nr, anomaly_z_pos in enumerate(range(0, -10, -3)):
outdir = 'modV_{:02}'.format(nr)
if os.path.isdir(outdir):
continue
sinv = crtomo.eitMan(grid=grid, frequencies=frequencies)
sinv.add_homogeneous_model(100, 0)
sinv.set_area_to_single_colecole(
18, 22, anomaly_z_pos - 2.0, anomaly_z_pos,
[100, 0.1, 0.04, 0.6]
)
r = sinv.plot_forward_models()
r['rmag']['fig'].savefig('forward_rmag_{:02}.pdf'.format(nr))
r['rpha']['fig'].savefig('forward_rpha_{:02}.pdf'.format(nr))
for f, td in sinv.tds.items():
td.configs.gen_dipole_dipole(skipc=0, nr_voltage_dipoles=40)
td.configs.gen_reciprocals(append=True)
r = sinv.measurements()
sinv.save_measurements_to_directory(outdir)
# plot pseudosections
Vdirs = sorted(glob('modV*'))
for nr, Vdir in enumerate(Vdirs):
seit = reda.sEIT()
seit.import_crtomo(Vdir)
seit.compute_K_analytical(spacing=1)
seit.plot_pseudosections(
'r', return_fig=True
).savefig('ps_r_{:02}.jpg'.format(nr), dpi=300)
seit.plot_pseudosections(
'rho_a', return_fig=True
).savefig('ps_rho_a_{:02}.jpg'.format(nr), dpi=300)
seit.plot_pseudosections(
'rpha', return_fig=True
).savefig('ps_rpha_{:02}.jpg'.format(nr), dpi=300)
###############################################################################
# now generate noisy data
# this context manager makes sure that all output is relative to the given
# directory
with reda.CreateEnterDirectory('output_synthetic_4d'):
Vdirs = sorted(glob('modV*'))
for nr, Vdir in enumerate(Vdirs):
seit = reda.sEIT()
seit.import_crtomo(Vdir)
seit.compute_K_analytical(spacing=1)
# use different seeds for different time steps
np.random.seed(34 + nr)
noise = np.random.normal(loc=0, scale=1, size=seit.data.shape[0])
r_save = seit.data['r'].values.copy()
seit.data['r'] = r_save + noise * r_save / 8000.0 * np.log(seit.data['k'])
seit.data['rho_a'] = seit.data['r'] * seit.data['k']
seit.plot_pseudosections(
'rho_a', return_fig=True
).savefig('noisy_ps_rho_a_{:02}.jpg'.format(nr), dpi=300)
rpha_save = seit.data['rpha'].values.copy()
noise_rpha = np.random.normal(loc=0, scale=1, size=seit.data.shape[0])
seit.data['rpha'] = rpha_save + noise_rpha * rpha_save / 10.0
seit.plot_pseudosections(
'rpha', return_fig=True
).savefig('noisy_ps_rpha_{:02}.jpg'.format(nr), dpi=300)
seit.export_to_crtomo_multi_frequency(Vdir + '_noisy')
|
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app, json_contacts, db, check_ui):
contact=json_contacts
app.open_home_page()
old_contacts = db.get_contact_list()
app.contact.create(contact)
assert len(old_contacts) + 1 == app.contact.count()
new_contacts = db.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key = Contact.id_con_max) == sorted(new_contacts, key = Contact.id_con_max)
if check_ui:
assert sorted(old_contacts, key=Contact.id_con_max) == sorted(app.group.get_contact_list(), key=Contact.id_con_max)
|
# author: Carlina Kim, Karanpal Singh, Sukriti Trehan, Victor Cuspinera
# date: 2020-06-21
'''This script reads the saved theme/subtheme model(s), the padded validation sets and the y validation sets,
evaluates the model(s), and saves the evaluation results in the specified directory.
It takes two parameters: the model level ('theme' or 'subtheme') and the output directory for the results.
Usage: model_evaluate.py --level='theme' --output_dir=<destination_dir_path>
Example:
python src/models/model_evaluate.py --level='theme' --output_dir=reports/
python src/models/model_evaluate.py --level='subtheme' --output_dir=reports/
Options:
--level=<level>                     Model level to evaluate: 'theme' or 'subtheme'
--output_dir=<destination_dir_path> Directory for saving evaluated results
'''
import pandas as pd
import numpy as np
from docopt import docopt
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, precision_recall_curve
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
opt = docopt(__doc__)
print("\n-----START: model_evaluate.py-----\n")
def main(level, output_dir):
"""
Takes the input level and calls model_evaluate class with
output_dir as argument
"""
me = model_evaluate()
me.get_evaluations(level=level, output_dir=output_dir)
print('Thanks for your patience, the evaluation process has finished!\n')
print('----END: model_evaluate.py----\n')
return
class model_evaluate:
# Loads data and evaluates saved theme model and subtheme models on validation set
def eval_metrics(self, model_name, x_valid, y_valid, level='theme'):
"""
Evaluates model results at different threshold levels and produces a metrics
table and precision-recall curves
Parameters
-----------
model_name: (TensforFlow Saved model)
x_valid: (pandas dataframe) dataframe with validation comments
y_valid: (numpy array) array with labels
level: (string) Takes value 'theme' or 'subtheme' to evaluate accordingly
Returns
-------
Pandas DataFrame or matplotlib plot
dataframe with evaluation metrics including precision, recall, f1 score at
different threshold values
"""
pred_values = model_name.predict(x_valid)
if level == 'theme':
precision_dict = dict()
recall_dict = dict()
thresh_dict = dict()
precision_dict["BiGRU + Fasttext"], recall_dict["BiGRU + Fasttext"], thresh_dict["BiGRU + Fasttext"] = precision_recall_curve(y_valid.ravel(), pred_values.ravel())
labels = []
labels = list(precision_dict.keys())
plt.figure()
plt.step(recall_dict['BiGRU + Fasttext'], precision_dict['BiGRU + Fasttext'], where='post', color='orange')
plt.xlabel('Recall', fontsize=18)
plt.ylabel('Precision', fontsize=18)
plt.axhline(y=0.743643, xmin=0, xmax=0.71, ls='--', color="cornflowerblue")
plt.axvline(x=0.705382, ymin=0, ymax=0.71, ls='--', color="cornflowerblue")
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(labels, loc=(1.01, .79), prop=dict(size=14))
plt.title('Precision Recall Curves for best performing model', fontsize = 18)
plt.savefig('reports/figures/pr_curve_valid_theme.png')
# PRECISION & RECALL
predictions_results = []
thresholds=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
for val in thresholds:
pred=pred_values.copy()
pred[pred>=val]=1
pred[pred<val]=0
accuracy = accuracy_score(y_valid, pred, normalize=True, sample_weight=None)
precision = precision_score(y_valid, pred, average='micro')
recall = recall_score(y_valid, pred, average='micro')
f1 = f1_score(y_valid, pred, average='micro')
case= {'Threshold': val,
'Accuracy': accuracy,
'Precision': precision,
'Recall': recall,
'F1-measure': f1}
predictions_results.append(case)
return pd.DataFrame(predictions_results)
def get_evaluations(self, level, output_dir):
"""
Evaluates models by using eval_metrics function
"""
if level == 'theme':
print("**Loading data**")
x_valid = np.load('data/interim/question1_models/advance/X_valid_padded.npy')
y_valid = np.load('data/interim/question1_models/advance/y_valid.npy')
print("**Loading the saved theme model**")
model = tf.keras.models.load_model('models/Theme_Model/theme_model')
print("**Predicting on validation set using saved model and evaluating metrics**")
results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid)
print("**Saving results**")
results.to_csv(output_dir + '/tables/theme_tables/theme_valid_eval.csv')
print("Evaluations saved to reports/")
else:
print("Loading data and evaluating the subthemes model on validation set")
themes = ['CPD', 'CB', 'EWC', 'Exec', 'FWE',
'SP', 'RE', 'Sup', 'SW', 'TEPE', 'VMG', 'OTH']
for label in themes:
print("****Label:", label, "****")
print("**Loading data**")
x_valid = np.load('data/interim/subthemes/' + str(label) + '/X_valid_padded.npy')
# self.x_valids.append(x_valid)
y_valid = np.load('data/interim/subthemes/' + str(label) + '/y_valid.npy')
# self.y_valids.append(y_valid)
print("**Loading the saved subtheme model**")
model = tf.keras.models.load_model('models/Subtheme_Models/' + str(label).lower() + '_model')
# self.models.append(model)
print("**Predicting on validation set using saved model and evaluating metrics**")
results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid, level = 'subtheme')
print("**Saving results**")
results.to_csv(output_dir + '/tables/subtheme_tables/' + str(label).lower() + '_valid_eval.csv')
print("Process of subtheme", label, "model completed\n")
print("Evaluations saved to reports/tables")
if __name__ == "__main__":
main(opt["--level"], opt["--output_dir"])
|
"""Learn embedding weights with LBANN."""
import argparse
import os.path
import numpy as np
import lbann
import lbann.contrib.launcher
import lbann.contrib.args
import data.data_readers
import model.random_projection
import utils
import utils.graph
import utils.snap
root_dir = os.path.dirname(os.path.realpath(__file__))
# ----------------------------------
# Options
# ----------------------------------
# Command-line arguments
parser = argparse.ArgumentParser()
lbann.contrib.args.add_scheduler_arguments(parser)
parser.add_argument(
'--job-name', action='store', default='lbann_node2vec', type=str,
help='job name', metavar='NAME')
parser.add_argument(
'--graph', action='store', default='youtube', type=str,
help='graph name (see utils.snap.download_graph) or edgelist file',
metavar='NAME')
parser.add_argument(
'--mini-batch-size', action='store', default=256, type=int,
help='mini-batch size (default: 256)', metavar='NUM')
parser.add_argument(
'--num-iterations', action='store', default=1000, type=int,
help='number of training iterations (default: 1000)', metavar='NUM')
parser.add_argument(
'--proj_dim', action='store', default=1024, type=int,
help='projection space dimensions (default: 1024)', metavar='NUM')
parser.add_argument(
'--latent-dim', action='store', default=128, type=int,
help='latent space dimensions (default: 128)', metavar='NUM')
parser.add_argument(
'--learning-rate', action='store', default=-1, type=float,
help='learning rate (default: 0.25*mbsize)', metavar='VAL')
parser.add_argument(
'--work-dir', action='store', default=None, type=str,
help='working directory', metavar='DIR')
parser.add_argument(
'--batch-job', action='store_true',
help='submit as batch job')
parser.add_argument(
'--offline-walks', action='store_true',
help='perform random walks offline')
args = parser.parse_args()
# Default learning rate
# Note: Learning rate in original word2vec is 0.025
if args.learning_rate < 0:
args.learning_rate = 0.25 * args.mini_batch_size
# Random walk options
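# return_param and inout_param are presumably node2vec's p and q: the walk
# returns to the previous vertex with weight 1/p and moves to vertices farther
# from it with weight 1/q, so small p encourages backtracking and small q
# encourages outward (DFS-like) exploration.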
epoch_size = 100 * args.mini_batch_size
walk_length = 100
return_param = 0.25
inout_param = 0.25
num_negative_samples = 0
# ----------------------------------
# Create data reader
# ----------------------------------
# Download graph if needed
if os.path.exists(args.graph):
graph_file = args.graph
else:
graph_file = utils.snap.download_graph(args.graph)
# Construct data reader
if args.offline_walks:
# Note: Graph and walk parameters are fully specified in module
# for offline walks
import data.offline_walks
graph_file = data.offline_walks.graph_file
epoch_size = data.offline_walks.num_samples()
walk_length = data.offline_walks.walk_length
return_param = data.offline_walks.return_param
inout_param = data.offline_walks.inout_param
num_negative_samples = data.offline_walks.num_negative_samples
reader = data.data_readers.make_offline_data_reader()
else:
# Note: Preprocess graph with HavoqGT and store in shared memory
# before starting LBANN.
distributed_graph_file = '/dev/shm/graph'
reader = data.data_readers.make_online_data_reader(
graph_file=distributed_graph_file,
epoch_size=epoch_size,
walk_length=walk_length,
return_param=return_param,
inout_param=inout_param,
num_negative_samples=num_negative_samples,
)
sample_size = num_negative_samples + walk_length
# Parse graph file to get number of vertices
num_vertices = utils.graph.max_vertex_index(graph_file) + 1
# ----------------------------------
# Construct layer graph
# ----------------------------------
obj = []
metrics = []
# Autoencoder
# Note: Input is sequence of vertex IDs
input_ = lbann.Identity(lbann.Input())
proj = model.random_projection.random_projection(
input_,
sample_size,
args.proj_dim,
)
autoencoder = model.random_projection.ChannelwiseFullyConnectedAutoencoder(
args.proj_dim,
args.latent_dim,
[],
)
proj_recon = autoencoder(proj)
# Mean square error loss
scale_decay = 0.5
loss = model.random_projection.mean_squared_error(
data_dim=args.proj_dim,
sequence_length=walk_length,
source_sequence=proj_recon,
target_sequence=proj,
scale_decay=scale_decay,
)
obj.append(loss)
# ----------------------------------
# Run LBANN
# ----------------------------------
# Create optimizer
opt = lbann.SGD(learn_rate=args.learning_rate)
# Create LBANN objects
iterations_per_epoch = utils.ceildiv(epoch_size, args.mini_batch_size)
num_epochs = utils.ceildiv(args.num_iterations, iterations_per_epoch)
trainer = lbann.Trainer(
mini_batch_size=args.mini_batch_size,
num_parallel_readers=0,
)
callbacks = [
lbann.CallbackPrint(),
lbann.CallbackTimer(),
lbann.CallbackDumpWeights(directory='weights',
epoch_interval=num_epochs),
]
model = lbann.Model(
num_epochs,
layers=lbann.traverse_layer_graph(input_),
objective_function=obj,
metrics=metrics,
callbacks=callbacks,
)
# Create batch script
kwargs = lbann.contrib.args.get_scheduler_kwargs(args)
script = lbann.contrib.launcher.make_batch_script(
job_name=args.job_name,
work_dir=args.work_dir,
**kwargs,
)
# Preprocess graph data with HavoqGT if needed
if not args.offline_walks:
ingest_graph_exe = os.path.join(
root_dir,
'build',
'havoqgt',
'src',
'ingest_edge_list',
)
script.add_parallel_command([
ingest_graph_exe,
f'-o {distributed_graph_file}',
f'-d {2**30}',
'-u 1',
graph_file,
])
# LBANN invocation
prototext_file = os.path.join(script.work_dir, 'experiment.prototext')
lbann.proto.save_prototext(
prototext_file,
trainer=trainer,
model=model,
data_reader=reader,
optimizer=opt,
)
script.add_parallel_command([
lbann.lbann_exe(),
f'--prototext={prototext_file}',
f'--num_io_threads=1',
])
# Run LBANN
if args.batch_job:
script.submit(True)
else:
script.run(True)
|
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os.path as path
from bes.fs.file_util import file_util
from bes.fs.temp_file import temp_file
from bes.testing.unit_test import unit_test
from bes.testing.framework import unit_test_inspect as UTI
from bes.testing.unit_test_skip import raise_skip
class test_unit_test_inspect(unit_test):
@classmethod
def setUpClass(clazz):
raise_skip('broken')
def test_inspect_file(self):
content = '''
import unittest
class test_apple_fixture(unittest.TestCase):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def test_inspect_file_not_unit_test(self):
content = '''
class test_apple_fixture(object):
def test_foo(self):
pass
def test_bar(self):
pass
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [], UTI.inspect_file(filename) )
file_util.remove(filename)
def test_inspect_file_disabled(self):
content = '''
import unittest
class test_apple_fixture(unittest.TestCase):
def xtest_foo(self):
self.assertEqual( 6, 3 + 3 )
def xtest_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def doesnt_work_test_inspect_file_TestCase_subclass(self):
content = '''
import unittest
class unit_super(unittest.TestCase):
_x = 5
class test_apple_fixture(unit_super):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
class somthing(unittest.TestCase):
pass
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def test_inspect_file_unit_test(self):
content = '''
from bes.testing.unit_test import unit_test
class test_apple_fixture(unit_test):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
if __name__ == '__main__':
unit_test.main()
|
import json
from vcx.api.connection import Connection
from utils import init_vcx, run_coroutine_in_new_loop
from connection import BaseConnection
class Inviter(BaseConnection):
async def start(self):
await init_vcx()
print("Create a connection to alice and print out the invite details")
connection_ = await Connection.create('alice')
await connection_.connect('{"use_public_did": true}')
await connection_.update_state()
details = await connection_.invite_details(False)
print("**invite details**")
print(json.dumps(details))
print("******************")
self.connection_data = await connection_.serialize()
connection_.release()
return json.dumps(details)
def connect(self):
run_coroutine_in_new_loop(self.update_state)
|
###################################################################
import logging
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import os.path
import uuid
from os import environ
import json
from tornado.options import define, options
import mimetypes
import random
from tornadose.handlers import EventSource
from tornadose.stores import DataStore
###################################################################
import serverlogic
from utils.file import read_string_from_file
###################################################################
teststore = DataStore()
###################################################################
define("port", default=environ.get("PORT", 5000), help="run on the given port", type=int)
###################################################################
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/gif.worker.js", GifWorker),
(r"/static/.*", MyStaticFileHandler),
(r"/jsonapi", JsonApi),
(r"/importstudy/.*", ImportStudy),
(r"/test", Test),
(r"/docs/.*", Docs),
(r"/chatsocket", ChatSocketHandler),
(r"/testevents", EventSource, {'store': teststore}),
(r"/enginelog", EventSource, {'store': serverlogic.mainenginelog.datastore})
]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
#static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=False,
)
super(Application, self).__init__(handlers, **settings)
class GifWorker(tornado.web.RequestHandler):
def get(self):
with open("static/js/gif.worker.js", 'rb') as f:
data = f.read()
self.write(data)
class MyStaticFileHandler(tornado.web.RequestHandler):
def get(self):
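# Reviewer note (assumption, not original behaviour): the request path is used
# as a filesystem path without normalisation; a hardened version would resolve
# '..' components and check the result stays inside the static directory.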
path = self.request.path
filepath = path[1:]
if not os.path.isfile(filepath):
self.set_status(404)
return
mimetype = mimetypes.guess_type(path)
if mimetype[0]:
self.set_header("Content-Type", mimetype[0])
with open(filepath, 'rb') as f:
data = f.read()
self.write(data)
class MainHandler(tornado.web.RequestHandler):
def get(self):
#print(self.request.__dict__)
self.render("index.html", messages=ChatSocketHandler.cache)
class JsonApi(tornado.web.RequestHandler):
def post(self):
reqobj = json.loads(self.request.body.decode('utf-8'))
resobj = serverlogic.jsonapi(reqobj)
self.set_header("Content-Type", "application/json")
self.write(json.dumps(resobj))
class ImportStudy(tornado.web.RequestHandler):
def get(self):
path = self.request.path
parts = path.split("/")
paramindex = parts.index("importstudy") + 1
if ( len(parts) - paramindex ) < 2:
self.write("too few parameters, usage: /importstudy/[usercode]/[studyid]")
return
usercode = parts[paramindex]
studyid = parts[paramindex + 1]
nodeid = "root"
if ( paramindex + 2 ) < len(parts):
nodeid = parts[paramindex + 2]
self.redirect(f"/?task=importstudy&usercode={usercode}&studyid={studyid}&nodeid={nodeid}&tab=board&boardtab=tree")
class Test(tornado.web.RequestHandler):
def get(self):
self.write(read_string_from_file("templates/test.html", "test"))
class Docs(tornado.web.RequestHandler):
def get(self):
path = self.request.path
parts = path.split("/")
self.write(read_string_from_file("docs/" + parts[2] + ".md", "Pgn Editor."))
class ChatSocketHandler(tornado.websocket.WebSocketHandler):
waiters = set()
cache = []
cache_size = 200
def get_compression_options(self):
# Non-None enables compression with default options.
return {}
def open(self):
ChatSocketHandler.waiters.add(self)
def on_close(self):
ChatSocketHandler.waiters.remove(self)
@classmethod
def update_cache(cls, chat):
cls.cache.append(chat)
if len(cls.cache) > cls.cache_size:
cls.cache = cls.cache[-cls.cache_size :]
@classmethod
def send_updates(cls, chat):
logging.info("sending message to %d waiters", len(cls.waiters))
for waiter in cls.waiters:
try:
waiter.write_message(chat)
except:
logging.error("Error sending message", exc_info=True)
def on_message(self, message):
logging.info("got message %r", message)
parsed = tornado.escape.json_decode(message)
chat = {"id": str(uuid.uuid4()), "body": parsed["body"]}
chat["html"] = tornado.escape.to_basestring(
self.render_string("message.html", message=chat)
)
ChatSocketHandler.update_cache(chat)
ChatSocketHandler.send_updates(chat)
def main():
tornado.options.parse_command_line()
app = Application()
app.listen(options.port)
tornado.ioloop.PeriodicCallback(lambda: teststore.submit(random.random()), 1000).start()
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
###################################################################
|
# Copyright 2017 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import cast_as_call
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit import policy_fixture
class ServerListLimitMarkerCell0Test(test.TestCase,
integrated_helpers.InstanceHelperMixin):
"""Regression test for bug 1689692 introduced in Ocata.
The user specifies a limit that is greater than the number of instances
left in the page, and the marker starts in the cell0 database. Because the
marker is not nulled out while there is still limit remaining, we continue
to page in the cell database(s); the marker is never found there, since it
was already consumed in cell0, so the listing eventually raises a
MarkerNotFound error.
"""
def setUp(self):
super(ServerListLimitMarkerCell0Test, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
# The NeutronFixture is needed to stub out validate_networks in API.
self.useFixture(nova_fixtures.NeutronFixture(self))
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# the image fake backend needed for image discovery
image_fake.stub_out_image_service(self)
self.addCleanup(image_fake.FakeImageService_reset)
# We have to get the image before we use 2.latest otherwise we'll get
# a 404 on the /images proxy API because of 2.36.
self.image_id = self.api.get_images()[0]['id']
# Use the latest microversion available to make sure something does
# not regress in new microversions; cap as necessary.
self.api.microversion = 'latest'
self.start_service('conductor')
self.flags(driver='chance_scheduler', group='scheduler')
self.start_service('scheduler')
# We don't start the compute service because we want NoValidHost so
# all of the instances go into ERROR state and get put into cell0.
self.useFixture(cast_as_call.CastAsCall(self.stubs))
def test_list_servers_marker_in_cell0_more_limit(self):
"""Creates three servers, then lists them with a marker on the first
and a limit of 3 which is more than what's left to page on (2) but
it shouldn't fail, it should just give the other two back.
"""
# create three test servers
for x in range(3):
server = self.api.post_server(
dict(server=self._build_minimal_create_server_request(
self.api, 'test-list-server-limit%i' % x, self.image_id,
networks='none')))
self.addCleanup(self.api.delete_server, server['id'])
self._wait_for_state_change(self.api, server, 'ERROR')
servers = self.api.get_servers()
self.assertEqual(3, len(servers))
# Take the first server and use that as our marker.
marker = servers[0]['id']
# Since we're paging after the first server as our marker, there are
# only two left so specifying three should just return two.
servers = self.api.get_servers(search_opts=dict(marker=marker,
limit=3))
self.assertEqual(2, len(servers))
|
import collections
import io
from typing import BinaryIO, Tuple, Union
import warnings
import numpy
import skimage.io
import skimage.measure
import skimage.morphology
import skimage.segmentation
import skimage.transform
from .base import BaseSegmentationHelper
class ScikitSegmentationHelper(BaseSegmentationHelper):
@classmethod
def loadImage(cls, imageDataStream: Union[BinaryIO, str]) -> numpy.ndarray:
"""
Load an image into an RGB array.
:param imageDataStream: A file-like object containing the encoded
(JPEG, etc.) image data or a file path.
:return: A Numpy array with the RGB image data.
"""
imageData = skimage.io.imread(imageDataStream, plugin='pil')
if len(imageData.shape) == 1 and imageData.shape[0] > 1:
# Some images seem to have a 2nd (or 3rd+) layer, which should be ignored
# https://github.com/scikit-image/scikit-image/issues/2154
# The first element within the result should be the main image
imageData = imageData[0]
if len(imageData.shape) == 3 and imageData.shape[2] == 4:
# cv2.floodFill doesn't work correctly with array views, so copy
imageData = imageData[:, :, :3].copy()
return imageData
@classmethod
def writeImage(cls, image, encoding='png', width=None):
if width is not None:
factor = float(width) / image.shape[1]
image = skimage.transform.rescale(image, factor)
imageStream = io.BytesIO()
with warnings.catch_warnings():
# Ignore warnings about low contrast images, as masks are often empty
warnings.filterwarnings('ignore', r'^.* is a low contrast image$', UserWarning)
# The 'pil' plugin is about 40% faster than the default 'imageio' plugin
# The 'pil' plugin uses 'format_str' as an argument, not 'format'
skimage.io.imsave(imageStream, image, plugin='pil', format_str=encoding)
imageStream.seek(0)
return imageStream
@classmethod
def segment(cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int
) -> numpy.ndarray:
"""
Do a flood-fill segmentation of an image, yielding a single contiguous region with no holes.
:param image: A Numpy array with the image to be segmented.
:param seedCoord: (X, Y) coordinates of the segmentation seed point.
:param tolerance: The intensity tolerance value for the segmentation.
:return: The mask image of the segmented region, with values 0 or 255.
"""
maskImage = cls._floodFill(
image,
seedCoord,
tolerance)
# Now, fill in any holes in the maskImage
# First, add a padded border, allowing the next operation to reach
# around edge-touching components
maskImage = numpy.pad(maskImage, 1, 'constant', constant_values=1)
maskImageBackground = cls._floodFill(
maskImage,
# The seed point is a part of the padded border of maskImage
seedCoord=(0, 0),
# The seed point and border will have a value of 1, but we want to
# also include the actual mask background, which has a value of 0
tolerance=1)
# Remove the extra padding
maskImageBackground = maskImageBackground[1:-1, 1:-1]
# Flip the background, to get the mask with holes removed
maskImage = numpy.invert(maskImageBackground)
return maskImage
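# Note on the hole-filling trick above: the 1-pixel pad lets the corner seed at
# (0, 0) reach around any region that touches the image edge, while holes fully
# enclosed by the flood-filled region stay unreachable from that seed. They are
# therefore absent from maskImageBackground and become foreground after the
# final invert.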
@classmethod
def _clippedAdd(cls, array, value):
typeInfo = numpy.iinfo(array.dtype)
newArray = array.astype(int)
newArray += value
return newArray.clip(typeInfo.min, typeInfo.max).astype(array.dtype)
@classmethod
def _floodFill(
cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int,
connectivity: int = 8) -> numpy.ndarray:
"""
Segment an image into a region connected to a seed point, using Scikit-Image.
:param image: The image to be segmented.
:param seedCoord: The point inside the connected region where the
segmentation will start.
:param tolerance: The maximum color/intensity difference between the
seed point and a point in the connected region.
:param connectivity: (optional) The number of allowed connectivity
propagation directions. Allowed values are:
* 4 for edge pixels
* 8 for edge and corner pixels
:returns: A binary label mask.
The values are either ``0`` or ``255``.
"""
seedValue = image[seedCoord[1], seedCoord[0]]
seedValueMin = cls._clippedAdd(seedValue, -tolerance)
seedValueMax = cls._clippedAdd(seedValue, tolerance)
if connectivity == 4:
connectivityArg = 1
elif connectivity == 8:
connectivityArg = 2
else:
raise ValueError('Unknown connectivity value.')
binaryImage = numpy.logical_and(
image >= seedValueMin,
image <= seedValueMax
)
if len(image.shape) == 3:
# Reduce RGB components, requiring all to be within threshold
binaryImage = numpy.all(binaryImage, 2)
labelImage = skimage.measure.label(
binaryImage.astype(int),
return_num=False,
connectivity=connectivityArg
)
del binaryImage
maskImage = numpy.equal(
labelImage, labelImage[seedCoord[1], seedCoord[0]])
del labelImage
maskImage = maskImage.astype(numpy.uint8) * 255
return maskImage
@classmethod
def _structuringElement(cls, shape, radius, elementType=bool):
size = (radius * 2) + 1
if shape == 'circle':
element = skimage.morphology.disk(radius, elementType)
elif shape == 'cross':
element = numpy.zeros((size, size), elementType)
element[:, size // 2] = elementType(True)
element[size // 2, :] = elementType(True)
elif shape == 'square':
element = skimage.morphology.square(size, elementType)
else:
raise ValueError('Unknown element shape value.')
return element
@classmethod
def _binaryOpening(cls, image, elementShape='circle', elementRadius=5):
element = cls._structuringElement(elementShape, elementRadius, bool)
morphedImage = skimage.morphology.binary_opening(
image=image,
selem=element
)
return morphedImage
@classmethod
def _collapseCoords(cls, coords):
collapsedCoords = [coords[0]]
collapsedCoords.extend([
coord
for prevCoord, coord, nextCoord in zip(
coords[0:], coords[1:], coords[2:])
if numpy.cross(nextCoord - prevCoord, coord - prevCoord) != 0
])
collapsedCoords.append(coords[-1])
collapsedCoords = numpy.array(collapsedCoords)
return collapsedCoords
@classmethod
def maskToContour(cls, maskImage: numpy.ndarray) -> numpy.ndarray:
"""
Extract the contour line within a segmented label mask, using Scikit-Image.
:param maskImage: A binary label mask of numpy.uint8.
:return: An array of point pairs.
"""
if maskImage.dtype != numpy.uint8:
raise TypeError('maskImage must be an array of uint8.')
coords = skimage.measure.find_contours(
# TODO: threshold image more efficiently
array=maskImage.astype(bool).astype(numpy.double),
level=0.5,
fully_connected='low',
positive_orientation='low'
)
coords = numpy.fliplr(coords[0])
coords = cls._collapseCoords(coords)
return coords
@classmethod
def contourToMask(cls, imageShape: Tuple[int, int], coords: numpy.ndarray) -> numpy.ndarray:
"""
Convert a contour line to a label mask.
:param imageShape: The [Y, X] shape of the image.
:param coords: An array of point pairs.
:return: A binary label mask of numpy.uint8.
"""
maskImage = skimage.measure.grid_points_in_poly(
shape=imageShape,
verts=numpy.fliplr(coords)
).astype(numpy.uint8)
maskImage *= 255
return maskImage
@classmethod
def _slic(cls, image, numSegments=None, segmentSize=None):
compactness = 0.01 # make superpixels highly deformable
maxIter = 10
sigma = 2.0
if numSegments and segmentSize:
raise ValueError(
'Only one of numSegments or segmentSize may be set.')
elif numSegments:
pass
elif segmentSize:
numSegments = (image.shape[0] * image.shape[1]) / (segmentSize ** 2)
else:
raise ValueError('One of numSegments or segmentSize must be set.')
labelImage = skimage.segmentation.slic(
image,
n_segments=numSegments,
compactness=compactness,
max_iter=maxIter,
sigma=sigma,
enforce_connectivity=True,
min_size_factor=0.5,
slic_zero=True
)
return labelImage
class _PersistentCounter(object):
def __init__(self):
self.value = 0
def __call__(self):
ret = self.value
self.value += 1
return ret
@classmethod
def _uint64ToRGB(cls, val):
return numpy.dstack((
val.astype(numpy.uint8),
(val >> numpy.uint64(8)).astype(numpy.uint8),
(val >> numpy.uint64(16)).astype(numpy.uint8)
))
@classmethod
def _RGBTounit64(cls, val: numpy.ndarray) -> numpy.ndarray:
"""
Decode an RGB representation of a superpixel label into its native scalar value.
:param val: A single pixel, or a 3-channel image.
This is a numpy.ndarray of uint8, with a shape [3] or [n, m, 3].
"""
return \
(val[..., 0].astype(numpy.uint64)) + \
(val[..., 1].astype(numpy.uint64) << numpy.uint64(8)) + \
(val[..., 2].astype(numpy.uint64) << numpy.uint64(16))
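# Worked round trip for the label encoding (illustrative): label 70000 == 0x011170
# encodes via _uint64ToRGB to channels R=0x70 (112), G=0x11 (17), B=0x01 (1);
# decoding gives 112 + (17 << 8) + (1 << 16) == 70000.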
@classmethod
def superpixels(cls, image):
superpixelLabels = cls._slic(image, numSegments=1000)
superpixels = cls._uint64ToRGB(superpixelLabels)
return superpixels
@classmethod
def superpixels_legacy(cls, image, coords):
maskImage = cls.contourToMask(image.shape[:2], coords)
from .opencv import OpenCVSegmentationHelper
# This operation is much faster in OpenCV
maskImage = OpenCVSegmentationHelper._binaryOpening(
maskImage.astype(numpy.uint8),
elementShape='circle',
elementRadius=5
).astype(bool)
insideImage = image.copy()
insideImage[numpy.logical_not(maskImage)] = 0
insideSuperpixelLabels = cls._slic(insideImage, segmentSize=20)
outsideImage = image.copy()
outsideImage[maskImage] = 0
outsideSuperpixelLabels = cls._slic(outsideImage, segmentSize=60)
# https://stackoverflow.com/questions/16210738/implementation-of-numpy-in1d-for-2d-arrays
insideSuperpixelMask = numpy.in1d(
insideSuperpixelLabels.flat,
numpy.unique(insideSuperpixelLabels[maskImage])
).reshape(insideSuperpixelLabels.shape)
combinedSuperpixelLabels = outsideSuperpixelLabels.copy()
combinedSuperpixelLabels[insideSuperpixelMask] = \
insideSuperpixelLabels[insideSuperpixelMask] + \
outsideSuperpixelLabels.max() + 10000
labelValues = collections.defaultdict(cls._PersistentCounter())
for value in numpy.nditer(combinedSuperpixelLabels,
op_flags=['readwrite']):
value[...] = labelValues[value.item()]
combinedSuperpixels = cls._uint64ToRGB(combinedSuperpixelLabels)
return combinedSuperpixels
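# Illustrative usage sketch ('example.jpg' and the seed/tolerance values are
# assumptions, shown only to demonstrate the loadImage -> segment ->
# maskToContour flow):
if __name__ == '__main__':
    rgb = ScikitSegmentationHelper.loadImage('example.jpg')
    mask = ScikitSegmentationHelper.segment(rgb, seedCoord=(100, 100), tolerance=32)
    contour = ScikitSegmentationHelper.maskToContour(mask)
    print('contour with %d points' % len(contour))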
|
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottle2neck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
scales=4,
base_width=26,
base_channels=64,
stage_type='normal',
**kwargs):
"""Bottle2neck block for Res2Net.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
width = int(math.floor(self.planes * (base_width / base_channels)))
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width * scales, postfix=1)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width * scales,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
if stage_type == 'stage' and self.conv2_stride != 1:
self.pool = nn.AvgPool2d(
kernel_size=3, stride=self.conv2_stride, padding=1)
convs = []
bns = []
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = build_conv_layer(
self.conv_cfg,
width * scales,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.stage_type = stage_type
self.scales = scales
self.width = width
delattr(self, 'conv2')
delattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
spx = torch.split(out, self.width, 1)
sp = self.convs[0](spx[0].contiguous())
sp = self.relu(self.bns[0](sp))
out = sp
for i in range(1, self.scales - 1):
if self.stage_type == 'stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp.contiguous())
sp = self.relu(self.bns[i](sp))
out = torch.cat((out, sp), 1)
if self.stage_type == 'normal' or self.conv2_stride == 1:
out = torch.cat((out, spx[self.scales - 1]), 1)
elif self.stage_type == 'stage':
out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
class Res2Layer(nn.Sequential):
"""Res2Layer to build Res2Net style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
scales=4,
base_width=26,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False),
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=1,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1],
)
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
stage_type='stage',
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
**kwargs))
super(Res2Layer, self).__init__(*layers)
@BACKBONES.register_module()
class Res2Net(ResNet):
"""Res2Net backbone.
Args:
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
depth (int): Depth of res2net, from {50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Res2net stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace the 7x7 conv in the input stem with three 3x3 convs.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
Example:
>>> from mmdet.models import Res2Net
>>> import torch
>>> self = Res2Net(depth=50, scales=4, base_width=26)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 256, 8, 8)
(1, 512, 4, 4)
(1, 1024, 2, 2)
(1, 2048, 1, 1)
"""
arch_settings = {
50: (Bottle2neck, (3, 4, 6, 3)),
101: (Bottle2neck, (3, 4, 23, 3)),
152: (Bottle2neck, (3, 8, 36, 3))
}
def __init__(self,
scales=4,
base_width=26,
style='pytorch',
deep_stem=True,
avg_down=True,
**kwargs):
self.scales = scales
self.base_width = base_width
super(Res2Net, self).__init__(
style='pytorch', deep_stem=True, avg_down=True, **kwargs)
def make_res_layer(self, **kwargs):
return Res2Layer(
scales=self.scales,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.dcn is not None:
for m in self.modules():
if isinstance(m, Bottle2neck):
# dcn in Res2Net bottle2neck is in ModuleList
for n in m.convs:
if hasattr(n, 'conv_offset'):
constant_init(n.conv_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottle2neck):
constant_init(m.norm3, 0)
else:
raise TypeError('pretrained must be a str or None')
|
from HDPython.ast.ast_classes.ast_base import v_ast_base, add_class
import HDPython.hdl_converter as hdl
from HDPython.ast.ast_hdl_error import HDPython_error
from HDPython.base import HDPython_base
class v_re_assigne_rhsift(v_ast_base):
def __init__(self,lhs, rhs,context=None, astParser=None):
self.lhs = lhs
self.rhs = rhs
self.context =context
self.astParser = astParser
def __str__(self):
if issubclass(type(self.lhs),HDPython_base):
return hdl.impl_reasign_rshift_(self.lhs, self.rhs, astParser=self.astParser, context_str=self.context )
return str(self.lhs) + " := " + str(self.rhs)
def body_RShift(astParser,Node):
rhs = astParser.Unfold_body(Node.right)
lhs = astParser.Unfold_body(Node.left)
if issubclass( type(lhs),HDPython_base) and issubclass( type(rhs),HDPython_base):
rhs.__Driver__ = astParser.ContextName[-1]
return v_re_assigne_rhsift(lhs, rhs,context=astParser.ContextName[-1],astParser=astParser)
err_msg = HDPython_error(
astParser.sourceFileName,
Node.lineno,
Node.col_offset,
type(lhs).__name__,
"right shift is only supported for HDPyhon objects"
)
raise Exception(err_msg,lhs)
add_class("RShift",body_RShift)
|
import pytest
import pathlib
import os
import subprocess
import tempfile
from kopf.testing import KopfRunner
from dask_kubernetes.common.utils import check_dependency
DIR = pathlib.Path(__file__).parent.absolute()
check_dependency("helm")
check_dependency("kubectl")
check_dependency("docker")
@pytest.fixture()
async def kopf_runner(k8s_cluster):
yield KopfRunner(["run", "-m", "dask_kubernetes.operator", "--verbose"])
@pytest.fixture(scope="session")
def docker_image():
image_name = "dask-kubernetes:dev"
subprocess.check_output(["docker", "build", "-t", image_name, "./ci/"])
return image_name
@pytest.fixture(scope="session")
def k8s_cluster(kind_cluster, docker_image):
os.environ["KUBECONFIG"] = str(kind_cluster.kubeconfig_path)
kind_cluster.load_docker_image(docker_image)
yield kind_cluster
del os.environ["KUBECONFIG"]
@pytest.fixture(scope="session")
def ns(k8s_cluster):
return "default"
def run_generate(crd_path, patch_path, temp_path):
subprocess.run(
["k8s-crd-resolver", "-r", "-j", patch_path, crd_path, temp_path],
check=True,
env={**os.environ},
)
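# For reference, run_generate is equivalent to invoking the k8s-crd-resolver CLI
# directly, e.g.:
#   k8s-crd-resolver -r -j daskcluster.patch.yaml daskcluster.yaml /tmp/daskcluster.yaml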
@pytest.fixture(scope="session", autouse=True)
def customresources(k8s_cluster):
temp_dir = tempfile.TemporaryDirectory()
crd_path = os.path.join(DIR, "operator", "customresources")
run_generate(
os.path.join(crd_path, "daskcluster.yaml"),
os.path.join(crd_path, "daskcluster.patch.yaml"),
os.path.join(temp_dir.name, "daskcluster.yaml"),
)
run_generate(
os.path.join(crd_path, "daskworkergroup.yaml"),
os.path.join(crd_path, "daskworkergroup.patch.yaml"),
os.path.join(temp_dir.name, "daskworkergroup.yaml"),
)
k8s_cluster.kubectl("apply", "-f", temp_dir.name)
yield
k8s_cluster.kubectl("delete", "-f", temp_dir.name)
temp_dir.cleanup()
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyarrow
from .. import query_to_arrow
def test_query_to_arrow(capsys, client):
arrow_table = query_to_arrow.query_to_arrow(client)
out, err = capsys.readouterr()
assert "Downloaded 8 rows, 2 columns." in out
arrow_schema = arrow_table.schema
assert arrow_schema.names == ["race", "participant"]
assert pyarrow.types.is_string(arrow_schema.types[0])
assert pyarrow.types.is_struct(arrow_schema.types[1])
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class AutoScalingPolicySummary(object):
"""
Summary information for an autoscaling policy.
"""
def __init__(self, **kwargs):
"""
Initializes a new AutoScalingPolicySummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this AutoScalingPolicySummary.
:type id: str
:param display_name:
The value to assign to the display_name property of this AutoScalingPolicySummary.
:type display_name: str
:param policy_type:
The value to assign to the policy_type property of this AutoScalingPolicySummary.
:type policy_type: str
:param is_enabled:
The value to assign to the is_enabled property of this AutoScalingPolicySummary.
:type is_enabled: bool
"""
self.swagger_types = {
'id': 'str',
'display_name': 'str',
'policy_type': 'str',
'is_enabled': 'bool'
}
self.attribute_map = {
'id': 'id',
'display_name': 'displayName',
'policy_type': 'policyType',
'is_enabled': 'isEnabled'
}
self._id = None
self._display_name = None
self._policy_type = None
self._is_enabled = None
@property
def id(self):
"""
**[Required]** Gets the id of this AutoScalingPolicySummary.
The ID of the autoscaling policy that is assigned after creation.
:return: The id of this AutoScalingPolicySummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this AutoScalingPolicySummary.
The ID of the autoscaling policy that is assigned after creation.
:param id: The id of this AutoScalingPolicySummary.
:type: str
"""
self._id = id
@property
def display_name(self):
"""
Gets the display_name of this AutoScalingPolicySummary.
A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:return: The display_name of this AutoScalingPolicySummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this AutoScalingPolicySummary.
A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:param display_name: The display_name of this AutoScalingPolicySummary.
:type: str
"""
self._display_name = display_name
@property
def policy_type(self):
"""
**[Required]** Gets the policy_type of this AutoScalingPolicySummary.
The type of autoscaling policy.
:return: The policy_type of this AutoScalingPolicySummary.
:rtype: str
"""
return self._policy_type
@policy_type.setter
def policy_type(self, policy_type):
"""
Sets the policy_type of this AutoScalingPolicySummary.
The type of autoscaling policy.
:param policy_type: The policy_type of this AutoScalingPolicySummary.
:type: str
"""
self._policy_type = policy_type
@property
def is_enabled(self):
"""
Gets the is_enabled of this AutoScalingPolicySummary.
Whether the autoscaling policy is enabled.
:return: The is_enabled of this AutoScalingPolicySummary.
:rtype: bool
"""
return self._is_enabled
@is_enabled.setter
def is_enabled(self, is_enabled):
"""
Sets the is_enabled of this AutoScalingPolicySummary.
Whether the autoscaling policy is enabled.
:param is_enabled: The is_enabled of this AutoScalingPolicySummary.
:type: bool
"""
self._is_enabled = is_enabled
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
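# Illustrative sketch (field values are assumptions): thanks to
# @init_model_state_from_kwargs, the summary can be populated directly from
# keyword arguments matching swagger_types.
if __name__ == '__main__':
    example = AutoScalingPolicySummary(
        id='ocid1.autoscalingpolicy.oc1..example',
        display_name='scale-out-policy',
        policy_type='threshold',
        is_enabled=True)
    print(example)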
|
from confluent_kafka import Consumer, KafkaException, KafkaError
import sys
import logging
from pprint import pformat
def print_assignment(consumer, partitions):
print('Assignment:', partitions)
if __name__ == '__main__':
conf = {
'bootstrap.servers': 'localhost:9092',
'group.id': 'devnation-python',
'session.timeout.ms': 6000,
'auto.offset.reset': 'earliest'
}
c = Consumer(conf)
c.subscribe(['devnation'], on_assign=print_assignment)
# Read messages from Kafka, print to stdout
try:
while True:
msg = c.poll(timeout=1.0)
if msg is None:
continue
if msg.error():
if msg.error().code() == KafkaError._PARTITION_EOF:
# Continue -> we reached the end of the partition
continue
else:
sys.stderr.write('-E- Something went wrong: %s\n' % msg.error())
break
else:
# Proper message
sys.stderr.write('-I- %s [%d] at offset %d with key %s: ' %
(msg.topic(), msg.partition(), msg.offset(),
str(msg.key())))
print(msg.value())
except KeyboardInterrupt:
sys.stderr.write('%% Aborted by user\n')
finally:
c.close()
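# Companion producer sketch (assumed, for pushing test messages at the same
# broker and topic the consumer above reads from):
#
#   from confluent_kafka import Producer
#   p = Producer({'bootstrap.servers': 'localhost:9092'})
#   p.produce('devnation', value='hello devnation')
#   p.flush()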
|
"""Central data class and associated."""
# --- import --------------------------------------------------------------------------------------
import collections
import operator
import functools
import warnings
import numpy as np
import h5py
import scipy
from scipy.interpolate import griddata, interp1d
from .._group import Group
from .. import collection as wt_collection
from .. import exceptions as wt_exceptions
from .. import kit as wt_kit
from .. import units as wt_units
from ._axis import Axis, identifier_to_operator
from ._channel import Channel
from ._constant import Constant
from ._variable import Variable
# --- define --------------------------------------------------------------------------------------
__all__ = ["Data"]
# --- class ---------------------------------------------------------------------------------------
class Data(Group):
"""Multidimensional dataset."""
class_name = "Data"
def __init__(self, *args, **kwargs):
self._axes = []
self._constants = []
Group.__init__(self, *args, **kwargs)
# populate axes, constants from attrs string
for identifier in self.attrs.get("axes", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
# Should not be needed for wt5 >= 1.0.3, kept for opening older wt5 files.
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
expression = expression.replace(" ", "") # remove all whitespace
axis = Axis(self, expression, units)
self._axes.append(axis)
for identifier in self.attrs.get("constants", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
expression = expression.replace(" ", "") # remove all whitespace
const = Constant(self, expression, units)
self._constants.append(const)
self._current_axis_identities_in_natural_namespace = []
if self.file.mode is not None and self.file.mode != "r":
self._on_constants_updated()
self._on_axes_updated()
# the following are populated if not already recorded
self.channel_names
self.source
self.variable_names
def __repr__(self) -> str:
return "<WrightTools.Data '{0}' {1} at {2}>".format(
self.natural_name, str(self.axis_names), "::".join([self.filepath, self.name])
)
@property
def axes(self) -> tuple:
return tuple(self._axes)
@property
def axis_expressions(self) -> tuple:
"""Axis expressions."""
return tuple(a.expression for a in self._axes)
@property
def axis_names(self) -> tuple:
"""Axis names."""
return tuple(a.natural_name for a in self._axes)
@property
def constants(self) -> tuple:
return tuple(self._constants)
@property
def constant_expressions(self) -> tuple:
"""Axis expressions."""
return tuple(a.expression for a in self._constants)
@property
def constant_names(self) -> tuple:
"""Axis names."""
return tuple(a.natural_name for a in self._constants)
@property
def channel_names(self) -> tuple:
"""Channel names."""
if "channel_names" not in self.attrs.keys():
self.attrs["channel_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["channel_names"])
@channel_names.setter
def channel_names(self, value):
"""Set channel names."""
self.attrs["channel_names"] = np.array(value, dtype="S")
@property
def channels(self) -> tuple:
"""Channels."""
return tuple(self[n] for n in self.channel_names)
@property
def datasets(self) -> tuple:
"""Datasets."""
return tuple(v for _, v in self.items() if isinstance(v, h5py.Dataset))
@property
def kind(self):
"""Kind."""
if "kind" not in self.attrs.keys():
self.attrs["kind"] = "None"
value = self.attrs["kind"]
return value if not value == "None" else None
@property
def ndim(self) -> int:
"""Get number of dimensions."""
try:
assert self._ndim is not None
except (AssertionError, AttributeError):
if len(self.variables) == 0:
self._ndim = 0
else:
self._ndim = self.variables[0].ndim
finally:
return self._ndim
@property
def shape(self) -> tuple:
"""Shape."""
try:
assert self._shape is not None
except (AssertionError, AttributeError):
self._shape = wt_kit.joint_shape(*self.variables)
finally:
return self._shape
@property
def size(self) -> int:
"""Size."""
return functools.reduce(operator.mul, self.shape)
@property
def source(self):
"""Source."""
if "source" not in self.attrs.keys():
self.attrs["source"] = "None"
value = self.attrs["source"]
return value if not value == "None" else None
@property
def units(self) -> tuple:
"""All axis units."""
return tuple(a.units for a in self._axes)
@property
def constant_units(self) -> tuple:
"""All constant units."""
return tuple(a.units for a in self._constants)
@property
def variable_names(self) -> tuple:
"""Variable names."""
if "variable_names" not in self.attrs.keys():
self.attrs["variable_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["variable_names"])
@variable_names.setter
def variable_names(self, value):
"""Set variable names."""
self.attrs["variable_names"] = np.array(value, dtype="S")
@property
def variables(self) -> tuple:
"""Variables."""
try:
assert self._variables is not None
except (AssertionError, AttributeError):
self._variables = [self[n] for n in self.variable_names]
finally:
return tuple(self._variables)
@property
def _leaf(self):
return "{0} {1}".format(self.natural_name, self.shape)
def _on_axes_updated(self):
"""Method to run when axes are changed in any way.
Propagates updated axes properly.
"""
# update attrs
self.attrs["axes"] = np.array([a.identity.encode() for a in self._axes], dtype="S")
# remove old attributes
while len(self._current_axis_identities_in_natural_namespace) > 0:
key = self._current_axis_identities_in_natural_namespace.pop(0)
try:
delattr(self, key)
except AttributeError:
pass # already gone
# populate new attributes
for a in self._axes:
key = a.natural_name
setattr(self, key, a)
self._current_axis_identities_in_natural_namespace.append(key)
def _on_constants_updated(self):
"""Method to run when constants are changed in any way.
Propagates updated constants properly.
"""
# update attrs
self.attrs["constants"] = np.array(
[a.identity.encode() for a in self._constants], dtype="S"
)
def _print_branch(self, prefix, depth, verbose):
def print_leaves(prefix, lis, vline=True):
for i, item in enumerate(lis):
if vline:
a = "│ "
else:
a = " "
if i + 1 == len(lis):
b = "└── "
else:
b = "├── "
s = prefix + a + b + "{0}: {1}".format(i, item._leaf)
print(s)
if verbose:
# axes
print(prefix + "├── axes")
print_leaves(prefix, self.axes)
# constants
print(prefix + "├── constants")
print_leaves(prefix, self.constants)
# variables
print(prefix + "├── variables")
print_leaves(prefix, self.variables)
# channels
print(prefix + "└── channels")
print_leaves(prefix, self.channels, vline=False)
else:
# axes
s = "axes: "
s += ", ".join(["{0} ({1})".format(a.expression, a.units) for a in self.axes])
print(prefix + "├── " + s)
# constants
s = "constants: "
s += ", ".join(
["{0} ({1} {2})".format(a.expression, a.value, a.units) for a in self.constants]
)
print(prefix + "├── " + s)
# channels
s = "channels: "
s += ", ".join(self.channel_names)
print(prefix + "└── " + s)
def bring_to_front(self, channel):
"""Bring a specific channel to the zero-indexed position in channels.
All other channels get pushed back but remain in order.
Parameters
----------
channel : int or str
Channel index or name.
"""
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
new.insert(0, new.pop(channel_index))
self.channel_names = new
def chop(self, *args, at={}, parent=None, verbose=True) -> wt_collection.Collection:
"""Divide the dataset into its lower-dimensionality components.
Parameters
----------
axis : str or int (args)
Axes of the returned data objects. Strings refer to the names of
axes in this object, integers refer to their index. Provide multiple
axes to return multidimensional data objects.
at : dict (optional)
Choice of position along an axis. Keys are axis names, values are lists
``[position, input units]``. If exact position does not exist,
the closest valid position is used.
parent : WrightTools Collection instance (optional)
Collection to place the new "chop" collection within. Default is
None (new parent).
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools Collection
Collection of chopped data objects.
Examples
--------
>>> data.axis_names
['d2', 'w1', 'w2']
Get all w1 wigners.
>>> datas = data.chop('d2', 'w1')
>>> len(datas)
51
Get 2D frequency at d2=0 fs.
>>> datas = data.chop('w1', 'w2', at={'d2': [0, 'fs']})
>>> len(datas)
1
>>> datas[0].axis_names
['w1', 'w2']
>>> datas[0].d2[:]
0.
See Also
--------
collapse
Collapse the dataset along one axis.
split
Split the dataset while maintaining its dimensionality.
"""
from ._axis import operators, operator_to_identifier
# parse args
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, int):
args[i] = self._axes[arg].natural_name
elif isinstance(arg, str):
# same normalization that occurs in the natural_name @property
arg = arg.strip()
for op in operators:
arg = arg.replace(op, operator_to_identifier[op])
args[i] = wt_kit.string2identifier(arg)
# normalize the at keys to the natural name
for k in [ak for ak in at.keys() if type(ak) == str]:
for op in operators:
if op in k:
nk = k.replace(op, operator_to_identifier[op])
at[nk] = at[k]
at.pop(k)
k = nk
# get output collection
out = wt_collection.Collection(name="chop", parent=parent)
# get output shape
kept = args + [ak for ak in at.keys() if type(ak) == str]
kept_axes = [self._axes[self.axis_names.index(a)] for a in kept]
removed_axes = [a for a in self._axes if a not in kept_axes]
removed_shape = wt_kit.joint_shape(*removed_axes)
if removed_shape == ():
removed_shape = (1,) * self.ndim
removed_shape = list(removed_shape)
for i in at.keys():
if type(i) == int:
removed_shape[i] = 1
for ax in kept_axes:
if ax.shape.count(1) == ax.ndim - 1:
removed_shape[ax.shape.index(ax.size)] = 1
removed_shape = tuple(removed_shape)
# iterate
i = 0
for idx in np.ndindex(removed_shape):
idx = np.array(idx, dtype=object)
idx[np.array(removed_shape) == 1] = slice(None)
for axis, point in at.items():
if type(axis) == int:
idx[axis] = point
continue
point, units = point
destination_units = self._axes[self.axis_names.index(axis)].units
point = wt_units.converter(point, units, destination_units)
axis_index = self.axis_names.index(axis)
axis = self._axes[axis_index]
idx_index = np.array(axis.shape) > 1
if np.sum(idx_index) > 1:
raise wt_exceptions.MultidimensionalAxisError("chop", axis.natural_name)
idx_index = list(idx_index).index(True)
idx[idx_index] = np.argmin(np.abs(axis[tuple(idx)] - point))
data = out.create_data(name="chop%03i" % i)
for v in self.variables:
kwargs = {}
kwargs["name"] = v.natural_name
kwargs["values"] = v[idx]
kwargs["units"] = v.units
kwargs["label"] = v.label
kwargs.update(v.attrs)
data.create_variable(**kwargs)
for c in self.channels:
kwargs = {}
kwargs["name"] = c.natural_name
kwargs["values"] = c[idx]
kwargs["units"] = c.units
kwargs["label"] = c.label
kwargs["signed"] = c.signed
kwargs.update(c.attrs)
data.create_channel(**kwargs)
new_axes = [a.expression for a in kept_axes if a.expression not in at.keys()]
new_axis_units = [a.units for a in kept_axes if a.expression not in at.keys()]
data.transform(*new_axes)
for const in self.constant_expressions:
data.create_constant(const, verbose=False)
for ax in self.axis_expressions:
if ax not in new_axes:
data.create_constant(ax, verbose=False)
for j, units in enumerate(new_axis_units):
data.axes[j].convert(units)
i += 1
out.flush()
# return
if verbose:
print("chopped data into %d piece(s)" % len(out), "in", new_axes)
return out
def gradient(self, axis, *, channel=0):
"""
Compute the gradient along one axis.
New channels have names ``<channel name>_<axis name>_gradient``.
Parameters
----------
axis : int or str
The axis to differentiate along.
If given as an integer, the axis in the underlying array is used,
and unitary spacing is assumed.
If given as a string, the axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
The axis to differentiate along is inferred from the shape of the axis.
channel : int or str
The channel to differentiate.
Default is the first channel.
"""
# get axis index --------------------------------------------------------------------------
if isinstance(axis, int):
axis_index = axis
elif isinstance(axis, str):
index = self.axis_names.index(axis)
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "collapse")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis '{}' is a single point, cannot compute gradient".format(axis)
)
axis_index = axes[0]
else:
raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channel_names[channel_index]
if self[channel].shape[axis_index] == 1:
raise wt_exceptions.ValueError(
"Channel '{}' has a single point along Axis '{}', cannot compute gradient".format(
channel, axis
)
)
rtype = np.result_type(self[channel].dtype, float)
new = self.create_channel(
"{}_{}_gradient".format(channel, axis),
values=np.empty(self[channel].shape, dtype=rtype),
)
channel = self[channel]
if axis == axis_index:
new[:] = np.gradient(channel[:], axis=axis_index)
else:
new[:] = np.gradient(channel[:], self[axis].points, axis=axis_index)
def moment(self, axis, channel=0, moment=1, *, resultant=None):
"""Take the nth moment the dataset along one axis, adding lower rank channels.
New channels have names ``<channel name>_<axis name>_moment_<moment num>``.
Moment 0 is the integral of the slice.
Moment 1 is the weighted average or "Center of Mass", normalized by the integral.
Moment 2 is the variance, the central moment about the center of mass,
normalized by the integral.
Moments 3+ are central moments about the center of mass, normalized by the integral
and by the standard deviation to the power of the moment.
Moments, especially higher order moments, are susceptible to noise and baseline.
It is recommended when used with real data to use :meth:`WrightTools.data.Channel.clip`
in conjunction with moments to reduce effects of noise.
Parameters
----------
axis : int or str
The axis to take the moment along.
If given as an integer, the axis with that index is used.
If given as a string, the axis with that name is used.
The axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
The collapsed axis must be monotonic to produce correct results.
The axis to collapse along is inferred from the shape of the axis.
channel : int or str
The channel to take the moment.
If given as an integer, the channel with that index is used.
If given as a string, the channel with that name is used.
The channel must have values along the axis
(i.e. its shape must not be ``1`` in the dimension for which the axis is not ``1``)
Default is 0, the first channel.
moment : int or tuple of int
The moments to take.
One channel will be created for each number given.
Default is 1, the center of mass.
resultant : tuple of int
The resultant shape after the moment operation.
By default, it is intuited by the axis along which the moment is being taken.
This default only works if that axis is 1D, so resultant is required if a
multidimensional axis is passed as the first argument.
The requirement of monotonicity applies on a per pixel basis.
See Also
--------
collapse
Reduce dimensionality by some mathematical operation
clip
Set values above/below a threshold to a particular value
WrightTools.kit.joint_shape
Useful for setting `resultant` kwarg based off of axes not collapsed.
"""
# get axis index --------------------------------------------------------------------------
axis_index = None
if resultant is not None:
for i, (s, r) in enumerate(zip(wt_kit.joint_shape(*self.axes), resultant)):
if s != r and r == 1 and axis_index is None:
axis_index = i
elif s == r:
continue
else:
raise wt_exceptions.ValueError(
f"Invalid resultant shape '{resultant}' for shape {wt_kit.joint_shape(*self.axes)}. "
+ "Consider using `wt.kit.joint_shape` to join non-collapsed axes."
)
index = wt_kit.get_index(self.axis_names, axis)
if axis_index is None:
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "moment")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis {} is a single point, cannot compute moment".format(axis)
)
axis_index = axes[0]
warnings.warn("moment", category=wt_exceptions.EntireDatasetInMemoryWarning)
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channel_names[channel_index]
if self[channel].shape[axis_index] == 1:
raise wt_exceptions.ValueError(
"Channel '{}' has a single point along Axis '{}', cannot compute moment".format(
channel, axis
)
)
new_shape = list(self[channel].shape)
new_shape[axis_index] = 1
channel = self[channel]
axis_inp = axis
axis = self.axes[index]
x = axis[:]
if np.any(np.isnan(x)):
raise wt_exceptions.ValueError("Axis '{}' includes NaN".format(axis_inp))
y = np.nan_to_num(channel[:])
try:
moments = tuple(moment)
except TypeError:
moments = (moment,)
multiplier = 1
if 0 in moments:
# May be possible to optimize, probably doesn't need the sum
# only matters for integral, all others normalize by integral
multiplier = np.sign(
np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True)
)
for moment in moments:
about = 0
norm = 1
if moment > 0:
norm = np.trapz(y, x, axis=axis_index)
norm = np.array(norm)
norm.shape = new_shape
if moment > 1:
about = np.trapz(x * y, x, axis=axis_index)
about = np.array(about)
about.shape = new_shape
about /= norm
if moment > 2:
sigma = np.trapz((x - about) ** 2 * y, x, axis=axis_index)
sigma = np.array(sigma)
sigma.shape = new_shape
sigma /= norm
sigma **= 0.5
norm *= sigma ** moment
values = np.trapz((x - about) ** moment * y, x, axis=axis_index)
values = np.array(values)
values.shape = new_shape
values /= norm
if moment == 0:
values *= multiplier
self.create_channel(
"{}_{}_{}_{}".format(channel.natural_name, axis_inp, "moment", moment),
values=values,
)
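# Illustrative (assumed 1D axis 'w' and channel 'signal'): calling
#     data.moment('w', channel='signal', moment=(0, 1))
# adds 'signal_w_moment_0' (the integral, via np.trapz) and
# 'signal_w_moment_1' (the center of mass, i.e. the integral of x*y
# normalized by moment 0).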
def collapse(self, axis, method="sum"):
"""Collapse the dataset along one axis, adding lower rank channels.
New channels have names ``<channel name>_<axis name>_<method>``.
Parameters
----------
axis : int or str
The axis to collapse along.
If given as an integer, the axis in the underlying array is used.
If given as a string, the axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
The axis to collapse along is inferred from the shape of the axis.
method : {'average', 'sum', 'max', 'min'} (optional)
The method of collapsing the given axis. Default is sum; NaNs are ignored.
May also be a list of methods corresponding to the channels of the object,
allowing for different treatment per channel. In this case, None indicates
that no change to that channel should occur.
See Also
--------
chop
Divide the dataset into its lower-dimensionality components.
split
Split the dataset while maintaining its dimensionality.
moment
Take the moment along a particular axis
"""
if method in ("int", "integrate"):
warnings.warn(
"integrate method of collapse is deprecated, use moment(moment=0) instead",
wt_exceptions.VisibleDeprecationWarning,
)
for channel in self.channel_names:
try:
self.moment(axis, channel, moment=0)
self.rename_channels(
**{self.channel_names[-1]: f"{channel}_{axis}_{method}"}, verbose=False
)
except wt_exceptions.ValueError:
pass # may have some channels which fail, do so silently
return
# get axis index --------------------------------------------------------------------------
if isinstance(axis, int):
axis_index = axis
elif isinstance(axis, str):
index = self.axis_names.index(axis)
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "collapse")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis {} is a single point, cannot collapse".format(axis)
)
axis_index = axes[0]
else:
raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
new_shape = list(self.shape)
new_shape[axis_index] = 1
func = {
"sum": np.nansum,
"max": np.nanmax,
"maximum": np.nanmax,
"min": np.nanmin,
"minimum": np.nanmin,
"ave": np.nanmean,
"average": np.nanmean,
"mean": np.nanmean,
}
# methods ---------------------------------------------------------------------------------
if isinstance(method, str):
methods = [method for _ in self.channels]
if isinstance(method, list):
if len(method) == len(self.channels):
methods = method
else:
raise wt_exceptions.ValueError(
"method argument must have same number of elements as there are channels"
)
for m in methods:
if m not in func.keys():
raise wt_exceptions.ValueError("method '{}' not recognized".format(m))
warnings.warn("collapse", category=wt_exceptions.EntireDatasetInMemoryWarning)
# collapse --------------------------------------------------------------------------------
for method, channel in zip(methods, self.channel_names):
if method is None:
continue
if self[channel].shape[axis_index] == 1:
continue # Cannot collapse any further, don't clutter data object
new_shape = list(self[channel].shape)
new_shape[axis_index] = 1
rtype = self[channel].dtype
if method in ["ave", "average", "mean"]:
rtype = np.result_type(self[channel].dtype, float)
new = self.create_channel(
"{}_{}_{}".format(channel, axis, method),
values=np.empty(new_shape, dtype=rtype),
units=self[channel].units,
)
new[:] = func[method](self[channel], axis=axis_index, keepdims=True)
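# Illustrative (assumed data with channels 'sig' and 'ref'): a per-channel
# method list such as
#     data.collapse('w2', method=['average', None])
# averages 'sig' over the w2 axis into 'sig_w2_average' and leaves 'ref'
# untouched.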
def convert(self, destination_units, *, convert_variables=False, verbose=True):
"""Convert all compatable axes and constants to given units.
Parameters
----------
destination_units : str
Destination units.
convert_variables : boolean (optional)
Toggle conversion of stored arrays. Default is False
verbose : bool (optional)
Toggle talkback. Default is True.
See Also
--------
Axis.convert
Convert a single axis object to compatible units. Call on an
axis object in data.axes.
"""
# apply to all compatible axes
for axis in self.axes:
if wt_units.is_valid_conversion(axis.units, destination_units):
orig = axis.units
axis.convert(destination_units, convert_variables=convert_variables)
if verbose:
print(
"axis {} converted from {} to {}".format(
axis.expression, orig, destination_units
)
)
# apply to all compatible constants
for constant in self.constants:
if wt_units.is_valid_conversion(constant.units, destination_units):
orig = constant.units
constant.convert(destination_units, convert_variables=convert_variables)
if verbose:
print(
"constant {} converted from {} to {}".format(
constant.expression, orig, destination_units
)
)
if convert_variables:
for var in self.variables:
if wt_units.is_valid_conversion(var.units, destination_units):
orig = var.units
var.convert(destination_units)
if verbose:
print(
"variable {} converted from {} to {}".format(
var.natural_name, orig, destination_units
)
)
self._on_axes_updated()
self._on_constants_updated()
def create_channel(
self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Channel:
"""Append a new channel.
Parameters
----------
name : string
Unique name for this channel.
values : array (optional)
            Array. If None, an empty array matching the data shape is
            created. Default is None.
shape : tuple of int
Shape to use. Must broadcast with the full shape.
Only used if `values` is None.
Default is the full shape of self.
units : string (optional)
Channel units. Default is None.
dtype : numpy.dtype (optional)
dtype to use for dataset, default is np.float64.
Only used if `values` is None.
kwargs : dict
Additional keyword arguments passed to Channel instantiation.
Returns
-------
Channel
Created channel.
"""
if name in self.channel_names:
warnings.warn(name, wt_exceptions.ObjectExistsWarning)
return self[name]
elif name in self.variable_names:
raise wt_exceptions.NameNotUniqueError(name)
require_kwargs = {"chunks": True}
if values is None:
if shape is None:
require_kwargs["shape"] = self.shape
else:
require_kwargs["shape"] = shape
if dtype is None:
require_kwargs["dtype"] = np.dtype(np.float64)
else:
require_kwargs["dtype"] = dtype
if require_kwargs["dtype"].kind in "fcmM":
require_kwargs["fillvalue"] = np.nan
else:
require_kwargs["fillvalue"] = 0
else:
require_kwargs["data"] = values
require_kwargs["shape"] = values.shape
require_kwargs["dtype"] = values.dtype
if np.prod(require_kwargs["shape"]) == 1:
require_kwargs["chunks"] = None
# create dataset
dataset_id = self.require_dataset(name=name, **require_kwargs).id
channel = Channel(self, dataset_id, units=units, **kwargs)
# finish
self.attrs["channel_names"] = np.append(self.attrs["channel_names"], name.encode())
return channel
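    # Illustrative usage sketch for create_channel(); the array and channel
    # names here are hypothetical:
    #
    #     sig = np.random.random(data.shape)
    #     data.create_channel('signal', values=sig)
    #     data.create_channel('baseline')   # no values: NaN-filled, full data shape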
def create_variable(
self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Variable:
"""Add new child variable.
Parameters
----------
name : string
Unique identifier.
values : array-like (optional)
            Array to populate the variable with. If None, the variable will be filled with NaN.
Default is None.
shape : tuple of int
            Shape to use. Must broadcast with the full shape.
Only used if `values` is None.
Default is the full shape of self.
units : string (optional)
Variable units. Default is None.
dtype : numpy.dtype (optional)
dtype to use for dataset, default is np.float64.
Only used if `values` is None.
kwargs
Additional kwargs to variable instantiation.
Returns
-------
WrightTools Variable
New child variable.
"""
if name in self.variable_names:
warnings.warn(name, wt_exceptions.ObjectExistsWarning)
return self[name]
elif name in self.channel_names:
raise wt_exceptions.NameNotUniqueError(name)
if values is None:
if shape is None:
shape = self.shape
if dtype is None:
dtype = np.dtype(np.float64)
if dtype.kind in "fcmM":
fillvalue = np.nan
else:
fillvalue = 0
else:
shape = values.shape
dtype = values.dtype
fillvalue = None
# create dataset
id = self.require_dataset(
name=name, data=values, shape=shape, dtype=dtype, fillvalue=fillvalue
).id
variable = Variable(self, id, units=units, **kwargs)
# finish
self._variables = None
self.attrs["variable_names"] = np.append(self.attrs["variable_names"], name.encode())
return variable
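    # Illustrative usage sketch for create_variable(); the name, shape, and
    # units here are hypothetical and must broadcast with the data shape:
    #
    #     w3 = np.linspace(450, 700, 51).reshape(-1, 1)
    #     data.create_variable('w3', values=w3, units='nm')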
def get_nadir(self, channel=0) -> tuple:
"""Get the coordinates, in units, of the minimum in a channel.
Parameters
----------
channel : int or str (optional)
Channel. Default is 0.
Returns
-------
        tuple of numbers
Coordinates in units for each axis.
"""
# get channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
        # get indices
idx = channel.argmin()
# finish
return tuple(a[idx] for a in self._axes)
def get_zenith(self, channel=0) -> tuple:
"""Get the coordinates, in units, of the maximum in a channel.
Parameters
----------
channel : int or str (optional)
Channel. Default is 0.
Returns
-------
        tuple of numbers
Coordinates in units for each axis.
"""
# get channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
        # get indices
idx = channel.argmax()
# finish
return tuple(a[idx] for a in self._axes)
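    # Illustrative usage sketch for get_nadir()/get_zenith(), assuming a
    # hypothetical channel named 'signal':
    #
    #     lo = data.get_nadir('signal')   # axis coordinates of the channel minimum
    #     hi = data.get_zenith(0)         # axis coordinates of the channel maximum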
def heal(self, channel=0, method="linear", fill_value=np.nan, verbose=True):
"""
Remove nans from channel using interpolation.
Parameters
----------
channel : int or str (optional)
Channel to heal. Default is 0.
method : {'linear', 'nearest', 'cubic'} (optional)
The interpolation method. Note that cubic interpolation is only
possible for 1D and 2D data. See `griddata`__ for more information.
Default is linear.
fill_value : number-like (optional)
The value written to pixels that cannot be filled by interpolation.
Default is nan.
verbose : bool (optional)
Toggle talkback. Default is True.
__ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
.. note:: Healing may take several minutes for large datasets.
Interpolation time goes as nearest, linear, then cubic.
"""
warnings.warn("heal", category=wt_exceptions.EntireDatasetInMemoryWarning)
timer = wt_kit.Timer(verbose=False)
with timer:
# channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
values = self.channels[channel_index][:]
points = [axis[:] for axis in self._axes]
xi = tuple(np.meshgrid(*points, indexing="ij"))
# 'undo' gridding
arr = np.zeros((len(self._axes) + 1, values.size))
for i in range(len(self._axes)):
arr[i] = xi[i].flatten()
arr[-1] = values.flatten()
# remove nans
arr = arr[:, ~np.isnan(arr).any(axis=0)]
# grid data wants tuples
tup = tuple([arr[i] for i in range(len(arr) - 1)])
# grid data
out = griddata(tup, arr[-1], xi, method=method, fill_value=fill_value)
self.channels[channel_index][:] = out
# print
if verbose:
print(
"channel {0} healed in {1} seconds".format(
channel.name, np.around(timer.interval, decimals=3)
)
)
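    # Illustrative usage sketch for heal(), assuming a hypothetical channel
    # named 'signal' containing NaN gaps:
    #
    #     data.heal('signal', method='nearest', fill_value=0.0)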
def level(self, channel, axis, npts, *, verbose=True):
"""Subtract the average value of npts at the edge of a given axis.
Parameters
----------
channel : int or str
Channel to level.
axis : int
Axis to level along.
npts : int
Number of points to average for each slice. Positive numbers
            take points at leading indices and negative numbers take points
            at trailing indices.
verbose : bool (optional)
Toggle talkback. Default is True.
"""
warnings.warn("level", category=wt_exceptions.EntireDatasetInMemoryWarning)
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channels[channel_index]
# verify npts not zero
npts = int(npts)
if npts == 0:
raise wt_exceptions.ValueError("npts must not be zero")
# get subtrahend
ss = [slice(None)] * self.ndim
if npts > 0:
ss[axis] = slice(0, npts, None)
else:
ss[axis] = slice(npts, None, None)
subtrahend = np.nanmean(channel[ss], axis=axis)
if self.ndim > 1:
subtrahend = np.expand_dims(subtrahend, axis=axis)
# level
channel -= subtrahend
# finish
channel._null = 0
if verbose:
print("channel {0} leveled along axis {1}".format(channel.natural_name, axis))
def map_variable(
self, variable, points, input_units="same", *, name=None, parent=None, verbose=True
) -> "Data":
"""Map points of an axis to new points using linear interpolation.
Out-of-bounds points are written nan.
Parameters
----------
variable : string
The variable to map onto.
points : array-like or int
            If array, the new points. If int, the new points span the same
            limits as the variable, with int defining the number of evenly
            spaced points between them.
input_units : str (optional)
            The units of the new points. Default is same, which assumes
            the new points have the same units as the variable.
name : string (optional)
The name of the new data object. If None, generated from
natural_name. Default is None.
parent : WrightTools.Collection (optional)
Parent of new data object. If None, data is made at root of a
new temporary file.
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.Data
New data object.
"""
# get variable index
variable_index = wt_kit.get_index(self.variable_names, variable)
variable = self.variables[variable_index]
# get points
if isinstance(points, int):
points = np.linspace(variable.min(), variable.max(), points)
points = np.array(points)
# points dimensionality
if points.ndim < variable.ndim:
for i, d in enumerate(variable.shape):
if d == 1:
points = np.expand_dims(points, axis=i)
# convert points
if input_units == "same":
pass
else:
points = wt_units.converter(points, input_units, variable.units)
# construct new data object
special = ["name", "axes", "constants", "channel_names", "variable_names"]
kwargs = {k: v for k, v in self.attrs.items() if k not in special}
if name is None:
name = "{0}_{1}_mapped".format(self.natural_name, variable.natural_name)
kwargs["name"] = name
kwargs["parent"] = parent
out = Data(**kwargs)
# mapped variable
values = points
out.create_variable(values=values, **variable.attrs)
# orthogonal variables
for v in self.variables:
if wt_kit.orthogonal(v.shape, variable.shape):
out.create_variable(values=v[:], **v.attrs)
out.transform(*self.axis_expressions)
# interpolate
if self.ndim == 1:
def interpolate(dataset, points):
function = scipy.interpolate.interp1d(variable[:], dataset[:], bounds_error=False)
return function(points)
else:
pts = np.array([a.full.flatten() for a in self.axes]).T
out_pts = np.array([a.full.flatten() for a in out.axes]).T
def interpolate(dataset, points):
values = dataset.full.flatten()
function = scipy.interpolate.LinearNDInterpolator(pts, values, rescale=True)
new = function(out_pts)
new.shape = out.shape
return new
for v in self.variables:
if v.natural_name not in out.variable_names:
out.create_variable(values=interpolate(v, points), **v.attrs)
out.variable_names = self.variable_names # enforce old order
out._variables = None # force regeneration of variables @property
for channel in self.channels:
out.create_channel(values=interpolate(channel, points), **channel.attrs)
# finish
if verbose:
print("data mapped from {0} to {1}".format(self.shape, out.shape))
return out
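    # Illustrative usage sketch for map_variable(), assuming a hypothetical
    # variable named 'w1':
    #
    #     mapped = data.map_variable('w1', 101)   # 101 evenly spaced points
    #     mapped = data.map_variable('w1', np.linspace(1.2, 1.9, 71), input_units='eV')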
def offset(
self,
points,
offsets,
along,
offset_axis,
units="same",
offset_units="same",
mode="valid",
method="linear",
verbose=True,
):
"""Offset one axis based on another axis' values.
Useful for correcting instrumental artifacts such as zerotune.
Parameters
----------
points : 1D array-like
Points.
offsets : 1D array-like
Offsets.
along : str or int
Axis that points array lies along.
offset_axis : str or int
Axis to offset using offsets.
units : str (optional)
Units of points array.
offset_units : str (optional)
            Units of offsets array.
mode : {'valid', 'full', 'old'} (optional)
Define how far the new axis will extend. Points outside of valid
interpolation range will be written nan.
method : {'linear', 'nearest', 'cubic'} (optional)
The interpolation method. Note that cubic interpolation is only
possible for 1D and 2D data. See `griddata`__ for more information.
Default is linear.
verbose : bool (optional)
Toggle talkback. Default is True.
__ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
>>> points # an array of w1 points
>>> offsets # an array of d1 corrections
>>> data.offset(points, offsets, 'w1', 'd1')
"""
raise NotImplementedError
# axis ------------------------------------------------------------------------------------
if isinstance(along, int):
axis_index = along
elif isinstance(along, str):
axis_index = self.axis_names.index(along)
else:
raise TypeError("along: expected {int, str}, got %s" % type(along))
axis = self._axes[axis_index]
# values & points -------------------------------------------------------------------------
# get values, points, units
if units == "same":
input_units = axis.units
else:
input_units = units
        # check that offsets is 1D
        if len(offsets.shape) == 1:
            pass
        else:
            raise RuntimeError("offsets must be 1D in offset!")
# check if units is compatible, convert
dictionary = getattr(wt_units, axis.units_kind)
if input_units in dictionary.keys():
pass
else:
raise RuntimeError("units incompatible in offset!")
points = wt_units.converter(points, input_units, axis.units)
# create correction array
function = interp1d(points, offsets, bounds_error=False)
corrections = function(axis[:])
# remove nans
        finite_indices = np.where(np.isfinite(corrections))[0]
        left_pad_width = finite_indices[0]
        right_pad_width = len(corrections) - finite_indices[-1] - 1
corrections = np.pad(
corrections[np.isfinite(corrections)],
(int(left_pad_width), int(right_pad_width)),
mode="edge",
)
# do correction ---------------------------------------------------------------------------
# transpose so axis is last
transpose_order = np.arange(len(self._axes))
transpose_order[axis_index] = len(self._axes) - 1
transpose_order[-1] = axis_index
self.transpose(transpose_order, verbose=False)
# get offset axis index
if isinstance(offset_axis, int):
offset_axis_index = offset_axis
elif isinstance(offset_axis, str):
offset_axis_index = self.axis_names.index(offset_axis)
else:
raise TypeError("offset_axis: expected {int, str}, got %s" % type(offset_axis))
# new points
new_points = [a[:] for a in self._axes]
old_offset_axis_points = self._axes[offset_axis_index][:]
spacing = abs(
(old_offset_axis_points.max() - old_offset_axis_points.min())
/ float(len(old_offset_axis_points))
)
if mode == "old":
new_offset_axis_points = old_offset_axis_points
elif mode == "valid":
_max = old_offset_axis_points.max() + corrections.min()
_min = old_offset_axis_points.min() + corrections.max()
n = int(abs(np.ceil((_max - _min) / spacing)))
new_offset_axis_points = np.linspace(_min, _max, n)
elif mode == "full":
_max = old_offset_axis_points.max() + corrections.max()
_min = old_offset_axis_points.min() + corrections.min()
            n = int(abs(np.ceil((_max - _min) / spacing)))
new_offset_axis_points = np.linspace(_min, _max, n)
new_points[offset_axis_index] = new_offset_axis_points
new_xi = tuple(np.meshgrid(*new_points, indexing="ij"))
xi = tuple(np.meshgrid(*[a[:] for a in self._axes], indexing="ij"))
for channel in self.channels:
# 'undo' gridding
arr = np.zeros((len(self._axes) + 1, channel[:].size))
for i in range(len(self._axes)):
arr[i] = xi[i].flatten()
arr[-1] = channel[:].flatten()
# do corrections
corrections = list(corrections)
corrections = corrections * int((len(arr[0]) / len(corrections)))
arr[offset_axis_index] += corrections
# grid data
tup = tuple([arr[i] for i in range(len(arr) - 1)])
# note that rescale is crucial in this operation
out = griddata(tup, arr[-1], new_xi, method=method, fill_value=np.nan, rescale=True)
channel[:] = out
self._axes[offset_axis_index][:] = new_offset_axis_points
# transpose out
self.transpose(transpose_order, verbose=False)
def print_tree(self, *, verbose=True):
"""Print a ascii-formatted tree representation of the data contents."""
print("{0} ({1})".format(self.natural_name, self.filepath))
self._print_branch("", depth=0, verbose=verbose)
def prune(self, keep_channels=True, *, verbose=True):
"""Remove unused variables and (optionally) channels from the Data object.
        Unused variables are those not referenced by any axis or constant.
        Unused channels are those not specified by keep_channels; when
        keep_channels is False, only the first channel is kept.
Parameters
----------
keep_channels : boolean or int or str or tuple
If False, removes all but the first channel.
If int or str, removes all but that index/name channel.
If tuple, removes all channels except those in the tuple by index or name.
            Default is True: do not remove any channels.
verbose : boolean
Toggle talkback. Default is True.
"""
for v in self.variables:
for var in wt_kit.flatten_list([ax.variables for ax in self._axes + self._constants]):
if v == var:
break
else:
self.remove_variable(v.natural_name, implied=False, verbose=verbose)
if keep_channels is not True:
try:
if isinstance(keep_channels, str):
raise TypeError
indexes = tuple(keep_channels)
except TypeError:
indexes = (keep_channels,)
for i, ch in enumerate(self.channels):
                if i not in indexes and ch.natural_name not in indexes:
self.remove_channel(ch.natural_name, verbose=verbose)
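    # Illustrative usage sketch for prune(); the channel names are hypothetical:
    #
    #     data.prune()                            # drop unused variables, keep channels
    #     data.prune(keep_channels=(0, 'pyro'))   # keep channel 0 and a named channel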
def remove_channel(self, channel, *, verbose=True):
"""Remove channel from data.
Parameters
----------
channel : int or str
Channel index or name to remove.
verbose : boolean (optional)
Toggle talkback. Default is True.
"""
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
name = new.pop(channel_index)
del self[name]
self.channel_names = new
if verbose:
print("channel {0} removed".format(name))
def remove_variable(self, variable, *, implied=True, verbose=True):
"""Remove variable from data.
Parameters
----------
variable : int or str
Variable index or name to remove.
implied : boolean (optional)
Toggle deletion of other variables that start with the same
name. Default is True.
verbose : boolean (optional)
Toggle talkback. Default is True.
"""
if isinstance(variable, int):
variable = self.variable_names[variable]
# find all of the implied variables
removed = []
if implied:
for n in self.variable_names:
if n.startswith(variable):
removed.append(n)
else:
removed = [variable]
# check that axes will not be ruined
for n in removed:
for a in self._axes:
if n in [v.natural_name for v in a.variables]:
message = "{0} is contained in axis {1}".format(n, a.expression)
raise RuntimeError(message)
for c in self._constants:
if n in [v.natural_name for v in c.variables]:
warnings.warn(
"Variable being removed used in a constant",
wt_exceptions.WrightToolsWarning,
)
# do removal
for n in removed:
variable_index = wt_kit.get_index(self.variable_names, n)
new = list(self.variable_names)
name = new.pop(variable_index)
del self[name]
self.variable_names = new
self._variables = None
# finish
if verbose:
print("{0} variable(s) removed:".format(len(removed)))
for n in removed:
print(" {0}".format(n))
def rename_channels(self, *, verbose=True, **kwargs):
"""Rename a set of channels.
Parameters
----------
kwargs
Keyword arguments of the form current:'new'.
verbose : boolean (optional)
Toggle talkback. Default is True
"""
# ensure that items will remain unique
changed = kwargs.keys()
for k, v in kwargs.items():
if v not in changed and v in self.keys():
raise wt_exceptions.NameNotUniqueError(v)
# compile references to items that are changing
new = {}
for k, v in kwargs.items():
obj = self[k]
index = self.channel_names.index(k)
# rename
new[v] = obj, index
Group._instances.pop(obj.fullpath, None)
obj.natural_name = str(v)
# remove old references
del self[k]
# apply new references
names = list(self.channel_names)
for v, value in new.items():
obj, index = value
self[v] = obj
names[index] = v
self.channel_names = names
# finish
if verbose:
print("{0} channel(s) renamed:".format(len(kwargs)))
for k, v in kwargs.items():
print(" {0} --> {1}".format(k, v))
def rename_variables(self, *, implied=True, verbose=True, **kwargs):
"""Rename a set of variables.
Parameters
----------
kwargs
Keyword arguments of the form current:'new'.
implied : boolean (optional)
Toggle inclusion of other variables that start with the same
name. Default is True.
verbose : boolean (optional)
Toggle talkback. Default is True
"""
# find all of the implied variables
kwargs = collections.OrderedDict(kwargs)
if implied:
new = collections.OrderedDict()
for k, v in kwargs.items():
for n in self.variable_names:
if n.startswith(k):
new[n] = n.replace(k, v, 1)
kwargs = new
# ensure that items will remain unique
changed = kwargs.keys()
for k, v in kwargs.items():
if v not in changed and v in self.keys():
raise wt_exceptions.NameNotUniqueError(v)
# compile references to items that are changing
new = {}
for k, v in kwargs.items():
obj = self[k]
index = self.variable_names.index(k)
# rename
new[v] = obj, index
Group._instances.pop(obj.fullpath, None)
obj.natural_name = str(v)
# remove old references
del self[k]
# apply new references
names = list(self.variable_names)
for v, value in new.items():
obj, index = value
self[v] = obj
names[index] = v
self.variable_names = names
units = self.units
new = list(self.axis_expressions)
for i, v in enumerate(kwargs.keys()):
for j, n in enumerate(new):
new[j] = n.replace(v, "{%i}" % i)
for i, n in enumerate(new):
new[i] = n.format(*kwargs.values())
self.transform(*new)
for a, u in zip(self._axes, units):
a.convert(u)
units = self.constant_units
new = list(self.constant_expressions)
for i, v in enumerate(kwargs.keys()):
for j, n in enumerate(new):
new[j] = n.replace(v, "{%i}" % i)
for i, n in enumerate(new):
new[i] = n.format(*kwargs.values())
self.set_constants(*new)
for c, u in zip(self._constants, units):
c.convert(u)
# finish
if verbose:
print("{0} variable(s) renamed:".format(len(kwargs)))
for k, v in kwargs.items():
print(" {0} --> {1}".format(k, v))
def share_nans(self):
"""Share not-a-numbers between all channels.
If any channel is nan at a given index, all channels will be nan
at that index after this operation.
Uses the share_nans method found in wt.kit.
"""
def f(_, s, channels):
outs = wt_kit.share_nans(*[c[s] for c in channels])
for c, o in zip(channels, outs):
c[s] = o
self.channels[0].chunkwise(f, self.channels)
def smooth(self, factors, channel=None, verbose=True) -> "Data":
"""Smooth a channel using an n-dimenional kaiser window.
Note, all arrays are loaded into memory.
For more info see `Kaiser_window`__ wikipedia entry.
__ https://en.wikipedia.org/wiki/Kaiser_window
Parameters
----------
factors : int or list of int
The smoothing factor. You may provide a list of smoothing factors
for each axis.
channel : int or str or None (optional)
The channel to smooth. If None, all channels will be smoothed.
Default is None.
verbose : bool (optional)
Toggle talkback. Default is True.
"""
warnings.warn("smooth", category=wt_exceptions.EntireDatasetInMemoryWarning)
# get factors -----------------------------------------------------------------------------
if isinstance(factors, list):
pass
else:
dummy = np.zeros(len(self._axes))
dummy[::] = factors
factors = list(dummy)
# get channels ----------------------------------------------------------------------------
if channel is None:
channels = self.channels
else:
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channels = [self.channels[channel_index]]
# smooth ----------------------------------------------------------------------------------
for channel in channels:
values = channel[:]
for axis_index in range(len(factors)):
factor = factors[axis_index]
# transpose so the axis of interest is last
transpose_order = range(len(values.shape))
# replace axis_index with zero
transpose_order = [
len(values.shape) - 1 if i == axis_index else i for i in transpose_order
]
transpose_order[len(values.shape) - 1] = axis_index
values = values.transpose(transpose_order)
# get kaiser window
beta = 5.0
w = np.kaiser(2 * factor + 1, beta)
# for all slices...
for index in np.ndindex(values[..., 0].shape):
current_slice = values[index]
                    temp_slice = np.pad(current_slice, int(factor), mode="edge")
                    values[index] = np.convolve(temp_slice, w / w.sum(), mode="valid")
# transpose out
values = values.transpose(transpose_order)
# return array to channel object
channel[:] = values
if verbose:
print("smoothed data")
def split(
self, expression, positions, *, units=None, parent=None, verbose=True
) -> wt_collection.Collection:
"""
Split the data object along a given expression, in units.
Parameters
----------
expression : int or str
The expression to split along. If given as an integer, the axis at that index
is used.
positions : number-type or 1D array-type
The position(s) to split at, in units.
units : str (optional)
            The units of the given positions. Default is None, which assumes
            the positions are in the current units of the split axis.
parent : WrightTools.Collection (optional)
The parent collection in which to place the 'split' collection.
Default is a new Collection.
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.collection.Collection
A Collection of data objects.
The order of the objects is such that the axis points retain their original order.
See Also
--------
chop
Divide the dataset into its lower-dimensionality components.
collapse
Collapse the dataset along one axis.
"""
# axis ------------------------------------------------------------------------------------
old_expr = self.axis_expressions
old_units = self.units
out = wt_collection.Collection(name="split", parent=parent)
if isinstance(expression, int):
if units is None:
units = self._axes[expression].units
expression = self._axes[expression].expression
elif isinstance(expression, str):
pass
else:
raise TypeError("expression: expected {int, str}, got %s" % type(expression))
self.transform(expression)
if units:
self.convert(units, verbose=False)
try:
positions = [-np.inf] + sorted(list(positions)) + [np.inf]
except TypeError:
positions = [-np.inf, positions, np.inf]
values = self._axes[0].full
masks = [(values >= lo) & (values < hi) for lo, hi in wt_kit.pairwise(positions)]
omasks = []
cuts = []
for mask in masks:
try:
omasks.append(wt_kit.mask_reduce(mask))
cuts.append([i == 1 for i in omasks[-1].shape])
# Ensure at least one axis is kept
if np.all(cuts[-1]):
cuts[-1][0] = False
except ValueError:
omasks.append(None)
cuts.append(None)
for i in range(len(positions) - 1):
out.create_data("split%03i" % i)
for var in self.variables:
for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
if omask is None:
# Zero length split
continue
omask = wt_kit.enforce_mask_shape(omask, var.shape)
omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
out_arr = np.full(omask.shape, np.nan)
imask = wt_kit.enforce_mask_shape(imask, var.shape)
out_arr[omask] = var[:][imask]
out[i].create_variable(values=out_arr, **var.attrs)
for ch in self.channels:
for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
if omask is None:
# Zero length split
continue
omask = wt_kit.enforce_mask_shape(omask, ch.shape)
omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
out_arr = np.full(omask.shape, np.nan)
imask = wt_kit.enforce_mask_shape(imask, ch.shape)
out_arr[omask] = ch[:][imask]
out[i].create_channel(values=out_arr, **ch.attrs)
if verbose:
for d in out.values():
try:
d.transform(expression)
except IndexError:
continue
print("split data into {0} pieces along <{1}>:".format(len(positions) - 1, expression))
for i, (lo, hi) in enumerate(wt_kit.pairwise(positions)):
new_data = out[i]
if new_data.shape == ():
print(" {0} : None".format(i))
else:
new_axis = new_data.axes[0]
print(
" {0} : {1:0.2f} to {2:0.2f} {3} {4}".format(
i, lo, hi, self.axes[0].units, new_axis.shape
)
)
for d in out.values():
try:
d.transform(*old_expr)
keep = []
keep_units = []
for ax, u in zip(d.axes, old_units):
if ax.size > 1:
keep.append(ax.expression)
keep_units.append(u)
else:
d.create_constant(ax.expression, verbose=False)
d.transform(*keep)
for ax, u in zip(d.axes, keep_units):
ax.convert(u)
except IndexError:
continue
tempax = Axis(d, expression)
if all(
np.all(
np.sum(~np.isnan(tempax.masked), axis=tuple(set(range(tempax.ndim)) - {j}))
<= 1
)
for j in range(tempax.ndim)
):
d.create_constant(expression, verbose=False)
self.transform(*old_expr)
for ax, u in zip(self.axes, old_units):
ax.convert(u)
return out
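    # Illustrative usage sketch for split(): split a hypothetical data object
    # at w1 = 1550 (in the current axis units), yielding a Collection whose
    # members are named 'split000', 'split001', ...:
    #
    #     col = data.split('w1', [1550.0])
    #     below, above = col['split000'], col['split001']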
def transform(self, *axes, verbose=True):
"""Transform the data.
Parameters
----------
axes : strings
Expressions for the new set of axes.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Similar method except for constants
"""
# TODO: ensure that transform does not break data
# create
new = []
newt = "newt" in self.axis_expressions
current = {a.expression: a for a in self._axes}
for expression in axes:
axis = current.get(expression, Axis(self, expression))
new.append(axis)
self._axes = new
# units
for a in self._axes:
if a.units is None:
a.convert(a.variables[0].units)
# finish
self.flush()
self._on_axes_updated()
nownewt = "newt" in self.axis_expressions
if verbose and nownewt and not newt:
print("Look she turned me into a newt")
elif verbose and newt and not nownewt:
print("I got better")
def set_constants(self, *constants, verbose=True):
"""Set the constants associated with the data.
Parameters
----------
constants : str
Expressions for the new set of constants.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
transform
Similar method except for axes.
create_constant
Add an individual constant.
remove_constant
Remove an individual constant.
"""
# create
new = []
current = {c.expression: c for c in self._constants}
for expression in constants:
constant = current.get(expression, Constant(self, expression))
new.append(constant)
self._constants = new
# units
for c in self._constants:
if c.units is None:
c.convert(c.variables[0].units)
# finish
self.flush()
self._on_constants_updated()
def create_constant(self, expression, *, verbose=True):
"""Append a constant to the stored list.
Parameters
----------
expression : str
Expression for the new constant.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Remove and replace all constants.
remove_constant
Remove an individual constant.
"""
if expression in self.constant_expressions:
wt_exceptions.ObjectExistsWarning.warn(expression)
return self.constants[self.constant_expressions.index(expression)]
constant = Constant(self, expression)
if constant.units is None:
constant.convert(constant.variables[0].units)
self._constants.append(constant)
self.flush()
self._on_constants_updated()
if verbose:
print("Constant '{}' added".format(constant.expression))
return constant
def remove_constant(self, constant, *, verbose=True):
"""Remove a constant from the stored list.
Parameters
----------
constant : str or Constant or int
            Constant to remove, given as an expression, Constant instance, or index.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Remove and replace all constants.
create_constant
Add an individual constant.
"""
if isinstance(constant, (str, int)):
constant_index = wt_kit.get_index(self.constant_expressions, constant)
elif isinstance(constant, Constant):
constant_index = wt_kit.get_index(self.constants, constant)
constant = self._constants[constant_index]
self._constants.pop(constant_index)
self.flush()
self._on_constants_updated()
if verbose:
print("Constant '{}' removed".format(constant.expression))
def zoom(self, factor, order=1, verbose=True):
"""Zoom the data array using spline interpolation of the requested order.
The number of points along each axis is increased by factor.
See `scipy ndimage`__ for more info.
__ http://docs.scipy.org/doc/scipy/reference/
generated/scipy.ndimage.interpolation.zoom.html
Parameters
----------
factor : float
The number of points along each axis will increase by this factor.
order : int (optional)
The order of the spline used to interpolate onto new points.
verbose : bool (optional)
Toggle talkback. Default is True.
"""
raise NotImplementedError
import scipy.ndimage
# axes
for axis in self._axes:
axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)
# channels
for channel in self.channels:
channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)
# return
if verbose:
print("data zoomed to new shape:", self.shape)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import itertools
import weakref
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
ALLOWLIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY, # pylint: disable=protected-access
variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access
]
_EAGER_CONST_THRESHOLD = 128
class UnknownArgument(object):
"""Signifies an argument which is not currently handled."""
pass
def convert_structure_to_signature(structure, arg_names=None):
"""Convert a potentially nested structure to a signature.
Args:
structure: Structure to convert, where top level collection is a list or a
tuple.
    arg_names: Optional list of arguments that has the same number of elements
      as `structure` and is used for naming corresponding TensorSpecs.
Returns:
Identical structure that has TensorSpec objects instead of Tensors and
UnknownArgument instead of any unsupported types.
"""
def encode_arg(arg, path):
"""A representation for this argument, for converting into signatures."""
if isinstance(arg, ops.Tensor):
user_specified_name = None
try:
user_specified_name = compat.as_str(
arg.op.get_attr("_user_specified_name"))
except ValueError:
pass
if path and user_specified_name and user_specified_name != path[0]:
# The user has explicitly named the argument differently than the name
# of the function argument.
name = user_specified_name
else:
name = "/".join(str(p) for p in path)
return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
if isinstance(arg, composite_tensor.CompositeTensor):
# TODO(b/133606651) Do we need to inject arg_name?
return arg._type_spec # pylint: disable=protected-access
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
name = "/".join(str(p) for p in path)
return resource_variable_ops.VariableSpec(arg.shape, arg.dtype, name)
if isinstance(arg, (
int,
float,
bool,
str,
type(None),
dtypes.DType,
tensor_spec.TensorSpec,
type_spec.TypeSpec,
)):
return arg
return UnknownArgument()
# We are using the flattened paths to name the TensorSpecs. We need an
# explicit name for them downstream.
flattened = nest.flatten_with_tuple_paths(structure)
if arg_names:
if len(arg_names) != len(structure):
raise ValueError(
"Passed in arg_names don't match actual signature (%s)." % arg_names)
# Replace all top-level names with their actual arg_names. If a path before
# was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
flattened = [
((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
]
mapped = [encode_arg(arg, path) for path, arg in flattened]
return nest.pack_sequence_as(structure, mapped)
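# Illustrative usage sketch for convert_structure_to_signature(); the structure
# below is hypothetical. TensorSpecs and plain Python scalars pass through
# unchanged, while graph tensors become TensorSpecs named after their path or
# the supplied arg_names:
#
#   spec = tensor_spec.TensorSpec(shape=(None, 2), dtype=dtypes.float32)
#   convert_structure_to_signature((spec, 3), arg_names=["x", "n"])  # -> (spec, 3)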
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
control_outputs: Operations that must be executed before the function
represented by this graph can be said to have been executed.
structured_input_signature: A tuple of (args, kwargs), which are both
possibly-nested python objects that were received by this function. Note
that these structures might contain Python `None`s.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
control_captures: Set of external ops on which this graph has a control
dependency.
seed: The graph-level random seed.
capture_by_value: If True, the func graph will capture Variables by value
instead of reference.
"""
def __init__(self, name, collections=None, capture_by_value=None):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write
to) the outer graph's collections that are not allowlisted, and both
read and write to the outer graph's collections that are allowlisted.
The current allowlisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will
capture Variables by value instead of reference. By default inherit
from outer graphs, and failing that will default to False.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.control_outputs = []
self.control_captures = set()
self.structured_input_signature = None
self.structured_outputs = None
self._weak_variables = []
self._watched_variables = object_identity.ObjectIdentityWeakSet()
self.is_control_flow_graph = False
outer_graph = ops.get_default_graph()
self._weak_outer_graph = weakref.ref(outer_graph)
while outer_graph.building_function:
outer_graph = outer_graph.outer_graph
# If self._weak_outer_graph is deleted, we revert to the outermost Graph
# active when the FuncGraph was traced. This will not be a FuncGraph.
self._fallback_outer_graph = outer_graph
self._captures = py_collections.OrderedDict()
# If not None, records the names of output args of this function. Used to
# preserve the output names in the signature of a serialized+deserialized
# function. Private at the moment mostly because it's often out of date.
self._output_names = None
# Maps arbitrary key -> (closure, nest of placeholders), where at function
# call time the value of closure() will be used to feed the nest of
# placeholders.
self._deferred_captures = py_collections.OrderedDict()
# Inherit capture-by-value from outer graph.
if capture_by_value is not None:
self.capture_by_value = capture_by_value
elif self.outer_graph is not None and isinstance(
self.outer_graph, FuncGraph):
self.capture_by_value = self.outer_graph.capture_by_value
else:
self.capture_by_value = False
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
# [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of
# any None op_seed for random_op in the function, in which case we end up
# using function seed, which could be unintended behavior for the op.
self._seed_used = False
else:
self.seed = graph.seed
self._seed_used = False
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
if collections is None:
for collection_name in graph.get_all_collection_keys():
if collection_name not in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
else:
self._collections = collections
# Keep track of whether this FuncGraph is exportable to SavedModel. Use
# `graph.mark_as_unsaveable(reason)` to mark this FuncGraph and any
# dependent functions as unsaveable.
self._saveable = True
self._saving_errors = set()
# Keep track of callbacks to run when this graph exits default scope
self._scope_exit_callbacks = None
def __str__(self):
return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))
def watch_variable(self, v):
"""Marks the variable v as accessed while building this graph."""
while self is not None and isinstance(self, FuncGraph):
self._watched_variables.add(v)
self = self.outer_graph
def capture_call_time_value(self, closure, spec, key=None):
"""Creates a placeholder which at call time has the value closure().
Useful, for example, to respect TensorFlow context managers, which are often
dynamically scoped.
Args:
closure: function which takes no arguments, to be evaluated at function
call time, returning a nest of tensors compatible with `spec`.
spec: nest of TypeSpec for the value to capture.
key: optional. If not None, multiple calls to lazy_capture with the same
key in the same graph will return the same placeholder, and the
first closure will be used at function call time.
Returns:
Nest of placeholders which, at function call time, will be fed with the
result of calling closure().
Raises:
ValueError: at function call time, if the return value of closure() is
not compatible with `spec`.
"""
if key is None:
key = object()
if key not in self._deferred_captures:
def convert_to_placeholder(s):
if not isinstance(s, tensor_spec.DenseSpec):
raise TypeError(
"Expected a nest of `TypeSpec` objects, found %s of type %s." %
(s, type(s)))
return array_ops.placeholder(dtype=s.dtype, shape=s.shape)
placeholder = nest.map_structure(
convert_to_placeholder, spec, expand_composites=True)
def wrapped_closure():
ret_nest = closure()
nest.assert_same_structure(spec, ret_nest, expand_composites=True)
# This uses the tensor dtype defined in `spec` when converting values
# in `ret_nest` to tensors.
# pylint: disable=protected-access
y = nest.map_structure(lambda s, r: s._to_components(r), spec, ret_nest,
expand_composites=False)
# pylint: enable=protected-access
return nest.flatten(y, expand_composites=True)
self._deferred_captures[key] = (wrapped_closure, placeholder)
return self._deferred_captures[key][1]
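  # Illustrative usage sketch for capture_call_time_value(); `graph` and the
  # closure are hypothetical, and the call is assumed to happen while `graph`
  # is being built:
  #
  #   spec = tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)
  #   ph = graph.capture_call_time_value(lambda: get_current_lr(), spec)
  #   # `ph` is a placeholder; the closure is re-evaluated at each function call.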
def control_dependencies(self, control_inputs):
"""Handles control dependencies.
FuncGraph wraps Graph's control_dependencies logic by first filtering out
any external tensors / operations and storing them in the graph's
control_captures member. Any consumers of this function graph must then
decide how to handle the control captures.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return super(FuncGraph, self).control_dependencies(control_inputs)
filtered_control_inputs = []
for c in control_inputs:
# Check for _UnreadVariable
if (isinstance(c, ops.IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
graph_element = ops._as_graph_element(c) # pylint: disable=protected-access
if graph_element is None:
graph_element = c
if graph_element is not None and getattr(
graph_element, "graph", None) is not self:
self.control_captures.add(graph_element)
else:
filtered_control_inputs.append(graph_element)
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
def as_default(self):
outer_cm = super(FuncGraph, self).as_default()
@tf_contextlib.contextmanager
def inner_cm():
"""Context manager for copying distribute.Strategy scope information."""
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on
# inheriting this stack from the default graph even in eager mode. Maybe
# it should be part of the eager context? This would also allow us to
# remove a get_default_graph() call from the function cache lookup.
graph = ops.get_default_graph()
old_strategy_stack = self._distribution_strategy_stack
self._distribution_strategy_stack = list(
graph._distribution_strategy_stack)
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
# However, we need to preserve the outer device stack in the following
# cases in non eager context:
# 1. device stack is callable
# 2. When using distribution strategy with legacy graph mode.
old_device_stack = self._device_function_stack
if (not context.executing_eagerly() and
(device_stack_has_callable(graph._device_function_stack) or
(self._distribution_strategy_stack and
not ops.executing_eagerly_outside_functions()))):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
old_creator_stack = self._variable_creator_stack
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
old_graph_key = self._graph_key
self._graph_key = graph._graph_key
# pylint: enable=protected-access
old_scope_exit_callbacks = self._scope_exit_callbacks
self._scope_exit_callbacks = []
with outer_cm as g:
try:
yield g
finally:
try:
for fn in self._scope_exit_callbacks:
fn()
finally:
self._scope_exit_callbacks = old_scope_exit_callbacks
self._distribution_strategy_stack = old_strategy_stack
self._device_function_stack = old_device_stack
self._variable_creator_stack = old_creator_stack
self._graph_key = old_graph_key
return inner_cm()
@property
def outer_graph(self):
"""The Graph this FuncGraph is nested in.
Functions may capture Tensors from graphs they are nested in (transitive).
Returns:
A Graph object. Initially set to the current default graph when the
FuncGraph was created. If the previous `outer_graph` was deleted because
the function that owns it was deleted, `outer_graph` is reset to the
outermost default graph active when the FuncGraph was created. This
FuncGraph won't have captured anything from the new `outer_graph` (and
likely not from the previous setting, since that would have created a
strong reference), but it is returned so that FuncGraphs always have a
parent.
"""
current = self._weak_outer_graph()
if current is None:
return self._fallback_outer_graph
return current
@outer_graph.setter
def outer_graph(self, new_outer_graph):
"""Sets `outer_graph` to `new_outer_graph`."""
self._weak_outer_graph = weakref.ref(new_outer_graph)
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def trainable_variables(self):
"""A sequence of trainable variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Returns:
Sequence of trainable variables for this func graph.
"""
return tuple(v for v in self.variables if v.trainable)
@property
def variables(self):
"""A sequence of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Returns:
Sequence of variables for this func graph.
"""
def deref(weak_v):
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
return v
return tuple(deref(v) for v in self._weak_variables)
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def _capture_by_value(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
# When capturing by value, do the read outside
reverse_captures = dict((id(v), k) for k, v in self.captures)
uncaptured_inputs = [reverse_captures.get(id(t), t) for t in inputs]
with ops.init_scope():
if context.executing_eagerly():
attr_list = ("dtype", int(attrs["dtype"].type))
value, = execute.execute(
compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
context.context())
else:
op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access
op_type,
uncaptured_inputs,
dtypes,
input_types,
name,
attrs,
op_def,
compute_device)
value = op.outputs[0]
captured_value = self.capture(value)
return captured_value.op
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
if self.capture_by_value and op_type in ["ReadVariableOp",
"ResourceGather"]:
return self._capture_by_value(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
# Use a different list to avoid modifying the original inputs list.
captured_inputs = []
for inp in inputs:
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
captured_inputs.append(inp)
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
op_type, captured_inputs, dtypes, input_types, name, attrs, op_def,
compute_device)
def capture(self, tensor, name=None, shape=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
shape: Optional shape if a placeholder is created.
Returns:
Tensor from this FuncGraph.
Raises:
InaccessibleTensorError: if any tensors are accessed in a manner that
bypasses the mechanisms required for the data dependencies to be correctly
wired.
"""
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
# Small EagerTensors are captured with Const ops
if (tensor.dtype in dtypes.TF_VALUE_DTYPES and
np.prod(tensor.shape) <= _EAGER_CONST_THRESHOLD):
return self.capture_eager_tensor(tensor, name)
# Large EagerTensors and resources are captured with Placeholder ops
return self._capture_helper(tensor, name, shape)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
inner_graph = tensor.graph
while inner_graph is not None and isinstance(inner_graph, FuncGraph):
if inner_graph is self:
raise errors.InaccessibleTensorError(
"The tensor '%s' cannot be accessed here: it is defined"
" in another function or code block. Use return values,"
" explicit Python locals or TensorFlow collections to access"
" it. Defined in: %s; accessed from: %s.\n"
% (tensor, tensor.graph, self))
inner_graph = inner_graph.outer_graph
return self._capture_helper(tensor, name)
return tensor
def _capture_helper(self, tensor, name, shape=None):
capture = self._captures.get(id(tensor))
if capture is None:
placeholder = _create_substitute_placeholder(
tensor, name=name, dtype=tensor.dtype, shape=shape)
# Record the composite device as an attribute to the placeholder.
      # This attribute would be propagated into the arg_attr of the FunctionDef.
# Currently, a packed eager tensor is always placed on a CompositeDevice.
if isinstance(tensor, ops.EagerTensor) and tensor.is_packed:
placeholder.op._set_attr( # pylint: disable=protected-access
"_composite_device",
attr_value_pb2.AttrValue(s=compat.as_bytes(tensor.device)))
self.add_capture(tensor, placeholder)
else:
placeholder = capture[1]
tape.record_operation("captured_value", [placeholder], [tensor],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
return placeholder
@property
def captures(self):
"""Order list of tuples containing external and internal captures."""
return self._captures.values()
def add_capture(self, tensor, placeholder):
"""Capture a specific tensor and utilize the provided placeholder.
Args:
      tensor: Tensor to capture.
placeholder: Provided placeholder for the tensor.
"""
self._captures[id(tensor)] = (tensor, placeholder)
self.inputs.append(placeholder)
def replace_capture(self, tensor, placeholder):
"""Replace already existing capture."""
self._captures[id(tensor)] = (tensor, placeholder)
def reset_captures(self, capture_list):
"""Set the captures with the provided list of captures & placeholder."""
self._captures = py_collections.OrderedDict()
for tensor, placeholder in capture_list:
self._captures[id(tensor)] = (tensor, placeholder)
def pop_capture(self, tensor):
"""Remove the capture and return the generated placeholder."""
capture = self._captures.pop(id(tensor), None)
if capture is None:
return None
return capture[1]
def clear_captures(self):
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while self._captures:
self._captures.popitem()
memory.dismantle_ordered_dict(self._captures)
while self._deferred_captures:
self._deferred_captures.popitem()
memory.dismantle_ordered_dict(self._deferred_captures)
def capture_distributed_variable(self, variable, placeholder):
"""Add given distributed variable to captures with given placeholder."""
self._captures[id(variable)] = (variable, placeholder)
tape.record_operation("captured_value", [placeholder], [variable],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
def capture_eager_tensor(self, tensor, name):
capture = self._captures.get(id(tensor))
if capture is None:
# We clear all control dependencies and place the Const op on the same
# device as the source tensor. The device placement may be relaxed at
# a later date.
with ops.control_dependencies(None), self.device(tensor.device):
constant_value = tensor_util.constant_value(tensor)
if constant_value is None:
# Some eager tensors, e.g. parallel tensors, are not convertible to a
# single constant. We'll use a placeholder for this case.
return self._capture_helper(tensor, name)
graph_const = constant_op.constant(constant_value, dtype=tensor.dtype,
shape=tensor.shape, name=name)
self.add_capture(tensor, graph_const)
else:
graph_const = capture[1]
tape.record_operation("captured_value", [graph_const], [tensor],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
return graph_const
def captured(self, tensor):
"""Check if the specified tensor has been captured."""
return id(tensor) in self._captures
@property
def external_captures(self):
"""External tensors captured by this function."""
return [c[0] for c in self._captures.values()]
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return [c[1] for c in self._captures.values()]
@property
def deferred_external_captures(self):
"""Ordered nest of tensors whose placeholders will be fed at call time."""
return [c[0] for c in self._deferred_captures.values()]
@property
def deferred_internal_captures(self):
"""List of nest of placeholders which at call time will be fed."""
return [c[1] for c in self._deferred_captures.values()]
@property
def variable_captures(self):
"""Map of python object ids of variables to variables which are captured."""
return {
id(self._captures[id(v)][1]): v
for v in self.variables
if id(v) in self._captures
}
def mark_as_unsaveable(self, error_message):
"""Marks this FuncGraph as unsaveable.
Any attempts to export this FuncGraph will raise an error with the specified
message.
Args:
error_message: List or string containing the error message to be raised
when saving this FuncGraph to SavedModel.
"""
self._saveable = False
if isinstance(error_message, str):
error_message = [error_message]
self._saving_errors.update(error_message)
@property
def saveable(self):
"""Returns whether this FuncGraph is saveable."""
return self._saveable
@property
def saving_errors(self):
"""Returns set of errors preventing this FuncGraph from being saved."""
return self._saving_errors
def _add_scope_exit_callback(self, fn):
"""Add a function to call when this graph exits the default scope."""
if not callable(fn):
raise TypeError("fn is not callable: {}".format(fn))
if self._scope_exit_callbacks is None:
raise RuntimeError(
"Attempting to add a scope exit callback, but the default graph is "
"not the context scope graph. Did you forget to call "
"'with graph.as_default(): ...'?")
self._scope_exit_callbacks.append(fn)
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
autograph_options=None,
add_control_dependencies=True,
arg_names=None,
op_return_value=None,
collections=None,
capture_by_value=None,
override_flat_arg_shapes=None):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
    func_graph: Optional. An instance of FuncGraph. If provided, this graph
      is used; otherwise a new one is built and returned.
autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
autograph_options: additional knobs to control when `autograph=True`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write to)
the outer graph's collections that are not allowlisted, and both
read and write to the outer graph's collections that are allowlisted.
The current allowlisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
    capture_by_value: An optional boolean. If True, the func graph will capture
      Variables by value instead of reference. By default this is inherited
      from outer graphs, and failing that it defaults to False.
override_flat_arg_shapes: An optional list of instances that are either
`None` or `TensorShape`. The length must match that of
`nest.flatten((args, kwargs), expand_composites=True)`. The entries
containing value `None` must match entries in flattened arguments
containing non-tensors, while entries containing a `TensorShape` must
match entries in the flattened arguments containing tensors.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
ValueError: If both `signature` and `override_flat_arg_shapes` are
passed in.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name, collections=collections,
capture_by_value=capture_by_value)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
deps_control_manager = auto_control_deps.AutomaticControlDependencies()
else:
deps_control_manager = ops.NullContextmanager()
with func_graph.as_default(), deps_control_manager as deps_ctx:
current_scope = variable_scope.get_variable_scope()
    default_use_resource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None and override_flat_arg_shapes is not None:
raise ValueError(
"Passed both signature and override_flat_arg_shapes: %s and %s."
% (signature, override_flat_arg_shapes))
if signature is not None:
args = signature
kwargs = {}
# Creates and names placeholders for all arguments.
if override_flat_arg_shapes is not None:
flat_args = nest.flatten(args, expand_composites=True)
arg_shapes = override_flat_arg_shapes[:len(flat_args)]
kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
else:
arg_shapes = None
kwarg_shapes = None
func_args = _get_defun_inputs_from_args(
args, arg_names, flat_shapes=arg_shapes)
func_kwargs = _get_defun_inputs_from_kwargs(
kwargs, flat_shapes=kwarg_shapes)
# Convert all Tensors into TensorSpecs before saving the structured inputs.
# If storing pure concrete functions that are not called through polymorphic
# functions, we don't have access to FunctionSpec, so we need to call the
# TensorSpecs by their `arg_names` for later binding.
func_graph.structured_input_signature = (
convert_structure_to_signature(func_args, arg_names),
convert_structure_to_signature(func_kwargs))
flat_func_args = nest.flatten(func_args, expand_composites=True)
flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
# Temporarily set inputs to allow graph building code to inspect
# them. Reassigned below.
func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
if isinstance(arg, ops.Tensor)]
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
expand_composites=True)
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, flat_func_kwargs, expand_composites=True)
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_composite(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
if add_control_dependencies:
x = deps_ctx.mark_as_return(x)
return x
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def wrapper(*args, **kwargs):
"""Calls a converted version of original_func."""
# TODO(mdan): Push this block higher in tf.function's call stack.
try:
return autograph.converted_call(
original_func,
args,
kwargs,
options=autograph.ConversionOptions(
recursive=True,
optional_features=autograph_options,
user_requested=True,
))
except Exception as e: # pylint:disable=broad-except
if hasattr(e, "ag_error_metadata"):
raise e.ag_error_metadata.to_exception(e)
else:
raise
# Wrapping around a decorator allows checks like tf_inspect.getargspec
# to be accurate.
converted_func = tf_decorator.make_decorator(original_func, wrapper)
python_func = tf_decorator.rewrap(python_func, original_func,
converted_func)
else:
_, original_func = tf_decorator.unwrap(python_func)
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors, CompositeTensors,
# TensorArrays and `None`s.
func_outputs = nest.map_structure(convert, func_outputs,
expand_composites=True)
check_mutation(func_args_before, func_args, original_func)
check_mutation(func_kwargs_before, func_kwargs, original_func)
finally:
      current_scope.set_use_resource(default_use_resource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access
arg_variables = object_identity.ObjectIdentitySet()
inputs = []
for arg in (nest.flatten(func_args, expand_composites=True) +
nest.flatten(func_kwargs, expand_composites=True)):
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
# Even if an argument variable was not used in the function, we've
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.pop_capture(arg.handle)
if resource_placeholder is None:
continue
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in graph_variables if v not in arg_variables]
func_graph.inputs = (
inputs + func_graph.internal_captures + nest.flatten(
func_graph.deferred_internal_captures, expand_composites=True))
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
if add_control_dependencies:
func_graph.control_outputs.extend(deps_control_manager.ops_which_must_run)
func_graph.collective_manager_ids_used = (
deps_control_manager.collective_manager_ids_used)
return func_graph
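# A rough usage sketch of the tracing helper above (illustrative only; this is
# an internal API, and `constant_op` is assumed to be importable as it already
# is elsewhere in this module):
#
#   graph = func_graph_from_py_func(
#       "add_one",
#       lambda x: x + 1.0,
#       args=(constant_op.constant(1.0),),
#       kwargs={})
#   print(graph.inputs, graph.outputs, graph.structured_outputs)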
def maybe_captured(tensor):
"""If t is a captured value placeholder, returns the original captured value.
Args:
tensor: Tensor.
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures:
if tensor == placeholder_t:
return maybe_captured(input_t)
# pylint: enable=protected-access
return tensor
def device_stack_has_callable(device_stack):
"""Checks whether a device stack contains a callable."""
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2, func):
"""Check if two list of arguments are exactly the same."""
func_name = getattr(func, "__name__", func)
errmsg = ("{}() should not modify its Python input arguments."
" Check if it modifies any lists or dicts passed as"
" arguments. Modifying a copy is allowed.".format(func_name))
try:
# TODO(mdan): Compare more robustly so that argument names can be reported.
nest.assert_same_structure(n1, n2, expand_composites=True)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),
nest.flatten(n2, expand_composites=True)):
if arg1 is not arg2:
raise ValueError(errmsg)
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
"""Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.
Args:
sequence: A nested structure of Tensors, CompositeTensors, and
TensorArrays.
Returns:
A list of tensors.
"""
flat_sequence = nest.flatten(sequence, expand_composites=True)
return [
item.flow if isinstance(item, tensor_array_ops.TensorArray) else item
for item in flat_sequence]
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
"""Like `nest.pack_sequence_as` but also builds TensorArrays from flows.
Args:
structure: The structure to pack into. May contain Tensors,
CompositeTensors, or TensorArrays.
flat_sequence: An iterable containing tensors.
Returns:
A nested structure.
Raises:
AssertionError if `structure` and `flat_sequence` are not compatible.
"""
flat_sequence = list(flat_sequence)
flattened_structure = nest.flatten(structure, expand_composites=True)
if len(flattened_structure) != len(flat_sequence):
raise ValueError("Mismatch in element count")
for i in range(len(flat_sequence)):
if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):
flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
old_ta=flattened_structure[i], flow=flat_sequence[i])
return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
def _create_substitute_placeholder(value, name=None, dtype=None, shape=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
if shape is None:
shape = value.shape
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=shape, name=name)
custom_gradient.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
"""Maps Python function positional args to graph-construction inputs."""
return _get_defun_inputs(
args, names, structure=args, flat_shapes=flat_shapes)
def _get_composite_tensor_spec(x):
"""Returns the TypeSpec for x if it's a composite tensor, or x otherwise."""
return (x._type_spec # pylint: disable=protected-access
if isinstance(x, composite_tensor.CompositeTensor) else x)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
"""Maps python function args to graph-construction inputs.
Args:
args: A flat list of user-specified arguments.
names: A list of strings with user-specified argument names, same length as
`args`. May be `None`, in which case a generic name is used.
structure: The original argument list or dictionary.
flat_shapes: A flat list of values that are either `None` or
instances of `TensorShape`. If provided, then length must match
that of `nest.flatten(args, expand_composites=True)`; and locations where
`args` are instances of `Tensor` must have a corresponding `TensorShape`
in `flat_shapes`. May be `None`, in which case exact shapes are read
directly from the args.
Returns:
Placeholders with the same structure as `structure`.
Raises:
RuntimeError: if `flat_shapes` is provided, but
`len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.
RuntimeError: if a shape from `flat_shapes` is not None
for an argument that is not a `Tensor`, `TensorSpec`,
or `ResourceVariable`.
"""
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(args)
if flat_shapes is None:
shapes_iter = itertools.repeat(None)
else:
len_flat_args = len(nest.flatten(args, expand_composites=True))
if len_flat_args != len(flat_shapes):
raise RuntimeError(
"Length of fully flat shapes (%d) must match that of "
"flatten(args) (%d). args: %s, flat_shapes: %s"
% (len(flat_shapes),
len_flat_args,
args,
flat_shapes))
shapes_iter = iter(flat_shapes)
for arg_value, name in zip(args, names):
# Replace any composite tensors with their TypeSpecs. This is important
# for ensuring that shape information that's not preserved by the TypeSpec
# (such as the number of values in a SparseTensor) gets properly masked.
arg_value = nest.map_structure(_get_composite_tensor_spec, arg_value)
flattened = nest.flatten(arg_value, expand_composites=True)
for arg in flattened:
# We have a shape entry for each arg, regardless of whether it's a real
# Tensor or not. For non-tensor entries it should be None.
shape = next(shapes_iter)
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
arg_is_spec = isinstance(arg, tensor_spec.TensorSpec)
if arg_is_spec and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder_shape = shape if shape is not None else arg.shape
try:
placeholder = graph_placeholder(
arg.dtype, placeholder_shape,
name=requested_name)
except ValueError:
# Sometimes parameter names are not valid op names, so fall back to
# unnamed placeholders.
placeholder = graph_placeholder(arg.dtype, placeholder_shape)
if not arg_is_spec:
custom_gradient.copy_handle_data(arg, placeholder)
if name is not None:
# Record the requested/user-specified name in case it's different than
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, (resource_variable_ops.BaseResourceVariable,
resource_variable_ops.VariableSpec)):
if isinstance(arg, resource_variable_ops.VariableSpec):
name = arg.name or name
with func_graph.outer_graph.as_default():
placeholder = graph_placeholder(dtypes.resource, arg.shape,
name=name)
arg = resource_variable_ops.BaseResourceVariable(
name=name,
shape=arg.shape,
dtype=arg.dtype,
handle=placeholder,
handle_name=name)
# Capture arg variables to create placeholders for them. These will be
# removed as captures after the function is traced (since otherwise we'd
# just add it back with a new placeholder when the variable was
# referenced).
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
if shape is not None:
raise RuntimeError(
"Expected provided shape override to be None for arg that isn't "
"a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
% (arg, shape, args))
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs,
expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
"""Maps Python function keyword args to graph-construction inputs."""
if kwargs:
names, args = zip(*sorted(kwargs.items()))
else:
names = []
args = []
return _get_defun_inputs(
args, names, structure=kwargs, flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
"""Removes reference cycles in `func_graph` FuncGraph.
Helpful for making sure the garbage collector doesn't need to run when
the FuncGraph goes out of scope, e.g. in tests using defun with
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).
Args:
func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
after this function.
"""
func_graph.clear_captures()
ops.dismantle_graph(func_graph)
def override_func_graph_name_scope(func_graph, name_scope):
func_graph._name_stack = name_scope # pylint: disable=protected-access
|
#!/usr/bin/env python
# encoding: utf-8
from distutils.core import setup
setup(name='project_package_name',
version='0.1',
description = 'project description',
author = '...',
packages = ['project_package_name'],
)
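# Typical usage of this distutils script (run from the project root that
# contains the 'project_package_name' package directory):
#
#     python setup.py sdist      # build a source distribution under dist/
#     python setup.py install    # install the package into the active environment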
|
STEVILO_DOVOLJENIH_NAPAK = 10
PRAVILNA_CRKA = '+'
PONOVLJENA_CRKA = 'o'
NAPACNA_CRKA = '-'
ZMAGA = 'W'
PORAZ = 'X'
class Igra:
def __init__(self, geslo, crke):
self.geslo = geslo
self.crke = crke[:]
def napacne_crke(self):
return [crka for crka in self.crke if crka not in self.geslo]
def pravilne_crke(self):
return [crka for crka in self.crke if crka in self.geslo]
def stevilo_napak(self):
return len(self.napacne_crke())
def zmaga(self):
vse_crke = True
for crka in self.geslo:
if crka in self.pravilne_crke():
pass
else:
vse_crke = False
break
        # Equivalent one-liner: vse_crke = all(crka in self.crke for crka in self.geslo)
return vse_crke and STEVILO_DOVOLJENIH_NAPAK >= self.stevilo_napak()
def poraz(self):
return STEVILO_DOVOLJENIH_NAPAK < self.stevilo_napak()
def pravilni_del_gesla(self):
delni = ''
ugibanje = [crka.upper() for crka in self.crke]
for crka in self.geslo:
if crka.upper() in ugibanje:
delni += crka
else:
delni += '_ '
return delni.strip()
def nepravili_ugibi(self):
return ' '.join(self.napacne_crke())
def ugibaj(self, crka):
crka = crka.upper()
if crka in self.crke:
return PONOVLJENA_CRKA
elif crka in self.geslo:
self.crke.append(crka)
if self.zmaga():
return ZMAGA
else:
return PRAVILNA_CRKA
else:
self.crke.append(crka)
if self.poraz():
return PORAZ
else:
return NAPACNA_CRKA
with open('Vislice/besede.txt', 'r') as f:
bazen_besed = [beseda.strip().upper() for beseda in f.readlines()]
import random
def nova_igra():
geslo = random.choice(bazen_besed)
return Igra(geslo, [])
# testno_geslo = 'DEŽUJE'
# testne_crke = ['A', 'E', 'I', 'O', 'U', 'D', 'J', 'K', 'Ž']
# igra = Igra(testno_geslo, testne_crke)
# print(testno_geslo)
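# Minimal demo of the guessing loop (assumes Vislice/besede.txt exists, since it
# is opened at import time above; a fixed word is used so the run is deterministic).
if __name__ == '__main__':
    igra = Igra('PYTHON', [])
    for crka in 'PYTXHON':
        print(crka, igra.ugibaj(crka), igra.pravilni_del_gesla())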
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import collections
import datetime
import os
import re
import shutil
import six
import sys
import traceback
import yaml
def parse_opts(argv):
parser = argparse.ArgumentParser(
description='Convert an old style NIC config file into the new format '
'using run-os-net-config.sh')
parser.add_argument('--script-dir', metavar='<script directory>',
help="Relative path to run-os-net-config.sh",
default="network/scripts/run-os-net-config.sh")
parser.add_argument('files', nargs="+", metavar='<file>',
help='List of one or more NIC config files to convert')
parser.add_argument('--yes',
action='store_true',
help=("Use --yes to skip the confirmation "
"to overwrite the original config file "),
)
opts = parser.parse_args(argv[1:])
return opts
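# Typical invocation (the script filename below is hypothetical; use whatever
# name this file is saved under in your tree):
#
#     python convert-nic-config.py --script-dir network/scripts/run-os-net-config.sh \
#         nic-configs/compute.yaml nic-configs/controller.yaml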
def to_commented_yaml(filename):
"""Convert comments into 'comments<num>: ...' YAML"""
out_str = ''
last_non_comment_spaces = ''
with open(filename, 'r') as f:
comment_count = 0
for line in f:
# skip blank line
if line.isspace():
continue
char_count = 0
spaces = ''
for char in line:
char_count += 1
if char == ' ':
spaces += ' '
                    continue
elif char == '#':
last_non_comment_spaces = spaces
comment_count += 1
comment = line[char_count:-1]
out_str += "%scomment%i_%i: '%s'\n" % \
(last_non_comment_spaces, comment_count, len(spaces),
comment)
break
else:
last_non_comment_spaces = spaces
out_str += line
# inline comments check
m = re.match(".*:.*#(.*)", line)
if m:
comment_count += 1
out_str += "%s inline_comment%i: '%s'\n" % \
(last_non_comment_spaces, comment_count,
m.group(1))
break
with open(filename, 'w') as f:
f.write(out_str)
return out_str
def to_normal_yaml(filename):
"""Convert back to normal #commented YAML"""
with open(filename, 'r') as f:
data = f.read()
out_str = ''
next_line_break = False
for line in data.split('\n'):
# get_input not supported by run-os-net-config.sh script
line = line.replace('get_input: ', '')
# normal comments
m = re.match(" +comment[0-9]+_([0-9]+): '(.*)'.*", line)
# inline comments
i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line)
if m:
if next_line_break:
out_str += '\n'
next_line_break = False
for x in range(0, int(m.group(1))):
out_str += " "
out_str += "#%s\n" % m.group(2)
elif i:
out_str += " #%s\n" % i.group(1)
next_line_break = False
else:
if next_line_break:
out_str += '\n'
out_str += line
next_line_break = True
if next_line_break:
out_str += '\n'
with open(filename, 'w') as f:
f.write(out_str)
return out_str
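# Illustration of the comment round-trip performed by the two helpers above
# (illustrative values):
#
#   original line:              "  # a comment"
#   after to_commented_yaml():  "  comment1_2: ' a comment'"   (count 1, indent 2)
#   after to_normal_yaml():     "  # a comment"
#
# i.e. comments survive the yaml.load()/yaml.dump() cycle as ordinary mapping keys.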
class description(six.text_type):
pass
# FIXME: Some of this duplicates code from build_endpoint_map.py; we should
# refactor to share the common code.
class TemplateDumper(yaml.SafeDumper):
def represent_ordered_dict(self, data):
return self.represent_dict(data.items())
def description_presenter(self, data):
if '\n' in data:
style = '>'
else:
style = ''
return self.represent_scalar(
yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG, data, style=style)
# We load mappings into OrderedDict to preserve their order
class TemplateLoader(yaml.SafeLoader):
def construct_mapping(self, node):
self.flatten_mapping(node)
return collections.OrderedDict(self.construct_pairs(node))
TemplateDumper.add_representer(description,
TemplateDumper.description_presenter)
TemplateDumper.add_representer(collections.OrderedDict,
TemplateDumper.represent_ordered_dict)
TemplateLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
TemplateLoader.construct_mapping)
def write_template(template, filename=None):
with open(filename, 'w') as f:
yaml.dump(template, f, TemplateDumper, width=120,
default_flow_style=False)
def convert(filename, script_path):
print('Converting %s' % filename)
try:
with open(filename, 'r') as f:
tpl = yaml.load(f.read(), Loader=TemplateLoader)
except Exception:
print(traceback.format_exc())
return 0
for r in (tpl.get('resources', {})).items():
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
new_r = collections.OrderedDict()
new_r['type'] = 'OS::Heat::SoftwareConfig'
new_r['properties'] = collections.OrderedDict()
new_r['properties']['group'] = 'script'
old_net_config = r[1].get(
'properties', {}).get('config', {}).get('os_net_config')
new_config = {'str_replace': collections.OrderedDict()}
new_config['str_replace']['template'] = {'get_file': script_path}
new_config['str_replace']['params'] = \
{'$network_config': old_net_config}
new_r['properties']['config'] = new_config
tpl['resources'][r[0]] = new_r
else:
print("No match %s" % r[0])
return 0
# Preserve typical HOT template key ordering
od_result = collections.OrderedDict()
# Need to bump the HOT version so str_replace supports serializing to json
od_result['heat_template_version'] = "rocky"
if tpl.get('description'):
od_result['description'] = description(tpl['description'])
od_result['parameters'] = tpl['parameters']
od_result['resources'] = tpl['resources']
od_result['outputs'] = tpl['outputs']
write_template(od_result, filename)
return 1
def check_old_style(filename):
with open(filename, 'r') as f:
tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
if isinstance(tpl.get('resources', {}), dict):
for r in (tpl.get('resources', {})).items():
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
return True
return False
opts = parse_opts(sys.argv)
exit_val = 0
num_converted = 0
for base_path in opts.files:
if os.path.isfile(base_path) and base_path.endswith('.yaml'):
if check_old_style(base_path):
# Check for script in the user entered (or default) location or in
# path relative to NIC config files
script_paths = [opts.script_dir]
script_paths.append('../../scripts/run-os-net-config.sh')
script_paths.append('../network/scripts/run-os-net-config.sh')
script_paths.append('/usr/share/openstack-tripleo-heat-templates/'
'network/scripts/run-os-net-config.sh')
script_path = None
for p in script_paths:
if os.path.isfile(os.path.join(os.path.dirname(base_path), p)):
script_path = p
break
if script_path is None:
print("Error couldn't find run-os-net-config.sh relative "
"to filename")
sys.exit(1)
print("Using script at %s" % script_path)
extension = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
backup_filename = os.path.realpath(base_path) + '.' + extension
print('The yaml file will be overwritten and the original saved '
'as %s' % backup_filename)
if not (opts.yes or
input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
print("Skipping file %s" % base_path)
continue
if os.path.exists(backup_filename):
print("Backup file already exists, skipping file %s" %
base_path)
continue
shutil.copyfile(base_path, backup_filename)
to_commented_yaml(base_path)
num_converted += convert(base_path, script_path)
to_normal_yaml(base_path)
else:
print('File %s is not using old style NIC configuration' %
base_path)
else:
print('Unexpected argument %s' % base_path)
if num_converted == 0:
exit_val = 1
sys.exit(exit_val)
|
from django.urls import path
from . import views
app_name = 'basketapp'
urlpatterns = [
path('', views.view, name='view'),
path('add/<int:product_id>/', views.add, name='add'),
    path('remove/<int:basket_item_id>/', views.remove, name='remove'),
path('edit/<int:basket_item_id>/<int:quantity>/', views.edit, name='edit'),
]
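# Example reversals (assuming this URLconf is include()d under a 'basket/' prefix,
# which is a guess -- adjust to the project's root urls.py):
#   reverse('basketapp:add', kwargs={'product_id': 3})                    -> '/basket/add/3/'
#   reverse('basketapp:edit', kwargs={'basket_item_id': 7, 'quantity': 2}) -> '/basket/edit/7/2/'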
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
import warnings
from google.cloud.monitoring_dashboard.v1 import types
from google.cloud.monitoring_dashboard.v1.gapic import dashboards_service_client
from google.cloud.monitoring_dashboard.v1.gapic import enums
if sys.version_info[:2] == (2, 7):
message = (
"A future version of this library will drop support for Python 2.7."
"More details about Python 2 support for Google Cloud Client Libraries"
"can be found at https://cloud.google.com/python/docs/python2-sunset/"
)
warnings.warn(message, DeprecationWarning)
class DashboardsServiceClient(dashboards_service_client.DashboardsServiceClient):
__doc__ = dashboards_service_client.DashboardsServiceClient.__doc__
enums = enums
__all__ = ("enums", "types", "DashboardsServiceClient")
|
for (x_t,y_t) in data_set:
loss_fn = f(w, x_t, y_t)
# compute gradient
d_loss_fn_wrt_w = ...
w -= gamma * d_loss_fn_wrt_w
if <stopping condition is met>:
return w
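# A minimal runnable sketch of the loop above (my own illustrative choices, not
# from the original snippet): a linear model with squared loss, so the gradient
# d_loss_fn_wrt_w has a closed form, and the stopping condition is "gradient
# magnitude below a tolerance".
def sgd(data_set, w, gamma=0.01, tol=1e-6):
    """Single-sample stochastic gradient descent for f(w, x, y) = (w*x - y)**2."""
    for (x_t, y_t) in data_set:
        # gradient of (w*x_t - y_t)**2 with respect to w
        d_loss_fn_wrt_w = 2.0 * (w * x_t - y_t) * x_t
        w -= gamma * d_loss_fn_wrt_w
        if abs(d_loss_fn_wrt_w) < tol:  # stopping condition
            return w
    return w

# Example: fit w in y = 2*x from a handful of repeated samples.
data = [(x, 2.0 * x) for x in (1.0, 2.0, 3.0, 4.0)] * 200
print(sgd(data, w=0.0))  # approaches 2.0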
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import types
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtWidgets
class _EditorMetaclass( Gaffer.Trackable.__class__ ) :
def __call__( cls, *args, **kw ) :
instance = type.__call__( cls, *args, **kw )
while hasattr( cls, "instanceCreatedSignal" ) :
cls.instanceCreatedSignal()( instance )
cls = cls.__bases__[0]
return instance
## Base class for UI components which display or manipulate a ScriptNode
# or its children. These make up the tabs in the UI layout.
class Editor( GafferUI.Widget ) :
__metaclass__ = _EditorMetaclass
def __init__( self, topLevelWidget, scriptNode, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self._qtWidget().setFocusPolicy( QtCore.Qt.ClickFocus )
assert( isinstance( scriptNode, Gaffer.ScriptNode ) )
self.__scriptNode = scriptNode
self.__context = None
self.__title = ""
self.__titleChangedSignal = GafferUI.WidgetSignal()
self.enterSignal().connect( Gaffer.WeakMethod( self.__enter ), scoped = False )
self.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ), scoped = False )
self.__setContextInternal( scriptNode.context(), callUpdate=False )
def scriptNode( self ) :
return self.__scriptNode
## May be called to explicitly set the title for this editor. The
# editor itself is not responsible for displaying the title - this
# is left to the enclosing ui.
def setTitle( self, title ) :
if title == self.__title :
return
self.__title = title
self.titleChangedSignal()( self )
## May be overridden to provide sensible default behaviour for
# the title, but must return BaseClass.getTitle() if it is non-empty.
def getTitle( self ) :
if self.__title :
return self.__title
# if there's no explicit title and a derived class
# has overridden getTitle() then we return the empty
# string to signify that the derived class is free
# to return what it wants
c = self.__class__
while c is not Editor :
if "getTitle" in c.__dict__ :
return ""
c = c.__bases__[0]
# otherwise we default to using the classname
return IECore.CamelCase.toSpaced( self.__class__.__name__ )
## A signal emitted whenever the title changes.
def titleChangedSignal( self ) :
return self.__titleChangedSignal
## By default Editors operate in the main context held by the script node. This function
# allows an alternative context to be provided, making it possible for an editor to
# display itself at a custom frame (or with any other context modification).
def setContext( self, context ) :
self.__setContextInternal( context, callUpdate=True )
def getContext( self ) :
return self.__context
def __setContextInternal( self, context, callUpdate ) :
assert( isinstance( context, ( Gaffer.Context, types.NoneType ) ) )
previousContext = self.__context
self.__context = context
if self.__context is not None :
self.__contextChangedConnection = self.__context.changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )
else :
## \todo I'm not sure why this code allows a None context - surely we
# should always have a valid one?
self.__contextChangedConnection = None
if callUpdate :
modifiedItems = set()
if previousContext is not None :
modifiedItems |= set( previousContext.names() )
if self.__context is not None :
modifiedItems |= set( self.__context.names() )
self._updateFromContext( modifiedItems )
## May be implemented by derived classes to update state based on a change of context.
# To temporarily suspend calls to this function, use Gaffer.BlockedConnection( self._contextChangedConnection() ).
def _updateFromContext( self, modifiedItems ) :
pass
def _contextChangedConnection( self ) :
return self.__contextChangedConnection
## This must be implemented by all derived classes as it is used for serialisation of layouts.
# It is not expected that the script being edited is also serialised as part of this operation -
# instead the new script will be provided later as a variable named scriptNode. So a suitable
# serialisation will look like "GafferUI.Editor( scriptNode )".
def __repr__( self ) :
raise NotImplementedError
def __contextChanged( self, context, key ) :
assert( context.isSame( self.getContext() ) )
self._updateFromContext( set( [ key ] ) )
@classmethod
def types( cls ) :
return cls.__namesToCreators.keys()
@classmethod
def create( cls, name, scriptNode ) :
return cls.__namesToCreators[name]( scriptNode = scriptNode )
@classmethod
def registerType( cls, name, creator ) :
cls.__namesToCreators[name] = creator
__namesToCreators = {}
@classmethod
def instanceCreatedSignal( cls ) :
s = cls.__dict__.get( "__instanceCreatedSignal", None )
if s is not None :
return s
s = Gaffer.Signal1()
setattr( cls, "__instanceCreatedSignal", s )
return s
def __enter( self, widget ) :
if not isinstance( QtWidgets.QApplication.focusWidget(), ( QtWidgets.QLineEdit, QtWidgets.QPlainTextEdit ) ) :
self._qtWidget().setFocus()
def __leave( self, widget ) :
self._qtWidget().clearFocus()
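# Illustrative sketch of how a concrete editor might hook into this base class
# (the MyEditor name and the Label widget used here are hypothetical choices,
# not part of Gaffer itself) :
#
#   class MyEditor( Editor ) :
#
#       def __init__( self, scriptNode, **kw ) :
#           Editor.__init__( self, GafferUI.Label( "Hello" ), scriptNode, **kw )
#
#       def __repr__( self ) :
#           return "GafferUI.MyEditor( scriptNode )"
#
#   Editor.registerType( "MyEditor", MyEditor )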
|
import pytest
import brownie
from brownie import Contract, ZERO_ADDRESS
# gusd
gusd_token_address = "0xD2967f45c4f384DEEa880F807Be904762a3DeA07"
gusd_gauge_addresses = "0xC5cfaDA84E902aD92DD40194f0883ad49639b023"
# susd
susd_token_address = '0xC25a3A3b969415c80451098fa907EC722572917F'
susd_gauge_address = '0xA90996896660DEcC6E997655E065b23788857849'
@pytest.fixture(scope="module")
def swap_address(pool_data):
return pool_data['swap_address']
@pytest.fixture(scope="module")
def token_address(pool_data):
return pool_data['lp_token_address']
@pytest.fixture(scope="module")
def gauge_address(pool_data):
return pool_data['gauge_addresses'][0]
@pytest.fixture(scope="module")
def deposit_address(pool_data):
return pool_data['zap_address'] if 'zap_address' in pool_data else pool_data['swap_address']
@pytest.fixture(scope="module")
def other_token_address(pool_data):
return gusd_token_address if gusd_token_address != pool_data["lp_token_address"] else susd_token_address
@pytest.fixture(scope="module")
def other_gauge_address(pool_data):
return gusd_gauge_addresses if gusd_gauge_addresses != pool_data["gauge_addresses"][0] else susd_gauge_address
@pytest.fixture(scope="module")
def gauge(gauge_address):
return Contract(gauge_address)
@pytest.fixture(scope="module")
def underlying_decimals(pool_data, base_pool_data):
# number of decimal places for each underlying coin in the active pool
decimals = [i.get("decimals", i.get("wrapped_decimals")) for i in pool_data["coins"]]
if base_pool_data is None:
return decimals
base_decimals = [i.get("decimals", i.get("wrapped_decimals")) for i in base_pool_data["coins"]]
return decimals[:-1] + base_decimals
@pytest.fixture(scope="module")
def wrapped_decimals(pool_data):
# number of decimal places for each wrapped coin in the active pool
yield [i.get("wrapped_decimals", i.get("decimals")) for i in pool_data["coins"]]
@pytest.fixture(scope="module")
def wrapped_amounts_to_mint(wrapped_decimals):
return [100 * 10 ** i for i in wrapped_decimals]
@pytest.fixture(scope="module")
def underlying_amounts_to_mint(underlying_decimals):
return [100 * 10 ** i for i in underlying_decimals]
@pytest.fixture(scope="module")
def wrong_amounts_to_mint():
return [100 * 10 ** 18] * 5
# Different amounts are needed to always pass test_wrong_order_of_coins
@pytest.fixture(scope="module")
def wrapped_amounts(wrapped_decimals, n_coins_wrapped):
return [(10 + i) * 10 ** wrapped_decimals[i] for i in range(n_coins_wrapped)] + [0] * (5 - n_coins_wrapped)
# Different amounts are needed to always pass test_wrong_order_of_coins
@pytest.fixture(scope="module")
def underlying_amounts(underlying_decimals, n_coins_underlying):
return [(10 + i) * 10 ** underlying_decimals[i] for i in range(n_coins_underlying)] + [0] * (5 - n_coins_underlying)
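# For example, with underlying_decimals == [18, 6] (so n_coins_underlying == 2)
# the fixture above evaluates to [10 * 10**18, 11 * 10**6, 0, 0, 0].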
@pytest.fixture(scope="module")
def n_coins_wrapped(wrapped_decimals):
return len(wrapped_decimals)
@pytest.fixture(scope="module")
def n_coins_underlying(underlying_decimals):
yield len(underlying_decimals)
@pytest.fixture(scope="module")
def value_wrapped(wrapped_amounts, wrapped_coins):
return wrapped_amounts[wrapped_coins.index(brownie.ETH_ADDRESS)] if brownie.ETH_ADDRESS in wrapped_coins else 0
@pytest.fixture(scope="module")
def value_underlying(underlying_amounts, underlying_coins):
return underlying_amounts[underlying_coins.index(brownie.ETH_ADDRESS)] if brownie.ETH_ADDRESS in underlying_coins else 0
@pytest.fixture(scope="module")
def use_underlying(pool_data):
if pool_data['swap_address'] in [
"0xDeBF20617708857ebe4F679508E7b7863a8A8EeE", # aave
"0xeb16ae0052ed37f479f7fe63849198df1765a733", # saave
"0x2dded6Da1BF5DBdF597C45fcFaa3194e53EcfeAF", # ib
"0x8301AE4fc9c624d1D396cbDAa1ed877821D7C511", # crveth (use_eth)
"0xB576491F1E6e5E62f1d8F26062Ee822B40B0E0d4", # cvxeth (use_eth)
]:
return True
return False
@pytest.fixture(scope="module")
def is_meta(pool_data):
return "meta" in pool_data.get("pool_types", [])
@pytest.fixture(scope="module")
def factory_pool_address(pool_data):
return pool_data["swap_address"] if "factory" in pool_data.get("pool_types", []) else ZERO_ADDRESS
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013 Qin Xuye <qin@qinxuye.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-6-8
@author: Chine
'''
import time
import json
import urllib
import re
from datetime import datetime, timedelta
from threading import Lock
from cola.core.parsers import Parser
from cola.core.utils import urldecode, beautiful_soup
from cola.core.errors import DependencyNotInstalledError, FetchBannedError
from cola.core.logs import get_logger
from login import WeiboLoginFailure
from bundle import WeiboUserBundle
from storage import DoesNotExist, Q, WeiboUser, Friend,\
MicroBlog, Geo, UserInfo, WorkInfo, EduInfo,\
Comment, Forward, Like, ValidationError
from conf import fetch_forward, fetch_comment, fetch_like,fetch_n_comments
try:
from dateutil.parser import parse
except ImportError:
raise DependencyNotInstalledError('python-dateutil')
TIMEOUT = 30.0
class WeiboParser(Parser):
def __init__(self, opener=None, url=None, bundle=None, **kwargs):
super(WeiboParser, self).__init__(opener=opener, url=url, **kwargs)
self.bundle = bundle
self.uid = bundle.label
self.opener.set_default_timeout(TIMEOUT)
if not hasattr(self, 'logger') or self.logger is None:
self.logger = get_logger(name='weibo_parser')
def _check_url(self, dest_url, src_url):
return dest_url.split('?')[0] == src_url.split('?')[0]
def check(self, url, br):
dest_url = br.geturl()
if not self._check_url(dest_url, url):
if dest_url.startswith('http://weibo.com/login.php'):
raise WeiboLoginFailure('Weibo not login or login expired')
if dest_url.startswith('http://weibo.com/sorry?usernotexists'):
self.bundle.exists = False
return False
return True
def get_weibo_user(self):
if self.bundle.weibo_user is not None:
return self.bundle.weibo_user
try:
self.bundle.weibo_user = getattr(WeiboUser, 'objects').get(uid=self.uid)
except DoesNotExist:
self.bundle.weibo_user = WeiboUser(uid=self.uid)
self.bundle.weibo_user.save()
return self.bundle.weibo_user
class MicroBlogParser(WeiboParser):
def parse(self, url=None):
if self.bundle.exists is False:
return
url = url or self.url
params = urldecode(url)
try:
br = self.opener.browse_open(url)
except Exception as e:
print(e)
            print('Sleeping for 10 minutes!')
            time.sleep(60 * 10)
        # self.logger.debug('load %s finish' % url)
if not self.check(url, br):
return
weibo_user = self.get_weibo_user()
params['_t'] = 0
params['__rnd'] = str(int(time.time() * 1000))
page = int(params.get('page', 1))
pre_page = int(params.get('pre_page', 0))
count = 15
if 'pagebar' not in params:
params['pagebar'] = '0'
pre_page += 1
elif params['pagebar'] == '0':
params['pagebar'] = '1'
elif params['pagebar'] == '1':
del params['pagebar']
pre_page = page
page += 1
count = 50
params['count'] = count
params['page'] = page
params['pre_page'] = pre_page
try:
data = json.loads(br.response().read())['data']
except Exception as e:
print(e)
            print('Sleeping for 10 minutes!')
            time.sleep(60 * 10)
        # self.logger.debug('load %s finish' % url)
soup = beautiful_soup(data)
finished = False
divs = soup.find_all('div', attrs={'class': 'WB_feed_type'}, mid=True)
max_id = None
for div in divs:
mid = div['mid']
if len(mid) == 0:
continue
max_id = mid
if 'end_id' not in params:
params['end_id'] = mid
if mid in weibo_user.newest_mids:
finished = True
break
if len(self.bundle.newest_mids) < 3:
self.bundle.newest_mids.append(mid)
try:
mblog = getattr(MicroBlog, 'objects').get(Q(mid=mid)&Q(uid=self.uid))
                continue  # assume this post has already been crawled
except DoesNotExist:
mblog = MicroBlog(mid=mid, uid=self.uid)
content_div = div.find('div', attrs={
'class': 'WB_text',
'node-type': 'feed_list_content'
})
for img in content_div.find_all("img", attrs={'type': 'face'}):
                img.replace_with(img['title'])
mblog.content = content_div.text
            # print(u'Post content: ' + mblog.content)
is_forward = div.get('isforward') == '1'
if is_forward:
mblog.omid = div['omid']
name_a = div.find('a', attrs={
'class': 'WB_name',
'node-type': 'feed_list_originNick'
})
text_a = div.find('div', attrs={
'class': 'WB_text',
'node-type': 'feed_list_reason'
})
if name_a is not None and text_a is not None:
mblog.forward = '%s: %s' % (
name_a.text,
text_a.text
)
mblog.created = parse(div.select('a.S_link2.WB_time')[0]['title'])
if self.bundle.last_update is None or mblog.created > self.bundle.last_update:
self.bundle.last_update = mblog.created
if weibo_user.last_update is not None and \
mblog.created <= weibo_user.last_update:
finished = True
break
func_div = div.find_all('div', 'WB_func')[-1]
action_type_re = lambda t: re.compile("^(feed_list|fl)_%s$" % t)
likes = func_div.find('a', attrs={'action-type': action_type_re("like")}).text
likes = likes.strip('(').strip(')')
likes = 0 if len(likes) == 0 else int(likes)
mblog.n_likes = likes
forwards = func_div.find('a', attrs={'action-type': action_type_re("forward")}).text
if '(' not in forwards:
mblog.n_forwards = 0
else:
mblog.n_forwards = int(forwards.strip().split('(', 1)[1].strip(')'))
comments = func_div.find('a', attrs={'action-type': action_type_re('comment')}).text
if '(' not in comments:
mblog.n_comments = 0
else:
mblog.n_comments = int(comments.strip().split('(', 1)[1].strip(')'))
# fetch geo info
map_info = div.find("div", attrs={'class': 'map_data'})
if map_info is not None:
geo = Geo()
geo.location = map_info.text.split('-')[0].strip()
geo_info = urldecode("?"+map_info.find('a')['action-data'])['geo']
geo.longtitude, geo.latitude = tuple([float(itm) for itm in geo_info.split(',', 1)])
mblog.geo = geo
# fetch forwards and comments
if fetch_forward or fetch_comment or fetch_like:
query = {'id': mid, '_t': 0, '__rnd': int(time.time()*1000)}
query_str = urllib.urlencode(query)
if fetch_forward and mblog.n_forwards > 0:
forward_url = 'http://weibo.com/aj/mblog/info/big?%s' % query_str
yield forward_url
                if fetch_comment and mblog.n_comments > fetch_n_comments:  # only fetch comments for posts above the configured threshold
comment_url = 'http://weibo.com/aj/comment/big?%s' % query_str
yield comment_url
if fetch_like and mblog.n_likes > 0:
query = {'mid': mid, '_t': 0, '__rnd': int(time.time()*1000)}
query_str = urllib.urlencode(query)
like_url = 'http://weibo.com/aj/like/big?%s' % query_str
yield like_url
mblog.save()
if 'pagebar' in params:
params['max_id'] = max_id
else:
del params['max_id']
# self.logger.debug('parse %s finish' % url)
# counter add one for the processed weibo list url
self.counter.inc('processed_weibo_list_page', 1)
# if not has next page
if len(divs) == 0 or finished:
weibo_user = self.get_weibo_user()
for mid in self.bundle.newest_mids:
if mid not in weibo_user.newest_mids:
weibo_user.newest_mids.append(mid)
while len(weibo_user.newest_mids) > 3:
weibo_user.newest_mids.pop()
weibo_user.last_update = self.bundle.last_update
weibo_user.save()
return
yield '%s?%s'%(url.split('?')[0], urllib.urlencode(params))
class ForwardCommentLikeParser(WeiboParser):
strptime_lock = Lock()
def _strptime(self, string, format_):
self.strptime_lock.acquire()
try:
return datetime.strptime(string, format_)
finally:
self.strptime_lock.release()
def parse_datetime(self, dt_str):
dt = None
if u'秒' in dt_str:
sec = int(dt_str.split(u'秒', 1)[0].strip())
dt = datetime.now() - timedelta(seconds=sec)
elif u'分钟' in dt_str:
sec = int(dt_str.split(u'分钟', 1)[0].strip()) * 60
dt = datetime.now() - timedelta(seconds=sec)
elif u'今天' in dt_str:
dt_str = dt_str.replace(u'今天', datetime.now().strftime('%Y-%m-%d'))
dt = self._strptime(dt_str, '%Y-%m-%d %H:%M')
elif u'月' in dt_str and u'日' in dt_str:
this_year = datetime.now().year
date_str = '%s %s' % (this_year, dt_str)
if isinstance(date_str, unicode):
date_str = date_str.encode('utf-8')
dt = self._strptime(date_str, '%Y %m月%d日 %H:%M')
else:
dt = parse(dt_str)
return dt
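    # Examples of strings handled by parse_datetime above (illustrative):
    #   u'5秒前'            -> datetime.now() - 5 seconds
    #   u'10分钟前'         -> datetime.now() - 10 minutes
    #   u'今天 08:30'       -> today at 08:30
    #   u'6月8日 12:00'     -> June 8 of the current year, 12:00
    #   u'2013-6-8 12:00'   -> handed to dateutil.parser.parse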
def parse(self, url=None):
if self.bundle.exists is False:
return
url = url or self.url
try:
br = self.opener.browse_open(url)
except Exception as e:
print(e)
            print('Sleeping for 10 minutes!')
time.sleep(60*10)
try:
jsn = json.loads(br.response().read())
except ValueError:
            print('Sleeping for 10 minutes!')
time.sleep(60 * 10)
raise FetchBannedError('fetch banned by weibo server')
# self.logger.debug('load %s finish' % url)
try:
soup = beautiful_soup(jsn['data']['html'])
current_page = jsn['data']['page']['pagenum']
n_pages = jsn['data']['page']['totalpage']
except KeyError:
            print('Sleeping for 10 minutes!')
time.sleep(60 * 10)
raise FetchBannedError('fetch banned by weibo server')
if not self.check(url, br):
return
decodes = urldecode(url)
mid = decodes.get('id', decodes.get('mid'))
mblog = self.bundle.current_mblog
if mblog is None or mblog.mid != mid:
try:
mblog = getattr(MicroBlog, 'objects').get(Q(mid=mid)&Q(uid=self.uid))
except DoesNotExist:
mblog = MicroBlog(mid=mid, uid=self.uid)
mblog.save()
def set_instance(instance, dl):
instance.avatar = dl.find('dt').find('img')['src']
date = dl.find('dd').find(attrs={'class': 'S_txt2'}).text
date = date.strip().strip('(').strip(')')
instance.created = self.parse_datetime(date)
for div in dl.find_all('div'): div.extract()
for span in dl.find_all('span'): span.extract()
instance.content = dl.text.strip()
counter_type = None
        # print(u'Comments for post: ' + mblog.content)
if url.startswith('http://weibo.com/aj/comment'):
counter_type = 'comment'
dls = soup.find_all('dl', mid=True)
for dl in dls:
uid = dl.find('a', usercard=True)['usercard'].split("id=", 1)[1]
comment = Comment(uid=uid)
set_instance(comment, dl)
#print(u'微博评论:'+comment.content)
mblog.comments.append(comment)
elif url.startswith('http://weibo.com/aj/mblog/info'):
counter_type = 'forward'
dls = soup.find_all('dl', mid=True)
for dl in dls:
forward_again_a = dl.find('a', attrs={'action-type': re.compile("^(feed_list|fl)_forward$")})
uid = urldecode('?%s' % forward_again_a['action-data'])['uid']
forward = Forward(uid=uid, mid=dl['mid'])
set_instance(forward, dl)
mblog.forwards.append(forward)
elif url.startswith('http://weibo.com/aj/like'):
counter_type = 'like'
lis = soup.find_all('li', uid=True)
for li in lis:
like = Like(uid=li['uid'])
like.avatar = li.find('img')['src']
mblog.likes.append(like)
mblog.save()
# self.logger.debug('parse %s finish' % url)
# counter add one for the processed forward or comment or like list url
if counter_type is not None:
self.counter.inc('processed_%s_list_page' % counter_type, 1)
if current_page >= n_pages:
return
params = urldecode(url)
new_params = urldecode('?page=%s'%(current_page+1))
params.update(new_params)
params['__rnd'] = int(time.time()*1000)
next_page = '%s?%s' % (url.split('?')[0] , urllib.urlencode(params))
yield next_page
class UserInfoParser(WeiboParser):
def parse(self, url=None):
if self.bundle.exists is False:
return
url = url or self.url
try:
br = self.opener.browse_open(url)
except Exception as e:
print(e)
            print('Sleeping for 10 minutes!')
time.sleep(60*10)
# self.logger.debug('load %s finish' % url)
soup = beautiful_soup(br.response().read())
if not self.check(url, br):
return
weibo_user = self.get_weibo_user()
info = weibo_user.info
if info is None:
weibo_user.info = UserInfo()
new_style = False
profile_div = None
career_div = None
edu_div = None
tags_div = None
for script in soup.find_all('script'):
text = script.text
if text.startswith('FM.view'):
text = text.strip().replace(';', '').replace('FM.view(', '')[:-1]
data = json.loads(text)
domid = data['domid']
if domid.startswith('Pl_Official_LeftInfo__'):
info_soup = beautiful_soup(data['html'])
info_div = info_soup.find('div', attrs={'class': 'profile_pinfo'})
for block_div in info_div.find_all('div', attrs={'class': 'infoblock'}):
block_title = block_div.find('form').text.strip()
if block_title == u'基本信息':
profile_div = block_div
elif block_title == u'工作信息':
career_div = block_div
elif block_title == u'教育信息':
edu_div = block_div
elif block_title == u'标签信息':
tags_div = block_div
elif domid.startswith('Pl_Official_PersonalInfo__'):
new_style = True
info_soup = beautiful_soup(data['html'])
for block_div in info_soup.find_all('div', attrs={'class': 'WB_cardwrap'}):
block_title_div = block_div.find('h4', attrs={'class': 'obj_name'})
if block_title_div is None:
block_title_div = block_div.find('div', attrs={'class': 'obj_name'})\
.find('h2')
if block_title_div is None:
continue
block_title = block_title_div.text.strip()
inner_div = block_div.find('div', attrs={'class': 'WB_innerwrap'})
if block_title == u'基本信息':
profile_div = inner_div
elif block_title == u'工作信息':
career_div = inner_div
elif block_title == u'教育信息':
edu_div = inner_div
elif block_title == u'标签信息':
tags_div = inner_div
elif domid == 'Pl_Official_Header__1':
header_soup = beautiful_soup(data['html'])
weibo_user.info.avatar = header_soup.find('div', attrs={'class': 'pf_head_pic'})\
.find('img')['src']
weibo_user.info.n_follows = int(header_soup.find('ul', attrs={'class': 'user_atten'})\
.find('strong', attrs={'node-type': 'follow'}).text)
weibo_user.info.n_fans = int(header_soup.find('ul', attrs={'class': 'user_atten'})\
.find('strong', attrs={'node-type': 'fans'}).text)
elif domid.startswith('Pl_Core_T8CustomTriColumn__'):
# new style friends info
header_soup = beautiful_soup(data['html'])
tds = header_soup.find('table', attrs={'class': 'tb_counter'})\
.find_all('td')
weibo_user.info.n_follows = int(tds[0].find('strong').text)
weibo_user.info.n_fans = int(tds[1].find('strong').text)
elif domid.startswith('Pl_Official_Headerv6__'):
# new style avatar info
header_soup = beautiful_soup(data['html'])
weibo_user.info.avatar = header_soup.find('p', attrs='photo_wrap')\
.find('img')['src']
elif 'STK' in text:
text = text.replace('STK && STK.pageletM && STK.pageletM.view(', '')[:-1]
data = json.loads(text)
pid = data['pid']
if pid == 'pl_profile_infoBase':
profile_div = beautiful_soup(data['html'])
elif pid == 'pl_profile_infoCareer':
career_div = beautiful_soup(data['html'])
elif pid == 'pl_profile_infoEdu':
edu_div = beautiful_soup(data['html'])
elif pid == 'pl_profile_infoTag':
tags_div = beautiful_soup(data['html'])
elif pid == 'pl_profile_photo':
soup = beautiful_soup(data['html'])
weibo_user.info.avatar = soup.find('img')['src']
profile_map = {
u'昵称': {'field': 'nickname'},
u'所在地': {'field': 'location'},
u'性别': {'field': 'sex',
'func': lambda s: True if s == u'男' else False},
u'生日': {'field': 'birth'},
u'博客': {'field': 'blog'},
u'个性域名': {'field': 'site'},
u'简介': {'field': 'intro'},
u'邮箱': {'field': 'email'},
u'QQ': {'field': 'qq'},
u'MSN': {'field': 'msn'}
}
if profile_div is not None:
if not new_style:
divs = profile_div.find_all(attrs={'class': 'pf_item'})
else:
divs = profile_div.find_all('li', attrs={'class': 'li_1'})
for div in divs:
if not new_style:
k = div.find(attrs={'class': 'label'}).text.strip()
v = div.find(attrs={'class': 'con'}).text.strip()
else:
k = div.find('span', attrs={'class': 'pt_title'}).text.strip().strip(u':')
d = div.find('span', attrs={'class': 'pt_detail'})
if d:
v = d.text.strip()
else:
v = div.find('a').text.strip()
if k in profile_map:
if k == u'个性域名' and '|' in v:
v = v.split('|')[1].strip()
func = (lambda s: s) \
if 'func' not in profile_map[k] \
else profile_map[k]['func']
v = func(v)
setattr(weibo_user.info, profile_map[k]['field'], v)
weibo_user.info.work = []
if career_div is not None:
if not new_style:
for div in career_div.find_all(attrs={'class': 'con'}):
work_info = WorkInfo()
ps = div.find_all('p')
for p in ps:
a = p.find('a')
if a is not None:
work_info.name = a.text
text = p.text
if '(' in text:
work_info.date = text.strip().split('(')[1].strip(')')
else:
text = p.text
if text.startswith(u'地区:'):
work_info.location = text.split(u':', 1)[1]
elif text.startswith(u'职位:'):
work_info.position = text.split(u':', 1)[1]
else:
work_info.detail = text
weibo_user.info.work.append(work_info)
else:
li = career_div.find('li', attrs={'class': 'li_1'})
for span in li.find_all('span', attrs={'class': 'pt_detail'}):
work_info = WorkInfo()
text = span.text
a = span.find('a')
if a is not None:
work_info.name = a.text
if '(' in text:
work_info.date = text.strip().split('(')[1]\
.replace('\r', '')\
.replace('\n', '')\
.replace('\t', '')\
.split(')', 1)[0]
for l in text.split('\r\n'):
l = l.strip()
if len(l) == 0:
continue
if l.startswith(u'地区:'):
work_info.location = l.split(u':', 1)[1]
elif l.startswith(u'职位:'):
work_info.position = l.split(u':', 1)[1]
else:
work_info.detail = text.replace('\r', '')\
.replace('\n', '')\
.replace('\t', '')\
.strip()
weibo_user.info.work.append(work_info)
weibo_user.info.edu = []
if edu_div is not None:
if not new_style:
for div in edu_div.find_all(attrs={'class': 'con'}):
edu_info = EduInfo()
ps = div.find_all('p')
for p in ps:
a = p.find('a')
text = p.text
if a is not None:
edu_info.name = a.text
if '(' in text:
edu_info.date = text.strip().split('(')[1].strip().strip(')')
else:
edu_info.detail = text
weibo_user.info.edu.append(edu_info)
else:
span = edu_div.find('li', attrs={'class': 'li_1'})\
.find('span', attrs={'class': 'pt_detail'})
text = span.text
names = []
for a in span.find_all('a'):
names.append(a.text)
for idx, name in enumerate(names):
start_pos = text.find(name) + len(name)
if idx < len(names) - 1:
end_pos = text.find(names[idx+1], start_pos)
else:
end_pos = len(text)
t = text[start_pos: end_pos]
edu_info = EduInfo()
edu_info.name = name
if '(' in text:
edu_info.date = t.strip().split('(')[1]\
.replace('\r', '')\
.replace('\n', '')\
.replace('\t', '')\
.split(')', 1)[0]
t = t[t.find(')')+1:]
text = text[end_pos:]
edu_info.detail = t.replace('\r', '').replace('\n', '')\
.replace('\t', '').strip()
weibo_user.info.edu.append(edu_info)
weibo_user.info.tags = []
if tags_div is not None:
if not new_style:
for div in tags_div.find_all(attrs={'class': 'con'}):
for a in div.find_all('a'):
weibo_user.info.tags.append(a.text)
else:
for a in tags_div.find('span', attrs={'class': 'pt_detail'}).find_all('a'):
weibo_user.info.tags.append(a.text.strip())
weibo_user.save()
# self.logger.debug('parse %s finish' % url)
# counter add one for the profile url
self.counter.inc('processed_profile_page', 1)
class UserFriendParser(WeiboParser):
def parse(self, url=None):
if self.bundle.exists is False:
return
url = url or self.url
try:
br = self.opener.browse_open(url)
        except Exception as e:
            print(e)
            print('Request failed; sleeping for 10 minutes before retrying!')
            time.sleep(60*10)
            br = self.opener.browse_open(url)  # retry once after the pause
# self.logger.debug('load %s finish' % url)
soup = beautiful_soup(br.response().read())
if not self.check(url, br):
return
weibo_user = self.get_weibo_user()
html = None
decodes = urldecode(url)
is_follow = True
is_new_mode = False
is_banned = True
for script in soup.find_all('script'):
text = script.text
if text.startswith('FM.view'):
if is_banned: is_banned = False
text = text.strip().replace(';', '').replace('FM.view(', '')[:-1]
data = json.loads(text)
domid = data['domid']
if domid.startswith('Pl_Official_LeftHisRelation__') or \
domid.startswith('Pl_Official_HisRelation__'):
html = beautiful_soup(data['html'])
if 'relate' in decodes and decodes['relate'] == 'fans':
is_follow = False
is_new_mode = True
elif 'STK' in text:
if is_banned: is_banned = False
text = text.replace('STK && STK.pageletM && STK.pageletM.view(', '')[:-1]
data = json.loads(text)
if data['pid'] == 'pl_relation_hisFollow' or \
data['pid'] == 'pl_relation_hisFans':
html = beautiful_soup(data['html'])
if data['pid'] == 'pl_relation_hisFans':
is_follow = False
if is_banned:
            print('Fetch appears banned; sleeping for 10 minutes!')
time.sleep(60 * 10)
raise FetchBannedError('fetch banned by weibo server')
ul = None
try:
ul = html.find(attrs={'class': 'cnfList', 'node-type': 'userListBox'})
if ul is None:
ul = html.find(attrs={'class': 'follow_list', 'node-type': 'userListBox'})
        except AttributeError as e:
            print('Parse failed; sleeping for 10 minutes!')
time.sleep(60 * 10)
if br.geturl().startswith('http://e.weibo.com'):
return
raise e
if ul is None:
if is_follow is True:
if is_new_mode:
yield 'http://weibo.com/%s/follow?relate=fans' % self.uid
else:
yield 'http://weibo.com/%s/fans' % self.uid
return
current_page = decodes.get('page', 1)
if current_page == 1:
if is_follow:
weibo_user.follows = []
else:
weibo_user.fans = []
for cls in ('S_line1', 'S_line2'):
for li in ul.find_all(attrs={'class': cls, 'action-type': 'itemClick'}):
data = dict([l.split('=') for l in li['action-data'].split('&')])
friend = Friend()
friend.uid = data['uid']
friend.nickname = data['fnick']
friend.sex = True if data['sex'] == u'm' else False
yield WeiboUserBundle(str(friend.uid))
if is_follow:
weibo_user.follows.append(friend)
else:
weibo_user.fans.append(friend)
weibo_user.save()
# self.logger.debug('parse %s finish' % url)
# counter add one for the friend url
counter_type = 'follows' if is_follow else 'fans'
self.counter.inc('processed_%s_list_page' % counter_type, 1)
pages = html.find('div', attrs={'class': 'W_pages', 'node-type': 'pageList'})
if pages is None:
pages = html.find('div', attrs={'class': 'WB_cardpage', 'node-type': 'pageList'})
if pages is not None:
a = pages.find_all('a')
if len(a) > 0:
next_ = a[-1]
if next_['class'] == ['W_btn_c'] or 'next' in next_['class']:
decodes['page'] = int(decodes.get('page', 1)) + 1
query_str = urllib.urlencode(decodes)
url = '%s?%s' % (url.split('?')[0], query_str)
yield url
return
if is_follow is True:
if is_new_mode:
yield 'http://weibo.com/%s/follow?relate=fans' % self.uid
else:
yield 'http://weibo.com/%s/fans' % self.uid
|
from django.shortcuts import get_object_or_404
from .models import Task
class TaskRepository:
"""Repository for tasks."""
def list(self):
return Task.objects.all()
def create(self, title: str, description: str, status: int):
return Task.objects.create(
title=title, description=description, status=status
)
def detail(self, id):
return get_object_or_404(Task, pk=id)
def update(self, request, id):
task = get_object_or_404(Task, pk=id)
task.status = request.data.get('status')
task.save()
return task
def destroy(self, pk=None):
task = Task.objects.get(id=pk)
task.delete()
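# Usage sketch (illustrative only, not part of the original module): how a view
# layer might call this repository. The helper below and its argument names are
# hypothetical placeholders meant to show the call pattern, not an existing API.
def _example_usage(request, task_id):
    repo = TaskRepository()
    all_tasks = repo.list()                       # QuerySet of every Task
    created = repo.create(title='demo', description='sketch', status=0)
    updated = repo.update(request, created.id)    # status taken from request.data
    repo.destroy(pk=updated.id)
    return all_tasks, repo.detail(task_id)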
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ExpressRouteCircuitPeering']
class ExpressRouteCircuitPeering(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_asn: Optional[pulumi.Input[int]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitConnectionArgs']]]]] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_peering_config: Optional[pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']]] = None,
last_modified_by: Optional[pulumi.Input[str]] = None,
microsoft_peering_config: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_asn: Optional[pulumi.Input[int]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
peering_type: Optional[pulumi.Input[str]] = None,
primary_azure_port: Optional[pulumi.Input[str]] = None,
primary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
secondary_azure_port: Optional[pulumi.Input[str]] = None,
secondary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
stats: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']]] = None,
vlan_id: Optional[pulumi.Input[int]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Peering in an ExpressRouteCircuit resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] azure_asn: The Azure ASN.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitConnectionArgs']]]] connections: The list of circuit connections associated with Azure Private Peering for this circuit.
:param pulumi.Input[str] gateway_manager_etag: The GatewayManager Etag.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']] ipv6_peering_config: The IPv6 peering configuration.
:param pulumi.Input[str] last_modified_by: Gets whether the provider or the customer last modified the peering.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']] microsoft_peering_config: The Microsoft peering configuration.
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[int] peer_asn: The peer ASN.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] peering_type: The peering type.
:param pulumi.Input[str] primary_azure_port: The primary port.
:param pulumi.Input[str] primary_peer_address_prefix: The primary address prefix.
:param pulumi.Input[str] provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] route_filter: The reference of the RouteFilter resource.
:param pulumi.Input[str] secondary_azure_port: The secondary port.
:param pulumi.Input[str] secondary_peer_address_prefix: The secondary address prefix.
:param pulumi.Input[str] shared_key: The shared key.
:param pulumi.Input[str] state: The peering state.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']] stats: Gets peering stats.
:param pulumi.Input[int] vlan_id: The VLAN ID.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['azure_asn'] = azure_asn
if circuit_name is None:
raise TypeError("Missing required property 'circuit_name'")
__props__['circuit_name'] = circuit_name
__props__['connections'] = connections
__props__['gateway_manager_etag'] = gateway_manager_etag
__props__['id'] = id
__props__['ipv6_peering_config'] = ipv6_peering_config
__props__['last_modified_by'] = last_modified_by
__props__['microsoft_peering_config'] = microsoft_peering_config
__props__['name'] = name
__props__['peer_asn'] = peer_asn
if peering_name is None:
raise TypeError("Missing required property 'peering_name'")
__props__['peering_name'] = peering_name
__props__['peering_type'] = peering_type
__props__['primary_azure_port'] = primary_azure_port
__props__['primary_peer_address_prefix'] = primary_peer_address_prefix
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['route_filter'] = route_filter
__props__['secondary_azure_port'] = secondary_azure_port
__props__['secondary_peer_address_prefix'] = secondary_peer_address_prefix
__props__['shared_key'] = shared_key
__props__['state'] = state
__props__['stats'] = stats
__props__['vlan_id'] = vlan_id
__props__['etag'] = None
__props__['express_route_connection'] = None
__props__['peered_connections'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitPeering")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitPeering, __self__).__init__(
'azure-nextgen:network/v20190401:ExpressRouteCircuitPeering',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitPeering':
"""
Get an existing ExpressRouteCircuitPeering resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ExpressRouteCircuitPeering(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="azureASN")
def azure_asn(self) -> pulumi.Output[Optional[int]]:
"""
The Azure ASN.
"""
return pulumi.get(self, "azure_asn")
@property
@pulumi.getter
def connections(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteCircuitConnectionResponse']]]:
"""
The list of circuit connections associated with Azure Private Peering for this circuit.
"""
return pulumi.get(self, "connections")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteConnection")
def express_route_connection(self) -> pulumi.Output[Optional['outputs.ExpressRouteConnectionIdResponse']]:
"""
The ExpressRoute connection.
"""
return pulumi.get(self, "express_route_connection")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> pulumi.Output[Optional[str]]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter(name="ipv6PeeringConfig")
def ipv6_peering_config(self) -> pulumi.Output[Optional['outputs.Ipv6ExpressRouteCircuitPeeringConfigResponse']]:
"""
The IPv6 peering configuration.
"""
return pulumi.get(self, "ipv6_peering_config")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> pulumi.Output[Optional[str]]:
"""
Gets whether the provider or the customer last modified the peering.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="microsoftPeeringConfig")
def microsoft_peering_config(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']]:
"""
The Microsoft peering configuration.
"""
return pulumi.get(self, "microsoft_peering_config")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerASN")
def peer_asn(self) -> pulumi.Output[Optional[int]]:
"""
The peer ASN.
"""
return pulumi.get(self, "peer_asn")
@property
@pulumi.getter(name="peeredConnections")
def peered_connections(self) -> pulumi.Output[Sequence['outputs.PeerExpressRouteCircuitConnectionResponse']]:
"""
The list of peered circuit connections associated with Azure Private Peering for this circuit.
"""
return pulumi.get(self, "peered_connections")
@property
@pulumi.getter(name="peeringType")
def peering_type(self) -> pulumi.Output[Optional[str]]:
"""
The peering type.
"""
return pulumi.get(self, "peering_type")
@property
@pulumi.getter(name="primaryAzurePort")
def primary_azure_port(self) -> pulumi.Output[Optional[str]]:
"""
The primary port.
"""
return pulumi.get(self, "primary_azure_port")
@property
@pulumi.getter(name="primaryPeerAddressPrefix")
def primary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The primary address prefix.
"""
return pulumi.get(self, "primary_peer_address_prefix")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilter")
def route_filter(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The reference of the RouteFilter resource.
"""
return pulumi.get(self, "route_filter")
@property
@pulumi.getter(name="secondaryAzurePort")
def secondary_azure_port(self) -> pulumi.Output[Optional[str]]:
"""
The secondary port.
"""
return pulumi.get(self, "secondary_azure_port")
@property
@pulumi.getter(name="secondaryPeerAddressPrefix")
def secondary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The secondary address prefix.
"""
return pulumi.get(self, "secondary_peer_address_prefix")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> pulumi.Output[Optional[str]]:
"""
The shared key.
"""
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def state(self) -> pulumi.Output[Optional[str]]:
"""
The peering state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def stats(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitStatsResponse']]:
"""
Gets peering stats.
"""
return pulumi.get(self, "stats")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> pulumi.Output[Optional[int]]:
"""
The VLAN ID.
"""
return pulumi.get(self, "vlan_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
from urllib.parse import urlsplit, urlunsplit
import pytest
import requests
_KUMA_STATUS = None
def pytest_configure(config):
"""Configure pytest for the Kuma deployment under test."""
global _KUMA_STATUS
# The pytest-base-url plugin adds --base-url, and sets the default from
# environment variable PYTEST_BASE_URL. If still unset, force to staging.
if config.option.base_url is None:
config.option.base_url = "https://developer.allizom.org"
base_url = config.getoption("base_url")
# Process the server status from _kuma_status.json
base_parts = urlsplit(base_url)
kuma_status_url = urlunsplit(
(base_parts.scheme, base_parts.netloc, "_kuma_status.json", "", "")
)
response = requests.get(kuma_status_url, headers={"Accept": "application/json"})
response.raise_for_status()
_KUMA_STATUS = response.json()
_KUMA_STATUS["response"] = {"headers": response.headers}
@pytest.fixture(scope="session")
def kuma_status(base_url):
return _KUMA_STATUS
@pytest.fixture(scope="session")
def is_behind_cdn(kuma_status):
return "x-amz-cf-id" in kuma_status["response"]["headers"]
@pytest.fixture(scope="session")
def media_url():
return "https://media.prod.mdn.mozit.cloud"
@pytest.fixture(scope="session")
def attachment_url(kuma_status):
return f'https://{kuma_status["settings"]["ATTACHMENT_HOST"]}'
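# Illustrative sketch (not part of the original conftest): a derived session
# fixture in the same style as those above. The "is_local_development" name and
# its hostname heuristic are assumptions, shown only to illustrate how further
# fixtures can be built on top of base_url.
@pytest.fixture(scope="session")
def is_local_development(base_url):
    """Best-effort guess that the deployment under test runs locally."""
    return urlsplit(base_url).hostname in ("localhost", "127.0.0.1")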
|
from django.urls import path
from . import views
urlpatterns = [
]
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Catcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import CatcoinTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(CatcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generate(num_blocks_to_generate)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generate(1)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
|
# Given a string, check whether it is a permutation of a palindrome.
def solution(s):
s = s.replace(' ','')
    if len(s) == 0 or len(s) == 1:
return True
if len(s) == 2:
return s[0] == s[1]
hashTable = [0] * 128
    for i in range(len(s)):
tmp = ord(s[i])
if hashTable[tmp] == 0:
hashTable[tmp] = 1
else:
hashTable[tmp] = 0
return sum(hashTable) < 2
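# Alternative sketch (not in the original): the same check written with
# collections.Counter. A string is a palindrome permutation iff at most one
# character occurs an odd number of times. Shown for comparison only.
def solution_counter(s):
    from collections import Counter
    counts = Counter(s.replace(' ', ''))
    return sum(count % 2 for count in counts.values()) <= 1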
def test():
assert (solution('dfjklsva ') == False),"wrong"
assert (solution('') == True),"wrong"
assert (solution('abcde fg') == False),"wrong"
assert (solution('dfsjk ls va') == False),"wrong"
assert (solution('acd eacd e') == True),"wrong"
    print('All Passed')
if __name__ == "__main__":
test()
|
import time
import re
import random
import requests
from urllib import parse
import qq_init as qq
import pymongo
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class Spider(object):
def __init__(self):
'''
        Initialize the Selenium browser, HTTP session and MongoDB connection.
'''
chrome_options = Options()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--disable-gpu')
self.driver = webdriver.Chrome(chrome_options=chrome_options)
self.driver.get('https://i.qq.com/')
self.__username = qq.USERNAME
self.__password = qq.PASSWORD
self.headers = {
'host': 'h5.qzone.qq.com',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.8',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'connection': 'keep-alive'
}
self.req = requests.Session()
self.cookies = {}
self.client = pymongo.MongoClient(host=qq.HOST, port=qq.PORT)
self.db = self.client[qq.DB]
def login(self):
'''
        Log in, then call get_g_tk() and get_friends().
:return:
'''
self.driver.switch_to.frame('login_frame')
self.driver.find_element_by_id('switcher_plogin').click()
self.driver.find_element_by_id('u').clear()
self.driver.find_element_by_id('u').send_keys(self.__username)
self.driver.find_element_by_id('p').clear()
self.driver.find_element_by_id('p').send_keys(self.__password)
self.driver.find_element_by_id('login_button').click()
time.sleep(7)
self.driver.get('http://user.qzone.qq.com/{}'.format(self.__username))
cookie = ''
for item in self.driver.get_cookies():
cookie += item["name"] + '=' + item['value'] + ';'
self.cookies = cookie
self.get_g_tk()
self.headers['Cookie'] = self.cookies
self.get_friends()
self.driver.quit()
def get_friends(self):
'''
        Fetch all of the account's friends.
:return: qq, name
'''
url = 'https://user.qzone.qq.com/proxy/domain/r.qzone.qq.com/cgi-bin/tfriend/friend_hat_get.cgi?'
params = {
'uin': self.__username,
'fupdate': 1,
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
friends = self.req.get(url, headers=self.headers).text
name, qq_num = [], []
for _qq, _name in zip(re.findall('"\d+"', friends), re.findall('"realname":.*"', friends)):
name.append(re.sub('"|realname|:', '', _name))
qq_num.append(re.sub('"', '', _qq))
self.name, self.qq_num = name, qq_num
def get_g_tk(self):
'''
        Compute g_tk from the p_skey cookie.
        :return: the generated g_tk
'''
p_skey = self.cookies[self.cookies.find('p_skey=') + 7: self.cookies.find(';', self.cookies.find('p_skey='))]
if len(p_skey) > 50:
self.driver.quit()
            raise RuntimeError(
                'Login failed'
            )
h = 5381
for i in p_skey:
h += (h << 5) + ord(i)
print('g_tk', h & 2147483647)
self.g_tk = h & 2147483647
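    @staticmethod
    def compute_g_tk(p_skey):
        '''
        Standalone sketch (not in the original code) of the same hash used by
        get_g_tk(): a DJBX33A-style hash of the p_skey cookie, masked to 31 bits.
        Kept only to document the algorithm.
        '''
        h = 5381
        for ch in p_skey:
            h += (h << 5) + ord(ch)
        return h & 2147483647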
def get_mood(self):
'''
        Build the mood (shuoshuo) request URL,
        request it for every friend,
        fetch the users who liked each post,
        parse the responses with regexes,
        store the results in MongoDB,
        and pause a few seconds between requests to avoid getting banned.
:return:
'''
url = 'https://h5.qzone.qq.com/proxy/domain/taotao.qq.com/cgi-bin/emotion_cgi_msglist_v6?'
params = {
'inCharset': 'utf-8',
'outCharset': 'utf-8',
'sort': 0,
'num': 20,
'repllyunm': 100,
'cgi_host': 'http://taotao.qq.com/cgi-bin/emotion_cgi_msglist_v6',
'callback': '_preloadCallback',
'code_version': 1,
'format': 'jsonp',
'need_private_comment': 1,
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
for q in self.qq_num:
num = 0
t1, pos = True, 0
url_ = url + '&uin=' + str(q)
black, shuoshuo = self.db['black'], self.db['mood']
while(t1):
url__ = url_ + '&pos=' + str(pos)
mood = self.req.get(url=url__, headers=self.headers)
if '\"msglist\":null' in mood.text or "\"message\":\"对不起,主人设置了保密,您没有权限查看\"" in mood.text:
t1 = False
if '\"message\":\"对不起,主人设置了保密,您没有权限查看\"' in mood.text:
data = {
'name': self.name[self.qq_num.index(q)],
'qq': q
}
black.insert(data)
else:
created_time = re.findall('created_time":\d+', mood.text)
source = re.findall('source_appid":".*?"source_name":".*?"', mood.text)
contents = re.findall('],"content":".*?"', mood.text)
forword = re.findall('fwdnum":\d+', mood.text)
comment_content = re.findall('commentlist":(null|.*?],)', mood.text)
comments = re.findall('cmtnum":\d+', mood.text)
pics = re.findall('","pic(_template|".*?])', mood.text)
like_url = 'https://user.qzone.qq.com/proxy/domain/users.qzone.qq.com/cgi-bin/likes/get_like_list_app?'
tids = re.findall('tid":".*?"', mood.text)
for _time, _source, _content, _forword, _comment_content, _comment, _pic, _tid in \
zip(created_time, source, contents, forword, comment_content, comments, pics, tids):
param = {
'uin': self.__username,
'unikey': 'http://user.qzone.qq.com/{}/mood/'.format(q)+re.sub('tid":"|"', '', _tid)+'.1',
'begin_uin': 0,
'query_count': 60,
'if_first_page': 1,
'g_tk': self.g_tk
}
like_url_current = like_url + parse.urlencode(param)
like = self.req.get(url=like_url_current, headers=self.headers)
likers = like.text.encode(like.encoding).decode('utf-8')
if likers is None:
likers = []
fuin, nick, sex, constellation, address = re.findall('fuin":\d+', likers), re.findall('nick":".*?"', likers), re.findall('gender":".*?"', likers), re.findall('tion":".*?"', likers), re.findall('addr":".*?"', likers)
infos = []
                        # liker information for this post
for _fuin, _nick, _sex, _constellation, _address in zip(fuin, nick, sex, constellation, address):
info = {
'fuin': re.sub('fuin":', '', _fuin),
'nick': re.sub('nick":"|"', '', _nick),
'sex': re.sub('gender":"|"', '', _sex),
'constellation': re.sub('tion":"|"', '', _constellation),
'address': re.sub('addr":"|"', '', _address)
}
infos.append(info)
num = num + 1
print(num)
data = {
# '_id': str(q) + '_' + str(random.random() * 10).replace('.', ''),
'_id': str(q) + '_' + str(num),
'name': self.name[self.qq_num.index(q)],
'CreateTime': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(re.sub('created_time":', '', _time)))),
'source': re.sub('source_appid":".*?"source_name":"|"', '', _source),
'content': re.sub('],"content":"|"', '', _content),
'forward': re.sub('fwdnum":', '', _forword),
'comment_content': re.sub('null|commentlist":', '', _comment_content) if 'null' in _comment_content else str([(re.sub('content":"|"', '', x), re.sub('createTime2":"|"', '', y), re.sub('name":"|"', '', z), re.sub('uin":', '', zz)) for x, y, z, zz in zip(re.findall('content":".*?"', _comment_content), re.findall('createTime2":".*?"', _comment_content), re.findall('name":".*?"', _comment_content), re.findall('uin":\d+', _comment_content))]),
'comment': re.sub('cmtnum":', '', _comment),
'pic': [] if 'template' in _pic else [re.sub('url2":|"', '', i) for i in re.findall('url2":".*?"', _pic)],
'lick_url' : like_url_current
}
try:
data['like'] = re.sub('number":', '', re.search('number":\d+', likers).group())
except Exception as identifier:
print(identifier)
data['like'] = 0
data['likers'] = infos
if shuoshuo.insert(data):
                            print("Wrote %s's moods to the database!" % self.name[self.qq_num.index(q)])
else:
                            with open('filed', 'a+', encoding='utf-8') as f:
                                f.write("Failed to crawl %s's moods!\n" % self.name[self.qq_num.index(q)])
                            print("Failed to write %s's moods to the database!" % self.name[self.qq_num.index(q)])
pos += 20
time.sleep(4)
def get_board(self):
'''
        Fetch message-board posts; the flow mirrors get_mood().
:return:
'''
url = 'https://user.qzone.qq.com/proxy/domain/m.qzone.qq.com/cgi-bin/new/get_msgb?'
params = {
'uin': self.__username,
'num': 10,
'hostword': 0,
'essence': 1,
'inCharset': 'utf-8',
'outCharset': 'utf-8',
'format': 'jsonp',
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
for q in self.qq_num:
num = 0
t2 = True
url_ = url + '&hostUin=' + str(q)
start = 0
boardb = self.db['board']
while(t2):
url__ = url_ + '&start=' + str(start)
board = self.req.get(url=url__, headers=self.headers)
if '\"message":"空间主人设置了访问权限,您无法进行操作\"' in board.text or '\"message\":\"空间未开通\"' in board.text or '\"commentList\":[]' in board.text or '\"total\":0' in board.text:
t2 = False
else:
text = board.text
ids, nickname, uin, pubtime, content, replyList = \
re.findall('id":"\d+', text), re.findall('nickname":".*?"', text), re.findall('uin":\d+,\n"nick', text),\
re.findall('pubtime":".*?"', text), re.findall('ubbContent":".*?"', text), re.findall('"replyList":(\[\]|.*?\}\])', text, re.S)
for _id, _nickname, _uin, _time, _content, _reply in zip(ids, nickname, uin, pubtime, content, replyList):
num = num + 1
print(num)
data = {
# '_id': str(q) + '_' + re.sub('id":"', '', _id),
'_id': str(q) + '_' + str(num),
'owner': self.name[self.qq_num.index(q)],
'total': re.sub('total":', '', re.search('total":\d+', board.text).group()),
'name': re.sub('nickname":"|"', '', _nickname),
'qq': re.sub('uin":|,\n"nick', '', _uin),
'time': re.sub('pubtime":"|"', '', _time),
                            'content': re.sub('ubbContent":"|"', '', _content),  # the next line may need adjustment
'replyList': [] if '[]' in _reply else str([re.sub('nick":"|"', '', name) + re.sub('content"|"', '', con) for name, con in zip(re.findall('nick":".*?"', _reply), re.findall('content":".*?"', _reply))])
}
if boardb.insert(data):
                            print("Stored %s's board messages in MongoDB!" % self.name[self.qq_num.index(q)])
start += 10
def get_information(self):
'''
        Build the profile request and parse the response with regexes.
:return:
'''
url = 'https://h5.qzone.qq.com/proxy/domain/base.qzone.qq.com/cgi-bin/user/cgi_userinfo_get_all?'
params = {
'vuin': self.__username,
'fupdate': 1,
'g_tk': self.g_tk
}
url = url + parse.urlencode(params)
table = self.db['information']
for q in self.qq_num:
t3 = True
url_ = url + '&uin=' + str(q)
while(t3):
info = self.req.get(url=url_, headers=self.headers)
if '\"message\":\"您无权访问\"' in info.text:
t3 = False
else:
text = info.text
sex = ['其他', '男', '女']
constellation = ['白羊座', '金牛座', '双子座', '巨蟹座', '狮子座', '处女座', '天秤座', '天蝎座', '射手座', '摩羯座', '水瓶座', '双鱼座', '未填写']
data = {
'_id': str(q) + '_' + str(random.random() * 10).replace('.', ''),
'nickname': re.sub('nickname":"|"', '', re.search('nickname":".*?"', text).group()),
'spacename': re.sub('spacename":"|"', '', re.search('spacename":".*?"', text).group()),
'desc': re.sub('desc":"|"', '', re.search('desc":".*?"', text).group()),
'signature': re.sub('signature":"|"', '', re.search('signature":".*?"', text).group()),
'sex': sex[int(re.sub('sex":', '', re.search('sex":\d+', text).group()))],
'age': re.sub('"age":', '', re.search('"age":\d+', text).group()),
'birthday': re.sub('birthyear":', '', re.search('birthyear":\d+', text).group()) + '-' + re.sub('birthday":"|"', '', re.search('birthday":".*"', text).group()),
'constellation': constellation[int(re.sub('constellation":|,', '', re.search('constellation":.*,', text).group()).replace('-1', '12'))],
'country': re.sub('country":"|"', '', re.search('country":".*"', text).group()),
'province': re.sub('province":"|"', '', re.search('province":".*?"', text).group()),
'city': re.sub('city":"|"', '', re.search('city":".*?"', text).group()),
'hometown': re.sub('hco":"|"|,|\n|hc|hp|:', '', re.search('hco":".*\n".*\n".*', text).group()),
# 'marriage': marriage[int(re.sub('marriage":', '', re.search('marriage":\d', text).group()))],
'career': re.sub('career":"|"', '', re.search('career":".*?"', text).group()),
'address': re.sub('cb":"|"', '', re.search('cb":".*?"', text).group())
}
if table.insert(data):
                        print("Wrote %s's profile to the database!" % self.name[self.qq_num.index(q)])
t3 = False
if __name__ == '__main__':
sp = Spider()
sp.login()
sp.get_information()
t = time.perf_counter()
sp.get_board()
sp.get_mood()
End = time.perf_counter() - t
    print('Finished crawling everything! Total time: %s!' % End)
|
import typing
import netmiko
import napalm_digineo_procurve.queries.interfaces
import napalm_digineo_procurve.queries.lldp_neighbors
import napalm_digineo_procurve.queries.device_info
import napalm_digineo_procurve.queries.system_info
import napalm_digineo_procurve.queries.uptime
def get_uptime(device: netmiko.BaseConnection) -> float:
return napalm_digineo_procurve.queries.uptime.query(device)
def get_system_information(
device: netmiko.BaseConnection
) -> napalm_digineo_procurve.queries.system_info.SystemInformation:
return napalm_digineo_procurve.queries.system_info.query(device)
def get_device_manufacturer_info(
device: netmiko.BaseConnection
) -> napalm_digineo_procurve.queries.device_info.DeviceInformation:
return napalm_digineo_procurve.queries.device_info.query(device)
def get_interfaces(
device: netmiko.BaseConnection
) -> typing.Sequence[napalm_digineo_procurve.queries.interfaces.Interface]:
return napalm_digineo_procurve.queries.interfaces.query(device)
def get_lldp_neighbors(
device: netmiko.BaseConnection
) -> typing.List[typing.Mapping[str, str]]:
return napalm_digineo_procurve.queries.lldp_neighbors.query(device)
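# Usage sketch (not part of the original module): how these wrappers might be
# driven from a netmiko connection. The device_type, host and credentials below
# are placeholder assumptions, not values taken from this project.
def _example(host: str, username: str, password: str) -> None:
    conn = netmiko.ConnectHandler(
        device_type="hp_procurve", host=host,
        username=username, password=password,
    )
    try:
        print("uptime (s):", get_uptime(conn))
        for neighbor in get_lldp_neighbors(conn):
            print(neighbor)
    finally:
        conn.disconnect()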
|
import os
import cvb
print("acquire images from CVMock.vin")
device = cvb.DeviceFactory.open("/opt/cvb/drivers/CVMock.vin")
stream = device.stream
stream.start()
for i in range(5):
image, status = stream.wait()
if status == cvb.WaitStatus.Ok:
image_file = os.path.join(".", ".cvb", "test" + str(i) + ".jpg")
image.save(image_file)
print("saving: " + image_file)
stream.abort()
|
import torch.nn.functional as F
from SparseTensor import SparseTensor
def relu(input):
output = F.relu(input.F)
return SparseTensor(
output, coords_key=input.coords_key, coords_manager=input.coords_man)
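# Sketch (not in the original module): the same wrapping pattern applied to
# another pointwise activation, to illustrate how SparseTensor features
# (input.F) are transformed while the coordinate structure is carried through.
def leaky_relu(input, negative_slope=0.01):
    output = F.leaky_relu(input.F, negative_slope=negative_slope)
    return SparseTensor(
        output, coords_key=input.coords_key, coords_manager=input.coords_man)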
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('User must have an email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, password):
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    objects = UserManager()
    USERNAME_FIELD = 'email'
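# Usage sketch (illustrative only, not part of the original module): creating
# accounts through the custom manager, e.g. from a shell or a test case. The
# email addresses and passwords below are placeholder assumptions.
def _example_create_users():
    user = User.objects.create_user(email='user@example.com', password='secret', name='Demo')
    admin = User.objects.create_superuser(email='admin@example.com', password='secret')
    return user, admin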
|
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Network architectures used in the StyleGAN2 paper."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.ops.upfirdn_2d import upsample_2d, downsample_2d, upsample_conv_2d, conv_downsample_2d
from dnnlib.tflib.ops.fused_bias_act import fused_bias_act
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolution or fully-connected layer.
def get_weight(shape, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
he_std = gain / np.sqrt(fan_in) # He init
# Equalized learning rate and custom learning rate multiplier.
if use_wscale:
init_std = 1.0 / lrmul
runtime_coef = he_std * lrmul
else:
init_std = he_std / lrmul
runtime_coef = lrmul
# Create variable.
init = tf.initializers.random_normal(0, init_std)
return tf.get_variable(weight_var, shape=shape, initializer=init) * runtime_coef
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense_layer(x, fmaps, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolution layer with optional upsampling or downsampling.
def conv2d_layer(x, fmaps, kernel, up=False, down=False, resample_kernel=None, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
if up:
x = upsample_conv_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
elif down:
x = conv_downsample_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
else:
x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NCHW', strides=[1,1,1,1], padding='SAME')
return x
#----------------------------------------------------------------------------
# Apply bias and activation func.
def apply_bias_act(x, act='linear', alpha=None, gain=None, lrmul=1, bias_var='bias'):
b = tf.get_variable(bias_var, shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul
return fused_bias_act(x, b=tf.cast(b, x.dtype), act=act, alpha=alpha, gain=gain)
#----------------------------------------------------------------------------
# Naive upsampling (nearest neighbor) and downsampling (average pooling).
def naive_upsample_2d(x, factor=2):
with tf.variable_scope('NaiveUpsample'):
_N, C, H, W = x.shape.as_list()
x = tf.reshape(x, [-1, C, H, 1, W, 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
return tf.reshape(x, [-1, C, H * factor, W * factor])
def naive_downsample_2d(x, factor=2):
with tf.variable_scope('NaiveDownsample'):
_N, C, H, W = x.shape.as_list()
x = tf.reshape(x, [-1, C, H // factor, factor, W // factor, factor])
return tf.reduce_mean(x, axis=[3,5])
#----------------------------------------------------------------------------
# Modulated convolution layer.
def modulated_conv2d_layer(x, y, fmaps, kernel, up=False, down=False, demodulate=True, resample_kernel=None, gain=1, use_wscale=True, lrmul=1, fused_modconv=True, weight_var='weight', mod_weight_var='mod_weight', mod_bias_var='mod_bias'):
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
# Get weight.
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
ww = w[np.newaxis] # [BkkIO] Introduce minibatch dimension.
# Modulate.
s = dense_layer(y, fmaps=x.shape[1].value, weight_var=mod_weight_var) # [BI] Transform incoming W to style.
s = apply_bias_act(s, bias_var=mod_bias_var) + 1 # [BI] Add bias (initially 1).
ww *= tf.cast(s[:, np.newaxis, np.newaxis, :, np.newaxis], w.dtype) # [BkkIO] Scale input feature maps.
# Demodulate.
if demodulate:
d = tf.rsqrt(tf.reduce_sum(tf.square(ww), axis=[1,2,3]) + 1e-8) # [BO] Scaling factor.
ww *= d[:, np.newaxis, np.newaxis, np.newaxis, :] # [BkkIO] Scale output feature maps.
# Reshape/scale input.
if fused_modconv:
x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]]) # Fused => reshape minibatch to convolution groups.
w = tf.reshape(tf.transpose(ww, [1, 2, 3, 0, 4]), [ww.shape[1], ww.shape[2], ww.shape[3], -1])
else:
x *= tf.cast(s[:, :, np.newaxis, np.newaxis], x.dtype) # [BIhw] Not fused => scale input activations.
# Convolution with optional up/downsampling.
if up:
x = upsample_conv_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
elif down:
x = conv_downsample_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
else:
x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NCHW', strides=[1,1,1,1], padding='SAME')
# Reshape/scale output.
if fused_modconv:
x = tf.reshape(x, [-1, fmaps, x.shape[2], x.shape[3]]) # Fused => reshape convolution groups back to minibatch.
elif demodulate:
x *= tf.cast(d[:, :, np.newaxis, np.newaxis], x.dtype) # [BOhw] Not fused => scale output activations.
return x
#----------------------------------------------------------------------------
# Minibatch standard deviation layer.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
s = x.shape # [NCHW] Input shape.
y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32.
y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group.
y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group.
y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group.
y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels.
y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups
y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type.
y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels.
return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
#----------------------------------------------------------------------------
# Main generator network.
# Composed of two sub-networks (mapping and synthesis) that are defined below.
# Used in configs B-F (Table 1).
def G_main(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
truncation_psi = 0.5, # Style strength multiplier for the truncation trick. None = disable.
truncation_cutoff = None, # Number of layers for which to apply the truncation trick. None = disable.
truncation_psi_val = None, # Value for truncation_psi to use during validation.
truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation.
dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable.
style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable.
is_training = False, # Network is under training? Enables and disables specific features.
is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi.
return_dlatents = False, # Return dlatents in addition to the images?
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls.
mapping_func = 'G_mapping', # Build func name for the mapping network.
synthesis_func = 'G_synthesis_stylegan2', # Build func name for the synthesis network.
**kwargs): # Arguments for sub-networks (mapping and synthesis).
# Validate arguments.
assert not is_training or not is_validation
assert isinstance(components, dnnlib.EasyDict)
if is_validation:
truncation_psi = truncation_psi_val
truncation_cutoff = truncation_cutoff_val
if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1):
truncation_psi = None
if is_training:
truncation_cutoff = None
if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1):
dlatent_avg_beta = None
if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0):
style_mixing_prob = None
# Setup components.
if 'synthesis' not in components:
components.synthesis = tflib.Network('G_synthesis', func_name=globals()[synthesis_func], **kwargs)
num_layers = components.synthesis.input_shape[1]
dlatent_size = components.synthesis.input_shape[2]
if 'mapping' not in components:
components.mapping = tflib.Network('G_mapping', func_name=globals()[mapping_func], dlatent_broadcast=num_layers, **kwargs)
# Setup variables.
lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False)
dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False)
# Evaluate mapping network.
dlatents = components.mapping.get_output_for(latents_in, labels_in, is_training=is_training, **kwargs)
dlatents = tf.cast(dlatents, tf.float32)
# Update moving average of W.
if dlatent_avg_beta is not None:
with tf.variable_scope('DlatentAvg'):
batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0)
update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta))
with tf.control_dependencies([update_op]):
dlatents = tf.identity(dlatents)
# Perform style mixing regularization.
if style_mixing_prob is not None:
with tf.variable_scope('StyleMix'):
latents2 = tf.random_normal(tf.shape(latents_in))
dlatents2 = components.mapping.get_output_for(latents2, labels_in, is_training=is_training, **kwargs)
dlatents2 = tf.cast(dlatents2, tf.float32)
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2
mixing_cutoff = tf.cond(
tf.random_uniform([], 0.0, 1.0) < style_mixing_prob,
lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32),
lambda: cur_layers)
dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2)
# Apply truncation trick.
if truncation_psi is not None:
with tf.variable_scope('Truncation'):
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
layer_psi = np.ones(layer_idx.shape, dtype=np.float32)
if truncation_cutoff is None:
layer_psi *= truncation_psi
else:
layer_psi = tf.where(layer_idx < truncation_cutoff, layer_psi * truncation_psi, layer_psi)
dlatents = tflib.lerp(dlatent_avg, dlatents, layer_psi)
# Evaluate synthesis network.
deps = []
if 'lod' in components.synthesis.vars:
deps.append(tf.assign(components.synthesis.vars['lod'], lod_in))
with tf.control_dependencies(deps):
images_out = components.synthesis.get_output_for(dlatents, is_training=is_training, force_clean_graph=is_template_graph, **kwargs)
# Return requested outputs.
images_out = tf.identity(images_out, name='images_out')
if return_dlatents:
return images_out, dlatents
return images_out
#----------------------------------------------------------------------------
# Mapping network.
# Transforms the input latent code (z) to the disentangled latent code (w).
# Used in configs B-F (Table 1).
def G_mapping(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
latent_size = 512, # Latent vector (Z) dimensionality.
label_size = 0, # Label dimensionality, 0 if no labels.
dlatent_size = 512, # Disentangled latent (W) dimensionality.
dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size].
mapping_layers = 8, # Number of mapping layers.
mapping_fmaps = 512, # Number of activations in the mapping layers.
mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers.
mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers?
dtype = 'float32', # Data type to use for activations and outputs.
**_kwargs): # Ignore unrecognized keyword args.
act = mapping_nonlinearity
# Inputs.
latents_in.set_shape([None, latent_size])
labels_in.set_shape([None, label_size])
latents_in = tf.cast(latents_in, dtype)
labels_in = tf.cast(labels_in, dtype)
x = latents_in
# Embed labels and concatenate them with latents.
if label_size:
with tf.variable_scope('LabelConcat'):
w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal())
y = tf.matmul(labels_in, tf.cast(w, dtype))
x = tf.concat([x, y], axis=1)
# Normalize latents.
if normalize_latents:
with tf.variable_scope('Normalize'):
x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + 1e-8)
# Mapping layers.
for layer_idx in range(mapping_layers):
with tf.variable_scope('Dense%d' % layer_idx):
fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps
x = apply_bias_act(dense_layer(x, fmaps=fmaps, lrmul=mapping_lrmul), act=act, lrmul=mapping_lrmul)
# Broadcast.
if dlatent_broadcast is not None:
with tf.variable_scope('Broadcast'):
x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1])
# Output.
assert x.dtype == tf.as_dtype(dtype)
return tf.identity(x, name='dlatents_out')
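# --- Illustrative sketch (not part of the original codebase) ---
# G_mapping() rescales Z to unit RMS before the dense stack and then tiles the
# resulting W vector across all synthesis layers. A toy NumPy version of those
# two steps, kept separate from the TF graph code above:
import numpy as np

def _normalize_latents_sketch(z, eps=1e-8):
    # Matches: x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + 1e-8)
    return z / np.sqrt(np.mean(np.square(z), axis=1, keepdims=True) + eps)

def _broadcast_dlatents_sketch(w, dlatent_broadcast):
    # Matches: tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1])
    return np.tile(w[:, np.newaxis, :], (1, dlatent_broadcast, 1))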
#----------------------------------------------------------------------------
# StyleGAN synthesis network with revised architecture (Figure 2d).
# Implements progressive growing, but no skip connections or residual nets (Figure 7).
# Used in configs B-D (Table 1).
def G_synthesis_stylegan_revised(
dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
dlatent_size = 512, # Disentangled latent (W) dimensionality.
num_channels = 3, # Number of output color channels.
resolution = 1024, # Output resolution.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
fused_modconv = True, # Implement modulated_conv2d_layer() as a single fused op?
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
if is_template_graph: force_clean_graph = True
if force_clean_graph: randomize_noise = False
if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive'
act = nonlinearity
num_layers = resolution_log2 * 2 - 2
images_out = None
# Primary inputs.
dlatents_in.set_shape([None, num_layers, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
# Noise inputs.
noise_inputs = []
for layer_idx in range(num_layers - 1):
res = (layer_idx + 5) // 2
shape = [1, 1, 2**res, 2**res]
noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
# Single convolution layer with all the bells and whistles.
def layer(x, layer_idx, fmaps, kernel, up=False):
x = modulated_conv2d_layer(x, dlatents_in[:, layer_idx], fmaps=fmaps, kernel=kernel, up=up, resample_kernel=resample_kernel, fused_modconv=fused_modconv)
if randomize_noise:
noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
else:
noise = tf.cast(noise_inputs[layer_idx], x.dtype)
noise_strength = tf.get_variable('noise_strength', shape=[], initializer=tf.initializers.zeros())
x += noise * tf.cast(noise_strength, x.dtype)
return apply_bias_act(x, act=act)
# Early layers.
with tf.variable_scope('4x4'):
with tf.variable_scope('Const'):
x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.random_normal())
x = tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1])
with tf.variable_scope('Conv'):
x = layer(x, layer_idx=0, fmaps=nf(1), kernel=3)
# Building blocks for remaining layers.
def block(res, x): # res = 3..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
with tf.variable_scope('Conv0_up'):
x = layer(x, layer_idx=res*2-5, fmaps=nf(res-1), kernel=3, up=True)
with tf.variable_scope('Conv1'):
x = layer(x, layer_idx=res*2-4, fmaps=nf(res-1), kernel=3)
return x
def torgb(res, x): # res = 2..resolution_log2
with tf.variable_scope('ToRGB_lod%d' % (resolution_log2 - res)):
return apply_bias_act(modulated_conv2d_layer(x, dlatents_in[:, res*2-3], fmaps=num_channels, kernel=1, demodulate=False, fused_modconv=fused_modconv))
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
for res in range(3, resolution_log2 + 1):
x = block(res, x)
images_out = torgb(resolution_log2, x)
# Linear structure: simple but inefficient.
if structure == 'linear':
images_out = torgb(2, x)
for res in range(3, resolution_log2 + 1):
lod = resolution_log2 - res
x = block(res, x)
img = torgb(res, x)
with tf.variable_scope('Upsample_lod%d' % lod):
images_out = upsample_2d(images_out)
with tf.variable_scope('Grow_lod%d' % lod):
images_out = tflib.lerp_clip(img, images_out, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(x, res, lod):
y = block(res, x)
img = lambda: naive_upsample_2d(torgb(res, y), factor=2**lod)
img = cset(img, (lod_in > lod), lambda: naive_upsample_2d(tflib.lerp(torgb(res, y), upsample_2d(torgb(res - 1, x)), lod_in - lod), factor=2**lod))
if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
return img()
images_out = grow(x, 3, resolution_log2 - 3)
assert images_out.dtype == tf.as_dtype(dtype)
return tf.identity(images_out, name='images_out')
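# --- Illustrative sketch (not part of the original codebase) ---
# The nf() helper above sets the channel count per resolution stage: it halves
# fmap_base at every stage (with fmap_decay=1.0) and clips the result to
# [fmap_min, fmap_max]. A standalone copy that prints the default schedule so
# the numbers can be inspected without building the graph:
import numpy as np

def _print_fmap_schedule(resolution=1024, fmap_base=16 << 10, fmap_decay=1.0, fmap_min=1, fmap_max=512):
    resolution_log2 = int(np.log2(resolution))
    for res in range(2, resolution_log2 + 1):
        # block(res, ...) uses nf(res - 1), so stage = res - 1 here.
        fmaps = int(np.clip(int(fmap_base / (2.0 ** ((res - 1) * fmap_decay))), fmap_min, fmap_max))
        print('%4dx%-4d: %3d feature maps' % (2**res, 2**res, fmaps))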
#----------------------------------------------------------------------------
# StyleGAN2 synthesis network (Figure 7).
# Implements skip connections and residual nets (Figure 7), but no progressive growing.
# Used in configs E-F (Table 1).
def G_synthesis_stylegan2(
dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
dlatent_size = 512, # Disentangled latent (W) dimensionality.
num_channels = 3, # Number of output color channels.
resolution = 1024, # Output resolution.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
fused_modconv = True, # Implement modulated_conv2d_layer() as a single fused op?
clip_style = 'ffhq', # Spatial attention mode passed to spatial_att(): 'ffhq', 'cat', or 'church'.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
assert architecture in ['orig', 'skip', 'resnet']
act = nonlinearity
num_layers = resolution_log2 * 2 - 2
images_out = None
# Primary inputs.
dlatents_in.set_shape([None, num_layers, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
# Noise inputs.
noise_inputs = []
for layer_idx in range(num_layers - 1):
res = (layer_idx + 5) // 2
shape = [1, 1, 2**res, 2**res]
noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
# Single convolution layer with all the bells and whistles.
def layer(x, layer_idx, fmaps, kernel, up=False):
x = modulated_conv2d_layer(x, dlatents_in[:, layer_idx], fmaps=fmaps, kernel=kernel, up=up, resample_kernel=resample_kernel, fused_modconv=fused_modconv)
if randomize_noise:
noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
else:
noise = tf.cast(noise_inputs[layer_idx], x.dtype)
noise_strength = tf.get_variable('noise_strength', shape=[], initializer=tf.initializers.zeros())
noise = noise * tf.cast(noise_strength, x.dtype)
with tf.variable_scope('resampling'):
alpha = tf.get_variable('alpha', shape=[], initializer=tf.initializers.constant(0.5))
sp_att_mask = alpha + (1-alpha) * spatial_att(x, clip_style)
sp_att_mask *= tf.rsqrt(tf.reduce_mean(tf.square(sp_att_mask), axis=[2, 3], keepdims=True) + 1e-8)
x += noise
x = x * sp_att_mask
return apply_bias_act(x, act=act)
# Building blocks for main layers.
def block(x, res): # res = 3..resolution_log2
t = x
with tf.variable_scope('Conv0_up'):
x = layer(x, layer_idx=res*2-5, fmaps=nf(res-1), kernel=3, up=True)
with tf.variable_scope('Conv1'):
x = layer(x, layer_idx=res*2-4, fmaps=nf(res-1), kernel=3)
if architecture == 'resnet':
with tf.variable_scope('Skip'):
t = conv2d_layer(t, fmaps=nf(res-1), kernel=1, up=True, resample_kernel=resample_kernel)
x = (x + t) * (1 / np.sqrt(2))
return x
def upsample(y):
with tf.variable_scope('Upsample'):
return upsample_2d(y, k=resample_kernel)
def torgb(x, y, res): # res = 2..resolution_log2
with tf.variable_scope('ToRGB'):
t = apply_bias_act(modulated_conv2d_layer(x, dlatents_in[:, res*2-3], fmaps=num_channels, kernel=1, demodulate=False, fused_modconv=fused_modconv))
return t if y is None else y + t
# Early layers.
y = None
with tf.variable_scope('4x4'):
with tf.variable_scope('Const'):
x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.random_normal())
x = tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1])
with tf.variable_scope('Conv'):
x = layer(x, layer_idx=0, fmaps=nf(1), kernel=3)
if architecture == 'skip':
y = torgb(x, y, 2)
# Main layers.
for res in range(3, resolution_log2 + 1):
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
x = block(x, res)
if architecture == 'skip':
y = upsample(y)
if architecture == 'skip' or res == resolution_log2:
y = torgb(x, y, res)
images_out = y
assert images_out.dtype == tf.as_dtype(dtype)
return tf.identity(images_out, name='images_out')
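# --- Illustrative sketch (not part of the original codebase) ---
# The layer() helper in G_synthesis_stylegan2() blends the spatial attention
# mask with a constant via the learned alpha, RMS-normalizes the blend over
# the spatial axes, and multiplies it into the noisy activations. A toy NumPy
# version of that blend, assuming a precomputed mask in [0, 1]:
import numpy as np

def _blend_attention_sketch(x, mask, alpha=0.5, eps=1e-8):
    # x, mask: [N, C, H, W]. Matches: alpha + (1 - alpha) * mask, then per-sample RMS normalization.
    m = alpha + (1.0 - alpha) * mask
    m = m / np.sqrt(np.mean(np.square(m), axis=(2, 3), keepdims=True) + eps)
    return x * m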
#----------------------------------------------------------------------------
# Original StyleGAN discriminator.
# Used in configs B-D (Table 1).
def D_stylegan(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 3, # Number of input color channels. Overridden based on dataset.
resolution = 1024, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive'
act = nonlinearity
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
# Building blocks for spatial layers.
def fromrgb(x, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
return apply_bias_act(conv2d_layer(x, fmaps=nf(res-1), kernel=1), act=act)
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
with tf.variable_scope('Conv0'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-1), kernel=3), act=act)
with tf.variable_scope('Conv1_down'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-2), kernel=3, down=True, resample_kernel=resample_kernel), act=act)
return x
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
x = fromrgb(images_in, resolution_log2)
for res in range(resolution_log2, 2, -1):
x = block(x, res)
# Linear structure: simple but inefficient.
if structure == 'linear':
img = images_in
x = fromrgb(img, resolution_log2)
for res in range(resolution_log2, 2, -1):
lod = resolution_log2 - res
x = block(x, res)
with tf.variable_scope('Downsample_lod%d' % lod):
img = downsample_2d(img)
y = fromrgb(img, res - 1)
with tf.variable_scope('Grow_lod%d' % lod):
x = tflib.lerp_clip(x, y, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(res, lod):
x = lambda: fromrgb(naive_downsample_2d(images_in, factor=2**lod), res)
if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
x = block(x(), res); y = lambda: x
y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(naive_downsample_2d(images_in, factor=2**(lod+1)), res - 1), lod_in - lod))
return y()
x = grow(3, resolution_log2 - 3)
# Final layers at 4x4 resolution.
with tf.variable_scope('4x4'):
if mbstd_group_size > 1:
with tf.variable_scope('MinibatchStddev'):
x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
with tf.variable_scope('Conv'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(1), kernel=3), act=act)
with tf.variable_scope('Dense0'):
x = apply_bias_act(dense_layer(x, fmaps=nf(0)), act=act)
# Output layer with label conditioning from "Which Training Methods for GANs do actually Converge?"
with tf.variable_scope('Output'):
x = apply_bias_act(dense_layer(x, fmaps=max(labels_in.shape[1], 1)))
if labels_in.shape[1] > 0:
x = tf.reduce_sum(x * labels_in, axis=1, keepdims=True)
scores_out = x
# Output.
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
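# --- Illustrative sketch (not part of the original codebase) ---
# The 'Grow_lod%d' blocks above cross-fade adjacent resolutions during
# progressive growing with tflib.lerp_clip(a, b, t) = a + (b - a) * clip(t, 0, 1):
# at t <= 0 only the first input is used, at t >= 1 only the second.
# A standalone NumPy equivalent for reference:
import numpy as np

def _lerp_clip_sketch(a, b, t):
    return a + (b - a) * np.clip(t, 0.0, 1.0)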
#----------------------------------------------------------------------------
# StyleGAN2 discriminator (Figure 7).
# Implements skip connections and residual nets (Figure 7), but no progressive growing.
# Used in configs E-F (Table 1).
def D_stylegan2(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 3, # Number of input color channels. Overridden based on dataset.
resolution = 1024, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
assert architecture in ['orig', 'skip', 'resnet']
act = nonlinearity
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
# Building blocks for main layers.
def fromrgb(x, y, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB'):
t = apply_bias_act(conv2d_layer(y, fmaps=nf(res-1), kernel=1), act=act)
return t if x is None else x + t
def block(x, res): # res = 2..resolution_log2
t = x
with tf.variable_scope('Conv0'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-1), kernel=3), act=act)
with tf.variable_scope('Conv1_down'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-2), kernel=3, down=True, resample_kernel=resample_kernel), act=act)
if architecture == 'resnet':
with tf.variable_scope('Skip'):
t = conv2d_layer(t, fmaps=nf(res-2), kernel=1, down=True, resample_kernel=resample_kernel)
x = (x + t) * (1 / np.sqrt(2))
return x
def downsample(y):
with tf.variable_scope('Downsample'):
return downsample_2d(y, k=resample_kernel)
# Main layers.
x = None
y = images_in
for res in range(resolution_log2, 2, -1):
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if architecture == 'skip' or res == resolution_log2:
x = fromrgb(x, y, res)
x = block(x, res)
if architecture == 'skip':
y = downsample(y)
# Final layers.
with tf.variable_scope('4x4'):
if architecture == 'skip':
x = fromrgb(x, y, 2)
if mbstd_group_size > 1:
with tf.variable_scope('MinibatchStddev'):
x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
with tf.variable_scope('Conv'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(1), kernel=3), act=act)
with tf.variable_scope('Dense0'):
x = apply_bias_act(dense_layer(x, fmaps=nf(0)), act=act)
# Output layer with label conditioning from "Which Training Methods for GANs do actually Converge?"
with tf.variable_scope('Output'):
x = apply_bias_act(dense_layer(x, fmaps=max(labels_in.shape[1], 1)))
if labels_in.shape[1] > 0:
x = tf.reduce_sum(x * labels_in, axis=1, keepdims=True)
scores_out = x
# Output.
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
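# --- Illustrative sketch (not part of the original codebase) ---
# minibatch_stddev_layer() appends a channel holding the average standard
# deviation of features across a small group of samples, giving the
# discriminator a statistic that is sensitive to low sample diversity.
# A simplified NumPy version of the single-feature case (the real layer also
# supports mbstd_num_features > 1); assumes the minibatch size is divisible
# by the group size:
import numpy as np

def _minibatch_stddev_sketch(x, group_size=4):
    n, c, h, w = x.shape                               # x: [N, C, H, W]
    g = min(group_size, n)
    y = x.reshape(g, n // g, c, h, w)                  # split minibatch into groups
    y = y - y.mean(axis=0, keepdims=True)              # center within each group
    y = np.sqrt(np.mean(np.square(y), axis=0) + 1e-8)  # stddev over the group
    y = y.mean(axis=(1, 2, 3), keepdims=True)          # average over fmaps and pixels
    y = np.tile(y, (g, 1, h, w))                       # replicate per sample and pixel
    return np.concatenate([x, y], axis=1)              # append as an extra channel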
#----------------------------------------------------------------------------
def instance_norm(x, epsilon=1e-8):
assert len(x.shape) == 4 # NCHW
with tf.variable_scope('InstanceNorm'):
orig_dtype = x.dtype
x = tf.cast(x, tf.float32)
x -= tf.reduce_mean(x, axis=[2,3], keepdims=True)
epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')
x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon)
x = tf.cast(x, orig_dtype)
return x
def adjust_range(x):
assert len(x.shape) == 4
with tf.variable_scope('Adjust_range'):
orig_dtype = x.dtype
x = tf.cast(x, tf.float32)
x -= tf.reduce_mean(x, axis=[2, 3], keepdims=True)
x_max = tf.reduce_max(x, axis=(2, 3), keepdims=True)
x = x / (x_max + 1e-8)
x = tf.cast(x, orig_dtype)
return x
def spatial_att(x, clip_style):
"""
Spatial attention mask
:param x: [NCHW]
:param clip_style:
:return: None negative mask tensor [NCHW]
"""
fmaps = x.shape[1].value
if clip_style == 'ffhq':
x = tf.reduce_sum(tf.nn.relu(-x), axis=1, keepdims=True)
elif clip_style == 'cat':
x = tf.reduce_sum(tf.abs(x), axis=1, keepdims=True)
elif clip_style == 'church':
x = tf.reduce_max(-x, axis=1, keepdims=True)
else:
raise ValueError('Unsupported clip style %s' % clip_style)
x = (adjust_range(x) + 1.0) / 2.0
b = get_weight(shape=[x.shape[2].value, x.shape[3].value], weight_var='bias')
att = x + b
return tf.tile(att, [1, fmaps, 1, 1])
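# --- Illustrative sketch (not part of the original codebase) ---
# spatial_att() reduces the channels according to clip_style, rescales the
# result with adjust_range() (zero mean, divided by the per-sample maximum),
# and shifts it into roughly [0, 1] before adding a learned per-pixel bias.
# A toy NumPy version of the bias-free part:
import numpy as np

def _spatial_att_sketch(x, clip_style='ffhq', eps=1e-8):
    if clip_style == 'ffhq':
        r = np.maximum(-x, 0.0).sum(axis=1, keepdims=True)  # relu(-x), summed over channels
    elif clip_style == 'cat':
        r = np.abs(x).sum(axis=1, keepdims=True)
    elif clip_style == 'church':
        r = (-x).max(axis=1, keepdims=True)
    else:
        raise ValueError('Unsupported clip style %s' % clip_style)
    r = r - r.mean(axis=(2, 3), keepdims=True)
    r = r / (r.max(axis=(2, 3), keepdims=True) + eps)
    return (r + 1.0) / 2.0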
|
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Implements the REJECT_ADD_ROLE_OWNER message
usage: rbac.role.owner.reject.create()"""
import logging
from rbac.common import addresser
from rbac.common.proposal.proposal_reject import ProposalReject
LOGGER = logging.getLogger(__name__)
class RejectAddRoleOwner(ProposalReject):
"""Implements the REJECT_ADD_ROLE_OWNER message
usage: rbac.role.owner.reject.create()"""
def __init__(self):
super().__init__()
self._register()
@property
def message_action_type(self):
"""The action type performed by this message"""
return addresser.MessageActionType.REJECT
@property
def message_subaction_type(self):
"""The subsequent action performed or proposed by this message"""
return addresser.MessageActionType.ADD
@property
def message_object_type(self):
"""The object type this message acts upon"""
return addresser.ObjectType.ROLE
@property
def message_related_type(self):
"""the object type of the related object this message acts upon"""
return addresser.ObjectType.USER
@property
def message_relationship_type(self):
"""The relationship type this message acts upon"""
return addresser.RelationshipType.OWNER
def make_addresses(self, message, signer_user_id):
"""Makes the appropriate inputs & output addresses for the message"""
inputs, outputs = super().make_addresses(message, signer_user_id)
# should be owner not admin
signer_admin_address = addresser.role.admin.address(
message.object_id, signer_user_id
)
inputs.add(signer_admin_address)
signer_owner_address = addresser.role.owner.address(
message.object_id, signer_user_id
)
inputs.add(signer_owner_address)
proposal_address = self.address(
object_id=message.object_id, related_id=message.related_id
)
inputs.add(proposal_address)
outputs.add(proposal_address)
return inputs, outputs
def validate_state(self, context, message, payload, input_state, store):
"""Validates that:
1. the signer is an owner of the role"""
super().validate_state(
context=context,
message=message,
payload=payload,
input_state=input_state,
store=store,
)
# TODO: change to verify proposal assignment and hierarchy
# TODO: should be owners
# if not addresser.role.admin.exists_in_state_inputs(
# inputs=payload.inputs,
# input_state=input_state,
# object_id=message.object_id,
# related_id=payload.signer.user_id,
# ):
# raise ValueError(
# "Signer {} must be an admin of the role {}".format(
# payload.signer.user_id, message.object_id
# )
# )
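# --- Illustrative note (not part of the original handler) ---
# make_addresses() above extends whatever ProposalReject.make_addresses()
# already collected: the signer's role-admin and role-owner relationship
# addresses are added to the transaction inputs, and the proposal address is
# added to both inputs and outputs, roughly:
#
#   inputs  includes { role.admin(object_id, signer), role.owner(object_id, signer),
#                      proposal(object_id, related_id) } plus the superclass addresses
#   outputs includes { proposal(object_id, related_id) } plus the superclass addresses
#
# (shown schematically; the real calls are the addresser.role.*.address() and
# self.address() calls above)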
|
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from c3nav.celery import app
logger = logging.getLogger('c3nav')
@app.task(bind=True, max_retries=3)
def send_changeset_proposed_notification(self, pk, author, title, description):
subject = '[c3nav] New Changeset by %s: %s' % (author, title)
for user in User.objects.filter(permissions__review_changesets=True):
if not user.email:
continue
text = (
('Hi %s!\n\n' % user.username) +
('A new Changeset has been proposed by %s:\n\n' % author) +
('---\n\n') +
(title+'\n\n'+description)
)
send_mail(subject, text, settings.MAIL_FROM, [user.email])
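# --- Illustrative note (not part of the original module) ---
# Being a regular Celery task, this would normally be queued asynchronously
# from the changeset workflow rather than called inline, e.g. (hypothetical
# call site, all values are placeholders):
#
#   send_changeset_proposed_notification.delay(
#       changeset.pk, author=changeset.author.username,
#       title=changeset.title, description=changeset.description)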
|