from __future__ import absolute_import
from .data_prep import img_pad
|
def main(request, response):
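    """Remove the given token from the server stash and return "1" if it was present, "0" otherwise."""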
token = request.GET.first("token")
if request.server.stash.remove(token) is not None:
return "1"
else:
return "0"
|
import torch
import torch.nn as nn
import spconv
from functools import partial
from .spconv_backbone import post_act_block
from ...utils import common_utils
class SparseBasicBlock(spconv.SparseModule):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):
super(SparseBasicBlock, self).__init__()
self.conv1 = spconv.SubMConv3d(
inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key
)
self.bn1 = norm_fn(planes)
self.relu = nn.ReLU()
self.conv2 = spconv.SubMConv3d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key
)
self.bn2 = norm_fn(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x.features
assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()
out = self.conv1(x)
out.features = self.bn1(out.features)
out.features = self.relu(out.features)
out = self.conv2(out)
out.features = self.bn2(out.features)
if self.downsample is not None:
identity = self.downsample(x)
out.features += identity
out.features = self.relu(out.features)
return out
class UNetV2(nn.Module):
"""
Sparse Convolution based UNet for point-wise feature learning.
    Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et al.)
From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
"""
def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.sparse_shape = grid_size[::-1] + [1, 0, 0]
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
norm_fn(16),
nn.ReLU(),
)
block = post_act_block
self.conv1 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
)
self.conv2 = spconv.SparseSequential(
# [1600, 1408, 41] <- [800, 704, 21]
block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
)
self.conv3 = spconv.SparseSequential(
# [800, 704, 21] <- [400, 352, 11]
block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
)
self.conv4 = spconv.SparseSequential(
# [400, 352, 11] <- [200, 176, 5]
block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
)
last_pad = 0
last_pad = self.model_cfg.get('last_pad', last_pad)
self.conv_out = spconv.SparseSequential(
            # [200, 176, 5] -> [200, 176, 2]
spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_down2'),
norm_fn(128),
nn.ReLU(),
)
# decoder
# [400, 352, 11] <- [200, 176, 5]
self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')
# [800, 704, 21] <- [400, 352, 11]
self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)
self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')
self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')
# [1600, 1408, 41] <- [800, 704, 21]
self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)
self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')
self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')
# [1600, 1408, 41] <- [1600, 1408, 41]
self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)
self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')
self.conv5 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')
)
self.num_point_features = 16
def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
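        # Decoder block: transform the lateral (skip) features, concatenate them with the
        # bottom-up features, fuse them with a submanifold conv, add the fused features back
        # to a channel-reduced copy of the concatenation, then apply the inverse (upsampling) conv.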
x_trans = conv_t(x_lateral)
x = x_trans
x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)
x_m = conv_m(x)
x = self.channel_reduction(x, x_m.features.shape[1])
x.features = x_m.features + x.features
x = conv_inv(x)
return x
@staticmethod
def channel_reduction(x, out_channels):
"""
Args:
x: x.features (N, C1)
out_channels: C2
Returns:
"""
features = x.features
n, in_channels = features.shape
assert (in_channels % out_channels == 0) and (in_channels >= out_channels)
x.features = features.view(n, out_channels, -1).sum(dim=2)
return x
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size: int
vfe_features: (num_voxels, C)
voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
Returns:
batch_dict:
encoded_spconv_tensor: sparse tensor
point_features: (N, C)
"""
voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
batch_size = batch_dict['batch_size']
input_sp_tensor = spconv.SparseConvTensor(
features=voxel_features,
indices=voxel_coords.int(),
spatial_shape=self.sparse_shape,
batch_size=batch_size
)
x = self.conv_input(input_sp_tensor)
x_conv1 = self.conv1(x)
x_conv2 = self.conv2(x_conv1)
x_conv3 = self.conv3(x_conv2)
x_conv4 = self.conv4(x_conv3)
# for detection head
# [200, 176, 5] -> [200, 176, 2]
out = self.conv_out(x_conv4)
# for segmentation head
# [400, 352, 11] <- [200, 176, 5]
x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)
# [800, 704, 21] <- [400, 352, 11]
x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)
# [1600, 1408, 41] <- [800, 704, 21]
x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)
# [1600, 1408, 41] <- [1600, 1408, 41]
x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5)
batch_dict['point_features'] = x_up1.features
point_coords = common_utils.get_voxel_centers(
x_up1.indices[:, 1:], downsample_times=1, voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_dict['point_coords'] = torch.cat((x_up1.indices[:, 0:1].float(), point_coords), dim=1)
batch_dict['encoded_spconv_tensor'] = out
batch_dict['encoded_spconv_tensor_stride'] = 8
return batch_dict
|
from collections import OrderedDict
from typing import Dict, Generic, Mapping, TypeVar
CacheKey = TypeVar("CacheKey")
CacheValue = TypeVar("CacheValue")
class LRUCache(Generic[CacheKey, CacheValue], OrderedDict):
"""
    A dictionary-like container that stores at most a given number of items.
If an additional item is added when the LRUCache is full, the least
recently used key is discarded to make room for the new item.
"""
def __init__(self, cache_size: int) -> None:
self.cache_size = cache_size
super(LRUCache, self).__init__()
def __setitem__(self, key: CacheKey, value: CacheValue) -> None:
"""Store a new views, potentially discarding an old value."""
if key not in self:
if len(self) >= self.cache_size:
self.popitem(last=False)
OrderedDict.__setitem__(self, key, value)
def __getitem__(self: Dict[CacheKey, CacheValue], key: CacheKey) -> CacheValue:
"""Gets the item, but also makes it most recent."""
value: CacheValue = OrderedDict.__getitem__(self, key)
OrderedDict.__delitem__(self, key)
OrderedDict.__setitem__(self, key, value)
return value
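
# Minimal usage sketch (illustrative, not part of the original module): reading a key
# marks it as most recently used, and inserting into a full cache evicts the least
# recently used key.
if __name__ == "__main__":
    cache: LRUCache[str, int] = LRUCache(cache_size=2)
    cache["a"] = 1
    cache["b"] = 2
    _ = cache["a"]   # "a" becomes the most recently used key
    cache["c"] = 3   # cache is full, so the least recently used key "b" is evicted
    assert "a" in cache and "c" in cache and "b" not in cache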
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "hdfs_namenode", "__about__.py")) as f:
exec(f.read(), ABOUT)
def get_requirements(fpath):
with open(path.join(HERE, fpath), encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog_checks_base'
setup(
name='datadog-hdfs_namenode',
version=ABOUT['__version__'],
description='The HDFS Namenode check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent hdfs_namenode check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.hdfs_namenode'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
tests_require=get_requirements('requirements-dev.txt'),
# Extra files to ship with the wheel package
include_package_data=True,
)
|
'''This example demonstrates the use of Convolution1D for text classification.
'''
from __future__ import print_function
import sys
sys.path.append('/Users/wangwei/anaconda2/envs/python3_keras/lib/python3.6/site-packages')
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras import backend as K
#os.chdir('/Users/wangwei/cuda_keras_projets/keras/examples/')
import six.moves.cPickle as pickle # for python 3
#import cPickle for python 2.7
import pandas as pd
import numpy as np
import jieba
# set parameters:
maxlen = 64 #11
batch_size = 5
embedding_dims = 300
filters = 50 # 100
kernel_size = 3
hidden_dims = 100
epochs = 10
def get_idx_from_sent(sent, word_idx_map, k=300):
"""
Transforms sentence into a list of indices.
"""
x = []
words = list(jieba.cut(sent, cut_all=False))
for word in words:
if word in word_idx_map:
x.append(word_idx_map[word])
return x
def make_idx_data_cv(revs, word_idx_map, cv, k=300):
"""
Transforms sentences into a 2-d matrix.
"""
train, test = [], []
train_y, test_y = [],[]
for rev in revs:
sent = get_idx_from_sent(rev['text'], word_idx_map, k)
if rev["split"]==cv:
test.append(sent)
test_y.append(rev["y"])
else:
train.append(sent)
train_y.append(rev["y"])
#train = np.array(train, dtype='int')
#test = np.array(test, dtype='int')
return [train, test, train_y, test_y]
if __name__=="__main__":
    print('The script that is running is:', __file__)
    print('Maximum sentence length used for the training data:', maxlen)
######### Main code starts here ###########
print("loading data...")
x = pickle.load(open("mr_folder/mr.p","rb"), encoding='latin1')
revs, W, W2, word_idx_map, word_idx_map2, vocab = x[0], x[1], x[2], x[3], x[4],x[5]
print("data loaded!")
print("using: word2vec vectors")
tmp = pd.DataFrame(revs)
max_l = np.max(tmp["num_words"])
print("number of sentences: " , str(len(revs)))
print("vocab size: " , str(len(vocab)))
print("max sentence length: " + str(max_l))
max_features = len(vocab)#50
#### Make datasets
datasets = make_idx_data_cv(revs, word_idx_map2, 1, k=300)
x_train = datasets[0]
x_test = datasets[1]
y_train = datasets[2]
y_test = datasets[3]
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
############# modelling with CNN
import keras
num_classes = 9
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
    print('length of y_train is:', y_train.shape[0])
print('Build model...')
K.clear_session()
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features+1,
embedding_dims,
weights=[W],
input_length=maxlen,
trainable=False))
model.add(Dropout(0.2))
    # we add a Convolution1D, which will learn word-group filters
    # of size kernel_size:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
#model.add(Activation('relu'))
    # A sigmoid activation is applied to the hidden layer; the original single-unit
    # output projection is left disabled:
    #model.add(Dense(1))
    model.add(Activation('sigmoid'))
######################
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
# model.compile(loss=keras.losses.categorical_crossentropy,
# optimizer=keras.optimizers.Adadelta(),
# metrics=['accuracy'])
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# serialize model to JSON
model_json = model.to_json()
with open("mr_folder/model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("mr_folder/model.h5")
print("Saved model to disk")
|
# ---------------------------------------------------------------------------
# Copyright 2017-2018 OMRON Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
import sys
import time
import p2def
from serial_connector import SerialConnector
from hvc_p2_api import HVCP2Api
from hvc_tracking_result import HVCTrackingResult
from grayscale_image import GrayscaleImage
###############################################################################
# User Config. Please edit here if you need.
###############################################################################
# Output image file name.
img_fname = 'registered_img.jpg'
# Read timeout value in seconds for serial communication.
# If you use UART slow baudrate, please edit here.
timeout = 30
# Album file name.
album_fname = 'Album.dat'
# HVC Camera Angle setting
hvc_camera_angle = p2def.HVC_CAM_ANGLE_0
# HVC_CAM_ANGLE_90
# HVC_CAM_ANGLE_180
# HVC_CAM_ANGLE_270
# Threshold value settings
body_thresh = 500 # Threshold for Human body detection [1 to 1000]
hand_thresh = 500 # Threshold for Hand detection [1 to 1000]
face_thresh = 500 # Threshold for Face detection [1 to 1000]
recognition_thresh = 500 # Threshold for Face recognition [0 to 1000]
# Detection Size settings
min_body_size = 30        # Minimum Human body detection size [20 to 8192]
max_body_size = 8192      # Maximum Human body detection size [20 to 8192]
min_hand_size = 40        # Minimum Hand detection size [20 to 8192]
max_hand_size = 8192      # Maximum Hand detection size [20 to 8192]
min_face_size = 64        # Minimum Face detection size [20 to 8192]
max_face_size = 8192 # Maximum Face detection size [20 to 8192]
# Execute functions
exec_func = p2def.EX_FACE\
| p2def.EX_DIRECTION\
| p2def.EX_RECOGNITION
# Detection face angle settings
face_angle_yaw = p2def.HVC_FACE_ANGLE_YAW_30
face_angle_roll = p2def.HVC_FACE_ANGLE_ROLL_15
# HVC_FACE_ANGLE_ROLL_45
###############################################################################
def _parse_arg(argv):
if len(argv) == 3:
        # Gets port information
portinfo = argv[1]
# Gets baudrate
baudrate = int(argv[2])
if baudrate not in p2def.AVAILABLE_BAUD:
print("Error: Invalid baudrate.")
sys.exit()
else:
print("Error: Invalid argument.")
sys.exit()
return (portinfo, baudrate)
def _check_connection(hvc_p2_api):
(res_code, hvc_type, major, minor, release, rev) = hvc_p2_api.get_version()
if res_code == 0 and hvc_type.startswith('B5T-007001'):
pass
else:
raise IOError("Error: connection failure.")
def _set_hvc_p2_parameters(hvc_p2_api):
# Sets camera angle
res_code = hvc_p2_api.set_camera_angle(hvc_camera_angle)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
raise ValueError("Error: Invalid camera angle.")
# Sets threshold
res_code = hvc_p2_api.set_threshold(body_thresh, hand_thresh,\
face_thresh, recognition_thresh)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
raise ValueError("Error: Invalid threshold.")
# Sets detection size
res_code = hvc_p2_api.set_detection_size(min_body_size, max_body_size,\
min_hand_size, max_hand_size,\
min_face_size, max_face_size)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
raise ValueError("Error: Invalid detection size.")
# Sets face angle
res_code = hvc_p2_api.set_face_angle(face_angle_yaw, face_angle_roll)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
raise ValueError("Error: Invalid face angle.")
def main():
# Parses arguments
(portinfo, baudrate) = _parse_arg(sys.argv)
connector = SerialConnector()
hvc_p2_api = HVCP2Api(connector, exec_func, p2def.USE_STB_OFF)
# The 1st connection
hvc_p2_api.connect(portinfo, p2def.DEFAULT_BAUD, 10) # 1st connection should be 9600 baud.
_check_connection(hvc_p2_api)
hvc_p2_api.set_uart_baudrate(baudrate) # Changing to the specified baud rate
hvc_p2_api.disconnect()
# The 2nd connection in specified baudrate
hvc_p2_api.connect(portinfo, baudrate, timeout)
_check_connection(hvc_p2_api)
try:
# Sets HVC-P2 parameters
_set_hvc_p2_parameters(hvc_p2_api)
img = GrayscaleImage()
# Main loop
while True:
str = "\n"\
+ "Please select the command.\n"\
+ " r : registration.\n"\
+ " g : get user data.\n"\
+ " s : save album.\n"\
+ " l : load album.\n"\
+ " d : delete all album data.\n"\
+ " x : exit.\n"\
+ " >>"
operation_str = raw_input(str)
if operation_str == 'x':
break
if operation_str == 'r':
while True:
str_uid = raw_input('user id [0-99] ')
if str_uid >= '0' and str_uid <= '99':
user_id = int(str_uid)
break
while True:
str_did = raw_input('data id [0-9] ')
if str_did >= '0' and str_did <= '9':
data_id = int(str_did)
break
raw_input('Press Enter key to register.')
res_code = hvc_p2_api.register_data(user_id, data_id, img)
if res_code < p2def.RESPONSE_CODE_NORMAL: # error
print("Error: Invalid register album.")
break
if res_code == p2def.RESPONSE_CODE_NO_FACE:
print("\nNumber of faces that can be registered is 0.")
if res_code == p2def.RESPONSE_CODE_PLURAL_FACE:
print("\nNumber of detected faces is 2 or more.")
if res_code == p2def.RESPONSE_CODE_NORMAL: # success
img.save(img_fname)
print(f"Success to register. user_id={str_uid} data_id={str_did})
if operation_str == 'g':
while True:
str_uid = raw_input('user id [0-99] ')
if str_uid >= '0' and str_uid <= '99':
user_id = int(str_uid)
break
print(f"uid[{user_id}]: "
res_code, data_list = hvc_p2_api.get_user_data(user_id)
if res_code < p2def.RESPONSE_CODE_NORMAL: # error
print("Error: Invalid register album.")
break
print(data_list)
if operation_str == 's':
# Saves album to flash ROM on B5T-007001.
res_code = hvc_p2_api.save_album_to_flash()
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid save album to flash.")
break
# Saves album to the file.
res_code, save_album = hvc_p2_api.save_album()
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid save album.")
break
with open(album_fname, "wb") as file:
file.write(save_album)
print("Success to save album.")
if operation_str == 'l':
# Loads album from file
if os.path.isfile(album_fname):
with open(album_fname, "rb") as file:
load_album = file.read()
res_code = hvc_p2_api.load_album(load_album)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid load album.")
break
print("Success to load album.")
if operation_str == 'd':
# Deletes all album data
res_code = hvc_p2_api.delete_all_data()
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid save album to flash.")
break
# Saves album to flash ROM on B5T-007001.
res_code = hvc_p2_api.save_album_to_flash()
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid save album to flash.")
break
print("Success to delete album.")
except KeyboardInterrupt:
time.sleep(1)
finally:
hvc_p2_api.set_uart_baudrate(p2def.DEFAULT_BAUD)
hvc_p2_api.disconnect()
if __name__ == '__main__':
main()
|
import asyncio
import asyncpg
import coc
import discord
import logging
import math
from collections import namedtuple
from datetime import datetime
from discord.ext import commands, tasks
from cogs.utils.db_objects import DatabaseMessage
from cogs.utils.formatters import CLYTable, get_render_type
from cogs.utils import checks
log = logging.getLogger(__name__)
MockPlayer = namedtuple('MockPlayer', 'clan name')
mock = MockPlayer('Unknown', 'Unknown')
class DonationBoard(commands.Cog):
"""Contains all DonationBoard Configurations.
"""
def __init__(self, bot):
self.bot = bot
self.clan_updates = []
self._to_be_deleted = set()
self.bot.coc.add_events(
self.on_clan_member_donation,
self.on_clan_member_received,
self.on_clan_member_trophies_change,
self.on_clan_member_join
)
self.bot.coc._clan_retry_interval = 60
self.bot.coc.start_updates('clan')
self._batch_lock = asyncio.Lock(loop=bot.loop)
self._data_batch = {}
self._clan_events = set()
self.bulk_insert_loop.add_exception_type(asyncpg.PostgresConnectionError)
self.bulk_insert_loop.start()
self.update_board_loops.add_exception_type(asyncpg.PostgresConnectionError, coc.ClashOfClansException)
self.update_board_loops.start()
def cog_unload(self):
self.bulk_insert_loop.cancel()
self.update_board_loops.cancel()
self.bot.coc.remove_events(
self.on_clan_member_donation,
self.on_clan_member_received,
self.on_clan_member_trophies_change,
self.on_clan_member_join
)
@tasks.loop(seconds=30.0)
async def bulk_insert_loop(self):
async with self._batch_lock:
await self.bulk_insert()
@tasks.loop(seconds=60.0)
async def update_board_loops(self):
async with self._batch_lock:
clan_tags = list(self._clan_events)
self._clan_events.clear()
query = """SELECT DISTINCT boards.channel_id
FROM boards
INNER JOIN clans
ON clans.guild_id = boards.guild_id
WHERE clans.clan_tag = ANY($1::TEXT[])
"""
fetch = await self.bot.pool.fetch(query, clan_tags)
for n in fetch:
try:
await self.update_board(n['channel_id'])
except:
pass
async def bulk_insert(self):
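        # The in-memory batch (self._data_batch) is serialized to JSON and unpacked
        # server-side with jsonb_to_recordset, so a whole batch of per-player deltas is
        # applied with a single UPDATE per table instead of one round trip per player.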
query = """UPDATE players SET donations = players.donations + x.donations,
received = players.received + x.received,
trophies = x.trophies
FROM(
SELECT x.player_tag, x.donations, x.received, x.trophies
FROM jsonb_to_recordset($1::jsonb)
AS x(player_tag TEXT,
donations INTEGER,
received INTEGER,
trophies INTEGER)
)
AS x
WHERE players.player_tag = x.player_tag
AND players.season_id=$2
"""
query2 = """UPDATE eventplayers SET donations = eventplayers.donations + x.donations,
received = eventplayers.received + x.received,
trophies = x.trophies
FROM(
SELECT x.player_tag, x.donations, x.received, x.trophies
FROM jsonb_to_recordset($1::jsonb)
AS x(player_tag TEXT,
donations INTEGER,
received INTEGER,
trophies INTEGER)
)
AS x
WHERE eventplayers.player_tag = x.player_tag
AND eventplayers.live = true
"""
if self._data_batch:
response = await self.bot.pool.execute(query, list(self._data_batch.values()),
await self.bot.seasonconfig.get_season_id())
log.debug(f'Registered donations/received to the database. Status Code {response}.')
response = await self.bot.pool.execute(query2, list(self._data_batch.values()))
log.debug(f'Registered donations/received to the events database. Status Code {response}.')
self._data_batch.clear()
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
if not isinstance(channel, discord.TextChannel):
return
query = "DELETE FROM messages WHERE channel_id = $1;"
query2 = """UPDATE boards
SET channel_id = NULL,
toggle = False
WHERE channel_id = $1;
"""
await self.bot.pool.execute(query, channel.id)
await self.bot.pool.execute(query2, channel.id)
self.bot.utils.board_config.invalidate(self.bot.utils, channel.id)
@commands.Cog.listener()
async def on_raw_message_delete(self, payload):
config = await self.bot.utils.board_config(payload.channel_id)
if not config:
return
if config.channel_id != payload.channel_id:
return
if payload.message_id in self._to_be_deleted:
self._to_be_deleted.discard(payload.message_id)
return
self.bot.utils.get_message.invalidate(self.bot.utils, payload.message_id)
message = await self.safe_delete(message_id=payload.message_id, delete_message=False)
if message:
await self.new_board_message(self.bot.get_channel(payload.channel_id), config.type)
@commands.Cog.listener()
async def on_raw_bulk_message_delete(self, payload):
config = await self.bot.utils.board_config(payload.channel_id)
if not config:
return
if config.channel_id != payload.channel_id:
return
for n in payload.message_ids:
if n in self._to_be_deleted:
self._to_be_deleted.discard(n)
continue
            self.bot.utils.get_message.invalidate(self.bot.utils, n)
message = await self.safe_delete(message_id=n, delete_message=False)
if message:
await self.new_board_message(self.bot.get_channel(payload.channel_id), config.type)
async def on_clan_member_donation(self, old_donations, new_donations, player, clan):
log.debug(f'Received on_clan_member_donation event for player {player} of clan {clan}')
if old_donations > new_donations:
donations = new_donations
else:
donations = new_donations - old_donations
async with self._batch_lock:
try:
self._data_batch[player.tag]['donations'] = donations
except KeyError:
self._data_batch[player.tag] = {
'player_tag': player.tag,
'donations': donations,
'received': 0,
'trophies': player.trophies
}
self._clan_events.add(clan.tag)
async def on_clan_member_received(self, old_received, new_received, player, clan):
log.debug(f'Received on_clan_member_received event for player {player} of clan {clan}')
if old_received > new_received:
received = new_received
else:
received = new_received - old_received
async with self._batch_lock:
try:
self._data_batch[player.tag]['received'] = received
except KeyError:
self._data_batch[player.tag] = {
'player_tag': player.tag,
'donations': 0,
'received': received,
'trophies': player.trophies
}
self._clan_events.add(clan.tag)
async def on_clan_member_trophies_change(self, _, new_trophies, player, clan):
log.debug(f'Received on_clan_member_trophy_change event for player {player} of clan {clan}.')
async with self._batch_lock:
try:
self._data_batch[player.tag]['trophies'] = new_trophies
except KeyError:
self._data_batch[player.tag] = {
'player_tag': player.tag,
'donations': 0,
'received': 0,
'trophies': new_trophies
}
self._clan_events.add(clan.tag)
async def on_clan_member_join(self, member, clan):
player = await self.bot.coc.get_player(member.tag)
player_query = """INSERT INTO players (
player_tag,
donations,
received,
trophies,
start_trophies,
season_id,
start_friend_in_need,
start_sharing_is_caring,
start_attacks,
start_defenses,
start_best_trophies,
start_update
)
VALUES ($1,$2,$3,$4,$4,$5,$6,$7,$8,$9,$10,True)
ON CONFLICT (player_tag, season_id)
DO NOTHING
"""
response = await self.bot.pool.execute(
player_query,
player.tag,
player.donations,
player.received,
player.trophies,
await self.bot.seasonconfig.get_season_id(),
player.achievements_dict['Friend in Need'].value,
player.achievements_dict['Sharing is caring'].value,
player.attack_wins,
player.defense_wins,
player.best_trophies
)
log.debug(f'New member {member} joined clan {clan}. Performed a query to insert them into players. '
f'Status Code: {response}')
query = """SELECT events.id
FROM events
INNER JOIN clans
ON clans.guild_id = events.guild_id
WHERE clans.clan_tag = $1
AND events.start <= now()
AND events.finish >= now()
"""
fetch = await self.bot.pool.fetch(query, clan.tag)
if not fetch:
return
event_query = """INSERT INTO eventplayers (
player_tag,
trophies,
event_id,
start_friend_in_need,
start_sharing_is_caring,
start_attacks,
start_defenses,
start_trophies,
start_best_trophies,
start_update,
live
)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, True, True)
ON CONFLICT (player_tag, event_id)
DO UPDATE
SET live=True
WHERE eventplayers.player_tag = $1
AND eventplayers.event_id = $2
"""
for n in fetch:
response = await self.bot.pool.execute(
event_query,
player.tag,
player.trophies,
n['id'],
player.achievements_dict['Friend in Need'].value,
player.achievements_dict['Sharing is caring'].value,
player.attack_wins,
player.defense_wins,
player.trophies,
player.best_trophies
)
log.debug(f'New member {member} joined clan {clan}. '
f'Performed a query to insert them into eventplayers. Status Code: {response}')
async def new_board_message(self, channel, board_type):
if not channel:
return
try:
new_msg = await channel.send('Placeholder')
except (discord.NotFound, discord.Forbidden):
return
query = "INSERT INTO messages (guild_id, message_id, channel_id) VALUES ($1, $2, $3)"
await self.bot.pool.execute(query, new_msg.guild.id, new_msg.id, new_msg.channel.id)
event_config = await self.bot.utils.event_config(channel.id)
if event_config:
await self.bot.background.remove_event_msg(event_config.id, channel, board_type)
await self.bot.background.new_event_message(event_config, channel.guild.id, channel.id, board_type)
return new_msg
async def safe_delete(self, message_id, delete_message=True):
query = "DELETE FROM messages WHERE message_id = $1 RETURNING id, guild_id, message_id, channel_id"
fetch = await self.bot.pool.fetchrow(query, message_id)
if not fetch:
return None
message = DatabaseMessage(bot=self.bot, record=fetch)
if not delete_message:
return message
self._to_be_deleted.add(message_id)
m = await message.get_message()
if not m:
return
await m.delete()
async def get_board_messages(self, channel_id, number_of_msg=None):
config = await self.bot.utils.board_config(channel_id)
if not (config.channel or config.toggle):
return
fetch = await config.messages()
messages = [await n.get_message() for n in fetch if await n.get_message()]
size_of = len(messages)
if not number_of_msg or size_of == number_of_msg:
return messages
if size_of > number_of_msg:
for n in messages[number_of_msg:]:
await self.safe_delete(n.id)
return messages[:number_of_msg]
if not config.channel:
return
for _ in range(number_of_msg - size_of):
m = await self.new_board_message(config.channel, config.type)
if not m:
return
messages.append(m)
return messages
async def get_top_players(self, players, board_type, sort_by, in_event, season_id=None):
season_id = season_id or await self.bot.seasonconfig.get_season_id()
if board_type == 'donation':
column_1 = 'donations'
column_2 = 'received'
sort_by = 'donations' if sort_by == 'donation' else sort_by
elif board_type == 'trophy':
column_1 = 'trophies'
column_2 = 'trophies - start_trophies'
sort_by = column_2 if sort_by == 'gain' else column_1
else:
return
# this should be ok since columns can only be a choice of 4 defined names
if in_event:
query = f"""SELECT player_tag, {column_1}, {column_2}
FROM eventplayers
WHERE player_tag=ANY($1::TEXT[])
AND live=true
ORDER BY {sort_by} DESC NULLS LAST
LIMIT 100;
"""
fetch = await self.bot.pool.fetch(query, [n.tag for n in players])
else:
query = f"""SELECT player_tag, {column_1}, {column_2}
FROM players
WHERE player_tag=ANY($1::TEXT[])
AND season_id=$2
ORDER BY {sort_by} DESC NULLS LAST
LIMIT 100;
"""
fetch = await self.bot.pool.fetch(query, [n.tag for n in players], season_id)
return fetch
async def update_board(self, channel_id):
config = await self.bot.utils.board_config(channel_id)
if not config:
return
if not config.toggle:
return
if not config.channel:
return
if config.in_event:
query = """SELECT DISTINCT clan_tag FROM clans WHERE guild_id=$1 AND in_event=$2"""
fetch = await self.bot.pool.fetch(query, config.guild_id, config.in_event)
else:
query = "SELECT DISTINCT clan_tag FROM clans WHERE guild_id=$1"
fetch = await self.bot.pool.fetch(query, config.guild_id)
clans = await self.bot.coc.get_clans((n[0] for n in fetch)).flatten()
players = []
for n in clans:
players.extend(p for p in n.itermembers)
try:
top_players = await self.get_top_players(players, config.type, config.sort_by, config.in_event)
except:
log.error(
f"{clans} channelid: {channel_id}, guildid: {config.guild_id},"
f" sort: {config.sort_by}, event: {config.in_event}, type: {config.type}"
)
return
players = {n.tag: n for n in players if n.tag in set(x['player_tag'] for x in top_players)}
message_count = math.ceil(len(top_players) / 20)
messages = await self.get_board_messages(channel_id, number_of_msg=message_count)
if not messages:
return
for i, v in enumerate(messages):
player_data = top_players[i*20:(i+1)*20]
table = CLYTable()
for x, y in enumerate(player_data):
index = i*20 + x
if config.render == 2:
table.add_row([index,
y[1],
players.get(y['player_tag'], mock).name])
else:
table.add_row([index,
y[1],
y[2],
players.get(y['player_tag'], mock).name])
render = get_render_type(config, table)
fmt = render()
e = discord.Embed(colour=self.get_colour(config.type, config.in_event),
description=fmt,
timestamp=datetime.utcnow()
)
e.set_author(name=f'Event in Progress!' if config.in_event
else config.title,
icon_url=config.icon_url or 'https://cdn.discordapp.com/'
'emojis/592028799768592405.png?v=1')
e.set_footer(text='Last Updated')
await v.edit(embed=e, content=None)
@staticmethod
def get_colour(board_type, in_event):
if board_type == 'donation':
if in_event:
return discord.Colour.gold()
return discord.Colour.blue()
if in_event:
return discord.Colour.purple()
return discord.Colour.green()
@commands.command(hidden=True)
@commands.is_owner()
async def forceboard(self, ctx, channel_id: int = None):
await self.update_board(channel_id or ctx.channel.id)
await ctx.confirm()
def setup(bot):
bot.add_cog(DonationBoard(bot))
|
# -*- coding: utf-8 -*-
""" S3 Notifications
@copyright: 2011-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import datetime
import json
import os
import string
import sys
from io import StringIO
from urllib.parse import urlencode
from urllib import parse as urlparse
from urllib import request as urllib2
from urllib.request import urlopen
from urllib.error import HTTPError
from uuid import uuid4
from gluon import current, TABLE, THEAD, TBODY, TR, TD, TH, XML
from .s3datetime import s3_decode_iso_datetime, s3_encode_iso_datetime, s3_utc
from .s3utils import s3_str, s3_truncate, s3_unicode
# =============================================================================
class S3Notifications(object):
""" Framework to send notifications about subscribed events """
# -------------------------------------------------------------------------
@classmethod
def check_subscriptions(cls):
"""
Scheduler entry point, creates notification tasks for all
active subscriptions which (may) have updates.
"""
_debug = current.log.debug
now = datetime.datetime.utcnow()
_debug("S3Notifications.check_subscriptions(now=%s)" % now)
subscriptions = cls._subscriptions(now)
if subscriptions:
run_async = current.s3task.run_async
for row in subscriptions:
# Create asynchronous notification task.
row.update_record(locked = True)
run_async("notify_notify", args=[row.id])
message = "%s notifications scheduled." % len(subscriptions)
else:
message = "No notifications to schedule."
_debug(message)
return message
# -------------------------------------------------------------------------
@classmethod
def notify(cls, resource_id):
"""
Asynchronous task to notify a subscriber about updates,
runs a POST?format=msg request against the subscribed
controller which extracts the data and renders and sends
the notification message (see send()).
@param resource_id: the pr_subscription_resource record ID
"""
_debug = current.log.debug
_debug("S3Notifications.notify(resource_id=%s)" % resource_id)
db = current.db
s3db = current.s3db
stable = s3db.pr_subscription
rtable = db.pr_subscription_resource
ftable = s3db.pr_filter
# Extract the subscription data
join = stable.on(rtable.subscription_id == stable.id)
left = ftable.on(ftable.id == stable.filter_id)
# @todo: should not need rtable.resource here
row = db(rtable.id == resource_id).select(stable.id,
stable.pe_id,
stable.frequency,
stable.notify_on,
stable.method,
stable.email_format,
stable.attachment,
rtable.id,
rtable.resource,
rtable.url,
rtable.last_check_time,
ftable.query,
join = join,
left = left,
).first()
if not row:
return True
s = getattr(row, "pr_subscription")
r = getattr(row, "pr_subscription_resource")
f = getattr(row, "pr_filter")
# Create a temporary token to authorize the lookup request
auth_token = str(uuid4())
# Store the auth_token in the subscription record
r.update_record(auth_token = auth_token)
db.commit()
# Construct the send-URL
public_url = current.deployment_settings.get_base_public_url()
lookup_url = "%s/%s/%s" % (public_url,
current.request.application,
r.url.lstrip("/"))
# Break up the URL into its components
purl = list(urlparse.urlparse(lookup_url))
# Subscription parameters
# Date (must ensure we pass to REST as tz-aware)
last_check_time = s3_encode_iso_datetime(r.last_check_time)
query = {"subscription": auth_token, "format": "msg"}
if "upd" in s.notify_on:
query["~.modified_on__ge"] = "%sZ" % last_check_time
else:
query["~.created_on__ge"] = "%sZ" % last_check_time
# Filters
if f.query:
from .s3filter import S3FilterString
resource = s3db.resource(r.resource)
fstring = S3FilterString(resource, f.query)
for k, v in fstring.get_vars.items():
if v is not None:
if k in query:
value = query[k]
if type(value) is list:
value.append(v)
else:
query[k] = [value, v]
else:
query[k] = v
query_nice = s3_unicode(fstring.represent())
else:
query_nice = None
# Add subscription parameters and filters to the URL query, and
# put the URL back together
query = urlencode(query)
if purl[4]:
query = "&".join((purl[4], query))
page_url = urlparse.urlunparse([purl[0], # scheme
purl[1], # netloc
purl[2], # path
purl[3], # params
query, # query
purl[5], # fragment
])
# Serialize data for send (avoid second lookup in send)
data = json.dumps({"pe_id": s.pe_id,
"notify_on": s.notify_on,
"method": s.method,
"email_format": s.email_format,
"attachment": s.attachment,
"resource": r.resource,
"last_check_time": last_check_time,
"filter_query": query_nice,
"page_url": lookup_url,
"item_url": None,
})
# Send the request
_debug("Requesting %s" % page_url)
req = urllib2.Request(page_url, data=data.encode("utf-8"))
req.add_header("Content-Type", "application/json")
success = False
try:
response = json.loads(urlopen(req).read())
message = response["message"]
if response["status"] == "success":
success = True
except HTTPError as e:
message = ("HTTP %s: %s" % (e.code, e.read()))
except:
exc_info = sys.exc_info()[:2]
message = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
_debug(message)
# Update time stamps and unlock, invalidate auth token
intervals = s3db.pr_subscription_check_intervals
interval = datetime.timedelta(minutes=intervals.get(s.frequency, 0))
if success:
last_check_time = datetime.datetime.utcnow()
next_check_time = last_check_time + interval
r.update_record(auth_token = None,
locked = False,
last_check_time = last_check_time,
next_check_time = next_check_time,
)
else:
r.update_record(auth_token = None,
locked = False,
)
db.commit()
# Done
return message
# -------------------------------------------------------------------------
@classmethod
def send(cls, r, resource):
"""
Method to retrieve updates for a subscription, render the
notification message and send it - responds to POST?format=msg
requests to the respective resource.
@param r: the S3Request
@param resource: the S3Resource
"""
_debug = current.log.debug
_debug("S3Notifications.send()")
json_message = current.xml.json_message
# Read subscription data
source = r.body
source.seek(0)
data = source.read()
subscription = json.loads(data)
#_debug("Notify PE #%s by %s on %s of %s since %s" % \
# (subscription["pe_id"],
# str(subscription["method"]),
# str(subscription["notify_on"]),
# subscription["resource"],
# subscription["last_check_time"],
# ))
# Check notification settings
notify_on = subscription["notify_on"]
methods = subscription["method"]
if not notify_on or not methods:
return json_message(message = "No notifications configured "
"for this subscription")
# Authorization (pe_id must not be None)
pe_id = subscription["pe_id"]
if not pe_id:
r.unauthorised()
# Fields to extract
fields = resource.list_fields(key="notify_fields")
if "created_on" not in fields:
fields.append("created_on")
# Extract the data
data = resource.select(fields,
represent = True,
raw_data = True)
rows = data["rows"]
# How many records do we have?
numrows = len(rows)
if not numrows:
return json_message(message = "No records found")
#_debug("%s rows:" % numrows)
# Prepare meta-data
get_config = resource.get_config
settings = current.deployment_settings
page_url = subscription["page_url"]
crud_strings = current.response.s3.crud_strings.get(resource.tablename)
if crud_strings:
resource_name = crud_strings.title_list
else:
resource_name = string.capwords(resource.name, "_")
last_check_time = s3_decode_iso_datetime(subscription["last_check_time"])
email_format = subscription["email_format"]
if not email_format:
email_format = settings.get_msg_notify_email_format()
filter_query = subscription.get("filter_query")
meta_data = {"systemname": settings.get_system_name(),
"systemname_short": settings.get_system_name_short(),
"resource": resource_name,
"page_url": page_url,
"notify_on": notify_on,
"last_check_time": last_check_time,
"filter_query": filter_query,
"total_rows": numrows,
}
# Render contents for the message template(s)
renderer = get_config("notify_renderer")
if not renderer:
renderer = settings.get_msg_notify_renderer()
if not renderer:
renderer = cls._render
contents = {}
if email_format == "html" and "EMAIL" in methods:
contents["html"] = renderer(resource, data, meta_data, "html")
contents["default"] = contents["html"]
if email_format != "html" or "EMAIL" not in methods or len(methods) > 1:
contents["text"] = renderer(resource, data, meta_data, "text")
contents["default"] = contents["text"]
# Subject line
subject = get_config("notify_subject")
if not subject:
subject = settings.get_msg_notify_subject()
if callable(subject):
subject = subject(resource, data, meta_data)
from string import Template
subject = Template(subject).safe_substitute(S = "%(systemname)s",
s = "%(systemname_short)s",
r = "%(resource)s")
subject = subject % meta_data
# Attachment
attachment = subscription.get("attachment", False)
document_ids = None
if attachment:
attachment_fnc = settings.get_msg_notify_attachment()
if attachment_fnc:
document_ids = attachment_fnc(resource, data, meta_data)
# **data for send_by_pe_id function in s3msg
send_data = {}
send_data_fnc = settings.get_msg_notify_send_data()
if callable(send_data_fnc):
send_data = send_data_fnc(resource, data, meta_data)
# Helper function to find message templates from a priority list
join = lambda *f: os.path.join(current.request.folder, *f)
def get_msg_template(path, filenames):
for fn in filenames:
filepath = join(path, fn)
if os.path.exists(filepath):
try:
return open(filepath, "rb")
except:
pass
return None
# Render and send the message(s)
templates = settings.get_template()
if templates != "default" and not isinstance(templates, (tuple, list)):
templates = (templates,)
prefix = resource.get_config("notify_template", "notify")
send = current.msg.send_by_pe_id
success = False
errors = []
for method in methods:
error = None
# Get the message template
msg_template = None
filenames = ["%s_%s.html" % (prefix, method.lower())]
if method == "EMAIL" and email_format:
filenames.insert(0, "%s_email_%s.html" % (prefix, email_format))
if templates != "default":
for template in templates[::-1]:
path = join("modules", "templates", template, "views", "msg")
msg_template = get_msg_template(path, filenames)
if msg_template is not None:
break
if msg_template is None:
path = join("views", "msg")
msg_template = get_msg_template(path, filenames)
if msg_template is None:
msg_template = StringIO(s3_str(current.T("New updates are available.")))
# Select contents format
if method == "EMAIL" and email_format == "html":
output = contents["html"]
else:
output = contents["text"]
# Render the message
try:
message = current.response.render(msg_template, output)
except:
exc_info = sys.exc_info()[:2]
error = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
errors.append(error)
continue
finally:
if hasattr(msg_template, "close"):
msg_template.close()
if not message:
continue
# Send the message
#_debug("Sending message per %s" % method)
#_debug(message)
try:
sent = send(pe_id,
# RFC 2822
subject = s3_truncate(subject, 78),
message = message,
contact_method = method,
system_generated = True,
document_ids = document_ids,
**send_data
)
except:
exc_info = sys.exc_info()[:2]
error = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
sent = False
if sent:
# Successful if at least one notification went out
success = True
else:
if not error:
error = current.session.error
if isinstance(error, list):
error = "/".join(error)
if error:
errors.append(error)
# Done
if errors:
message = ", ".join(errors)
else:
message = "Success"
return json_message(success = success,
statuscode = 200 if success else 403,
message = message)
# -------------------------------------------------------------------------
@classmethod
def _subscriptions(cls, now):
"""
Helper method to find all subscriptions which need to be
notified now.
@param now: current datetime (UTC)
@return: joined Rows pr_subscription/pr_subscription_resource,
or None if no due subscriptions could be found
@todo: take notify_on into account when checking
"""
db = current.db
s3db = current.s3db
stable = s3db.pr_subscription
rtable = db.pr_subscription_resource
# Find all resources with due subscriptions
next_check = rtable.next_check_time
locked_deleted = (rtable.locked != True) & \
(rtable.deleted == False)
query = ((next_check == None) |
(next_check <= now)) & \
locked_deleted
tname = rtable.resource
last_check = rtable.last_check_time
mtime = last_check.min()
rows = db(query).select(tname,
mtime,
groupby = tname,
)
if not rows:
return None
# Select those which have updates
resources = set()
radd = resources.add
for row in rows:
tablename = row[tname]
table = s3db.table(tablename)
if not table or not "modified_on" in table.fields:
# Can't notify updates in resources without modified_on
continue
modified_on = table.modified_on
msince = row[mtime]
if msince is None:
query = (table.id > 0)
else:
query = (modified_on >= msince)
update = db(query).select(modified_on,
orderby = ~(modified_on),
limitby = (0, 1)
).first()
if update:
radd((tablename, update.modified_on))
if not resources:
return None
# Get all active subscriptions to these resources which
# may need to be notified now:
join = rtable.on((rtable.subscription_id == stable.id) & \
locked_deleted)
query = None
for rname, modified_on in resources:
q = (tname == rname) & \
((last_check == None) |
(last_check <= modified_on))
if query is None:
query = q
else:
query |= q
query = (stable.frequency != "never") & \
(stable.deleted == False) & \
((next_check == None) | \
(next_check <= now)) & \
query
return db(query).select(rtable.id,
join = join,
)
# -------------------------------------------------------------------------
@classmethod
def _render(cls, resource, data, meta_data, format=None):
"""
Method to pre-render the contents for the message template
@param resource: the S3Resource
@param data: the data returned from S3Resource.select
@param meta_data: the meta data for the notification
@param format: the contents format ("text" or "html")
"""
created_on_selector = resource.prefix_selector("created_on")
created_on_colname = None
notify_on = meta_data["notify_on"]
last_check_time = meta_data["last_check_time"]
rows = data["rows"]
rfields = data["rfields"]
output = {}
new, upd = [], []
if format == "html":
# Pre-formatted HTML
colnames = []
new_headers = TR()
mod_headers = TR()
for rfield in rfields:
if rfield.selector == created_on_selector:
created_on_colname = rfield.colname
elif rfield.ftype != "id":
colnames.append(rfield.colname)
label = rfield.label
new_headers.append(TH(label))
mod_headers.append(TH(label))
for row in rows:
append_record = upd.append
if created_on_colname:
try:
created_on = row["_row"][created_on_colname]
except (KeyError, AttributeError):
pass
else:
if s3_utc(created_on) >= last_check_time:
append_record = new.append
tr = TR([TD(XML(row[colname])) for colname in colnames])
append_record(tr)
if "new" in notify_on and len(new):
output["new"] = len(new)
output["new_records"] = TABLE(THEAD(new_headers), TBODY(new))
else:
output["new"] = None
if "upd" in notify_on and len(upd):
output["upd"] = len(upd)
output["upd_records"] = TABLE(THEAD(new_headers), TBODY(upd))
else:
output["upd"] = None
else:
# Standard text format
labels = []
append = labels.append
for rfield in rfields:
if rfield.selector == created_on_selector:
created_on_colname = rfield.colname
elif rfield.ftype != "id":
append((rfield.colname, rfield.label))
for row in rows:
append_record = upd.append
if created_on_colname:
try:
created_on = row["_row"][created_on_colname]
except (KeyError, AttributeError):
pass
else:
if s3_utc(created_on) >= last_check_time:
append_record = new.append
record = []
append_column = record.append
for colname, label in labels:
append_column((label, row[colname]))
append_record(record)
if "new" in notify_on and len(new):
output["new"] = len(new)
output["new_records"] = new
else:
output["new"] = None
if "upd" in notify_on and len(upd):
output["upd"] = len(upd)
output["upd_records"] = upd
else:
output["upd"] = None
output.update(meta_data)
return output
# END =========================================================================
|
import struct
import unittest
from typing import List
import pyparcel
DATA: List[int] = [
-1 << 31,
-1000,
-57,
-26,
-20,
-5,
-2,
-1,
0,
1,
2,
5,
20,
57,
1000,
(1 << 31) - 1,
]
class MyTestCase(unittest.TestCase):
def test_pack(self):
for i in DATA:
self.assertEqual(pyparcel.pack(i), struct.pack("i", i))
def test_pack_unpack(self):
for i in DATA:
self.assertEqual(i, pyparcel.unpack(pyparcel.pack(i), int()))
if __name__ == "__main__":
unittest.main()
|
"""
AdamP
Copyright (c) 2020-present NAVER Corp.
MIT license
"""
import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer, required
import math
class SGDP(Optimizer):
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):
defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,
nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)
super(SGDP, self).__init__(params, defaults)
def _channel_view(self, x):
return x.view(x.size(0), -1)
def _layer_view(self, x):
return x.view(1, -1)
def _cosine_similarity(self, x, y, eps, view_func):
x = view_func(x)
y = view_func(y)
x_norm = x.norm(dim=1).add_(eps)
y_norm = y.norm(dim=1).add_(eps)
dot = (x * y).sum(dim=1)
return dot.abs() / x_norm / y_norm
def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
wd = 1
expand_size = [-1] + [1] * (len(p.shape) - 1)
for view_func in [self._channel_view, self._layer_view]:
cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
wd = wd_ratio
return perturb, wd
return perturb, wd
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state['momentum'] = torch.zeros_like(p.data)
# SGD
buf = state['momentum']
buf.mul_(momentum).add_(1 - dampening, grad)
if nesterov:
d_p = grad + momentum * buf
else:
d_p = buf
# Projection
wd_ratio = 1
if len(p.shape) > 1:
d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])
# Weight decay
if weight_decay != 0:
p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum))
# Step
p.data.add_(-group['lr'], d_p)
return loss
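
# Minimal usage sketch (illustrative, not part of the original file): SGDP is used like
# torch.optim.SGD; the projection hyperparameters (eps, delta, wd_ratio) keep their defaults.
if __name__ == "__main__":
    model = nn.Linear(10, 2)
    optimizer = SGDP(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
    x, y = torch.randn(4, 10), torch.randn(4, 2)
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()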
|
from flask import Flask, jsonify, send_file, url_for
import data_functions
from datetime import datetime
app = Flask(__name__)  # Flask(__name__) creates the app instance; __name__ tells Flask which module the app lives in
@app.route("/", methods =['GET'])
def hello():
return ("Hello World")
@app.route("/hello")
def helloo():
return "Hello Not World"
@app.route("/total_loss/<first_name>/<last_name>", methods = ['GET'])
def total_loss(first_name, last_name):
return str(data_functions.total_loss(first_name, last_name))
@app.route("/total_weights_graph/<first_name>/<last_name>", methods = ['GET'])
def total_weights(first_name, last_name):
data_functions.get_total_weights(first_name, last_name)
filename = '/Users/Pranav/PycharmProjects/WeightTracker'
return "http://127.0.0.1:5000" + url_for('static', filename="total_weights_graph.png")
@app.route("/weekly_weights_graph/<first_name>/<last_name>", methods = ['GET'])
def week_weights(first_name, last_name):
data_functions.get_week_weights(first_name, last_name)
return "http://127.0.0.1:5000" + url_for('static', filename="week_weights_graph.png")
@app.route("/add_weight/<first_name>/<last_name>/<weight>", methods = ['GET','POST'])
def add_weight(first_name, last_name, weight):
data_functions.add_weight(first_name, last_name, int(weight), datetime.now().timestamp())
    return jsonify(success=True)
if __name__ == '__main__':
app.run(debug=True)
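
# Example requests (illustrative; names are placeholders, server running locally):
#   GET  http://127.0.0.1:5000/total_loss/Jane/Doe
#   GET  http://127.0.0.1:5000/total_weights_graph/Jane/Doe
#   POST http://127.0.0.1:5000/add_weight/Jane/Doe/150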
|
class ConfigurationError(Exception):
"""
The exception raised by any object when it's misconfigured
(e.g. missing properties, invalid properties, unknown properties).
"""
def __init__(self, message):
super().__init__()
self.message = message
def __str__(self):
return repr(self.message)
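
# Minimal usage sketch (illustrative, not part of the original module).
if __name__ == "__main__":
    try:
        raise ConfigurationError("unknown property: 'max_retries'")
    except ConfigurationError as err:
        print(err)  # prints the repr of the message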
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from setuptools import find_packages, setup
__version__ = '4.3.1'
requirements = [
"neo4j-driver>=1.7.2,<4.0",
"pytz>=2018.4",
"statsd>=3.2.1",
"retrying>=1.3.3",
"requests>=2.23.0,<3.0",
"elasticsearch>=6.2.0,<7.0",
"pyhocon>=0.3.42",
"unidecode",
"Jinja2>=2.10.0,<2.12",
"pandas>=0.21.0,<1.2.0",
"amundsen-rds>=0.0.4"
]
kafka = ['confluent-kafka==1.0.0']
cassandra = ['cassandra-driver==3.20.1']
glue = ['boto3==1.10.1']
snowflake = [
'snowflake-connector-python',
'snowflake-sqlalchemy'
]
athena = ['PyAthena[SQLAlchemy]>=1.0.0, <2.0.0']
# Python API client for google
# License: Apache Software License
# Upstream url: https://github.com/googleapis/google-api-python-client
bigquery = [
'google-api-python-client>=1.6.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'google-auth>=1.0.0, <2.0.0dev'
]
jsonpath = ['jsonpath_rw==1.4.0']
db2 = [
'ibm_db==3.0.1',
'ibm-db-sa-py3==0.3.1-1'
]
dremio = [
'pyodbc==4.0.30'
]
druid = [
'pydruid'
]
spark = [
'pyspark == 3.0.1'
]
neptune = [
'amundsen-gremlin>=0.0.9',
'Flask==1.0.2',
'gremlinpython==3.4.3',
'requests-aws4auth==0.9',
'typing-extensions==3.7.4',
'overrides==2.5',
'boto3==1.10.1'
]
feast = [
'feast==0.8.0'
]
atlas = [
'pyatlasclient==1.1.2'
]
rds = [
'sqlalchemy>=1.3.6,<1.4',
'mysqlclient>=1.3.6,<3'
]
all_deps = requirements + kafka + cassandra + glue + snowflake + athena + \
bigquery + jsonpath + db2 + dremio + druid + spark + feast + neptune + rds
setup(
name='amundsen-databuilder',
version=__version__,
description='Amundsen Data builder',
url='https://www.github.com/amundsen-io/amundsendatabuilder',
maintainer='Amundsen TSC',
maintainer_email='amundsen-tsc@lists.lfai.foundation',
packages=find_packages(exclude=['tests*']),
dependency_links=[],
install_requires=requirements,
python_requires='>=3.6',
extras_require={
'all': all_deps,
'kafka': kafka, # To use with Kafka source extractor
'cassandra': cassandra,
'glue': glue,
'snowflake': snowflake,
'athena': athena,
'bigquery': bigquery,
'jsonpath': jsonpath,
'db2': db2,
'dremio': dremio,
'druid': druid,
'neptune': neptune,
'delta': spark,
'feast': feast,
'atlas': atlas,
'rds': rds
},
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
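# Example (hedged): installing with the extras declared above, e.g.
#   pip install amundsen-databuilder              # core requirements only
#   pip install "amundsen-databuilder[kafka]"     # plus the Kafka source extractor deps
#   pip install "amundsen-databuilder[all]"       # everything in all_deps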
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core Fast Attention Module for Flax.
Implementation of the approximate fast softmax and generalized
attention mechanism leveraging structured random feature maps [RFM] techniques
and low rank decomposition of the attention matrix.
"""
# pylint: disable=invalid-name, missing-function-docstring, line-too-long
import abc
from collections.abc import Iterable # pylint: disable=g-importing-member
import functools
from absl import logging
import gin
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as onp
# Nonlinear mappings encoding different attention kernels.
gin.external_configurable(jnp.cos, 'jcos')
gin.external_configurable(jnp.sin, 'jsin')
gin.external_configurable(jnp.tanh, 'jtanh')
gin.external_configurable(jax.nn.sigmoid, 'jsigmoid')
gin.external_configurable(
lambda x: jax.nn.gelu(x, approximate=False), 'jgelu'
) # Needs to be exact, although might be slower. See https://github.com/google/jax/issues/4428.
gin.external_configurable(lambda x: x * x * (x > 0.0), 'jrequ')
gin.external_configurable(jnp.exp, 'jexp')
gin.external_configurable(lambda x: x, 'jidentity')
gin.external_configurable(
lambda x: (jnp.exp(x)) * (x <= 0.0) + (x + 1.0) * (x > 0.0), 'jshiftedelu'
) # Nonlinearity used in "Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention" (https://arxiv.org/abs/2006.16236).
def nonnegative_softmax_kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True,
eps=0.0001):
"""Constructs nonnegative kernel features for fast softmax attention.
Args:
    data: input for which features are computed
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
is_query: predicate indicating whether input data corresponds to queries or
keys
normalize_data: predicate indicating whether data should be normalized,
eps: numerical stabilizer.
Returns:
Random features for fast softmax attention.
"""
if normalize_data:
# We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where
# w_norm = w * data_normalizer for w in {q,k}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
last_dims_t = (len(data_dash.shape) - 1,)
if is_query:
data_dash = ratio * (
jnp.exp(data_dash - diag_data -
jnp.max(data_dash, axis=last_dims_t, keepdims=True)) + eps)
else:
data_dash = ratio * (
jnp.exp(data_dash - diag_data - jnp.max(
data_dash, axis=last_dims_t + attention_dims_t, keepdims=True)) +
eps)
return data_dash
def sincos_softmax_kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
normalize_data=True):
"""Constructs kernel sin-cos features for fast softmax attention.
Args:
    data: input for which features are computed
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
normalize_data: predicate indicating whether data should be normalized.
Returns:
Random features for fast softmax attention.
"""
if normalize_data:
# We have: exp(qk^T/sqrt{d}) = exp(|q|^2/2sqrt{d}) * exp(|k|^2/2sqrt{d}) *
# exp(-(|q*c-k*c|^2)/2), where c = 1.0 / sqrt{sqrt{d}}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
data_dash_cos = ratio * jnp.cos(data_dash)
data_dash_sin = ratio * jnp.sin(data_dash)
data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1)
# Constructing D_data and data^{'}
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
# Additional renormalization for numerical stability
data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True)
diag_data -= data_renormalizer
diag_data = jnp.exp(diag_data)
data_prime = data_dash * diag_data
return data_prime
def generalized_kernel_feature_creator(data, projection_matrix, batch_dims_t,
precision, kernel_fn, kernel_epsilon,
normalize_data):
"""Constructs kernel features for fast generalized attention.
Args:
    data: input for which features are computed
projection_matrix: matrix used to compute features
batch_dims_t: tuple of batch dimensions
precision: precision parameter
kernel_fn: kernel function used
kernel_epsilon: additive positive term added to every feature for numerical
stability
normalize_data: predicate indicating whether data should be normalized.
Returns:
Random features for fast generalized attention.
"""
if normalize_data:
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
else:
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime
@gin.configurable
def make_fast_softmax_attention(qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.000001,
nb_features=256,
ortho_features=True,
ortho_scaling=0.0,
redraw_features=True,
unidirectional=False,
nonnegative_features=True,
lax_scan_unroll=1):
"""Construct a fast softmax attention method."""
logging.info(
'Fast softmax attention: %s features and orthogonal=%s, renormalize=%s',
nb_features, ortho_features, renormalize_attention)
if ortho_features:
matrix_creator = functools.partial(
GaussianOrthogonalRandomMatrix,
nb_features,
qkv_dim,
scaling=ortho_scaling)
else:
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix,
nb_features, qkv_dim)
if nonnegative_features:
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True):
return nonnegative_softmax_kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision,
is_query, normalize_data, numerical_stabilizer)
else:
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True):
del is_query
return sincos_softmax_kernel_feature_creator(data, projection_matrix,
attention_dims_t,
batch_dims_t, precision,
normalize_data)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll).dot_product_attention
return attention_fn
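# Example (hedged sketch): building and applying the fast softmax attention
# function returned above. Shapes follow the dot_product_attention contract
# ([batch, length, num_heads, channels]); the concrete sizes are illustrative.
#
#   attn_fn = make_fast_softmax_attention(qkv_dim=64, nb_features=256)
#   q = k = v = jnp.ones((2, 128, 4, 64))
#   out = attn_fn(q, k, v)  # -> shape (2, 128, 4, 64)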
@gin.configurable
def make_fast_generalized_attention(qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.0,
nb_features=256,
features_type='deterministic',
kernel_fn=jax.nn.relu,
kernel_epsilon=0.001,
redraw_features=False,
unidirectional=False,
lax_scan_unroll=1):
"""Construct a fast generalized attention menthod."""
  logging.info('Fast generalized attention: %s features and renormalize=%s',
nb_features, renormalize_attention)
if features_type == 'ortho':
matrix_creator = functools.partial(
GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False)
elif features_type == 'iid':
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix,
nb_features, qkv_dim)
elif features_type == 'deterministic':
matrix_creator = None
else:
raise ValueError('Unknown feature value type')
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=False):
del attention_dims_t
del is_query
return generalized_kernel_feature_creator(data, projection_matrix,
batch_dims_t, precision,
kernel_fn, kernel_epsilon,
normalize_data)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll).dot_product_attention
return attention_fn
class RandomMatrix(object):
r"""Abstract class providing a method for constructing 2D random arrays.
Class is responsible for constructing 2D random arrays.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_2d_array(self):
raise NotImplementedError('Abstract method')
class GaussianUnstructuredRandomMatrix(RandomMatrix):
def __init__(self, nb_rows, nb_columns, key):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
def get_2d_array(self):
return random.normal(self.key, (self.nb_rows, self.nb_columns))
class GaussianOrthogonalRandomMatrix(RandomMatrix):
r"""Class providing a method to create Gaussian orthogonal matrix.
Class is responsible for constructing 2D Gaussian orthogonal arrays.
"""
def __init__(self, nb_rows, nb_columns, key, scaling=0):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
self.scaling = scaling
def get_2d_array(self):
nb_full_blocks = int(self.nb_rows / self.nb_columns)
block_list = []
rng = self.key
for _ in range(nb_full_blocks):
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input,
(self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q)
remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns
if remaining_rows > 0:
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input,
(self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q[0:remaining_rows])
final_matrix = jnp.vstack(block_list)
if self.scaling == 0:
multiplier = jnp.linalg.norm(
random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1)
elif self.scaling == 1:
multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows))
else:
      raise ValueError('Scaling must be one of {0, 1}. Was %s' % self.scaling)
return jnp.matmul(jnp.diag(multiplier), final_matrix)
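# Example (hedged sketch): drawing a 256 x 64 block-orthogonal projection matrix.
#   key = random.PRNGKey(0)
#   proj = GaussianOrthogonalRandomMatrix(256, 64, key, scaling=0).get_2d_array()
#   proj.shape  # (256, 64)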
class FastAttention(object):
r"""Abstract class providing a method for fast attention.
Class is responsible for providing a method <dot_product_attention> for fast
approximate attention.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def dot_product_attention(self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying fast approximate dot-product
attention. It calculates the attention weights given query and key and
combines the values using the attention weights. This function supports
multi-dimensional inputs.
Args:
query: queries for calculating attention with shape of [batch_size, dim1,
dim2, ..., dimN, num_heads, mem_channels].
key: keys for calculating attention with shape of [batch_size, dim1, dim2,
..., dimN, num_heads, mem_channels].
value: values to be used in attention with shape of [batch_size, dim1,
dim2,..., dimN, num_heads, value_channels].
dtype: the dtype of the computation (default: float32)
bias: bias for the attention weights. This can be used for incorporating
autoregressive mask, padding mask, proximity bias.
      axis: axes over which the attention is applied.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout.
dropout_rate: dropout rate.
deterministic: bool, deterministic or not (to apply dropout).
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
Returns:
      Output of shape [bs, dim1, dim2, ..., dimN, num_heads, value_channels].
"""
raise NotImplementedError('Abstract method')
def _numerator(z_slice_shape, precision, unroll=1):
def fwd(qs, ks, vs):
def body(p, qkv):
(q, k, v) = qkv
p += jnp.einsum('...m,...d->...md', k, v, precision=precision)
X_slice = jnp.einsum('...m,...md->...d', q, p, precision=precision)
return p, X_slice
init_value = jnp.zeros(z_slice_shape)
p, W = lax.scan(body, init_value, (qs, ks, vs), unroll=unroll)
return W, (p, qs, ks, vs)
def bwd(pqkv, W_ct):
def body(carry, qkv_xct):
p, p_ct = carry
q, k, v, x_ct = qkv_xct
q_ct = jnp.einsum('...d,...md->...m', x_ct, p, precision=precision)
p_ct += jnp.einsum('...d,...m->...md', x_ct, q, precision=precision)
k_ct = jnp.einsum('...md,...d->...m', p_ct, v, precision=precision)
v_ct = jnp.einsum('...md,...m->...d', p_ct, k, precision=precision)
p -= jnp.einsum('...m,...d->...md', k, v, precision=precision)
return (p, p_ct), (q_ct, k_ct, v_ct)
p, qs, ks, vs = pqkv
_, (qs_ct, ks_ct, vs_ct) = lax.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, vs, W_ct),
reverse=True,
unroll=unroll)
return qs_ct, ks_ct, vs_ct
@jax.custom_vjp
def _numerator_impl(qs, ks, vs):
W, _ = fwd(qs, ks, vs)
return W
_numerator_impl.defvjp(fwd, bwd)
return _numerator_impl
def _denominator(t_slice_shape, precision, unroll=1):
def fwd(qs, ks):
def body(p, qk):
q, k = qk
p += k
x = jnp.einsum('...m,...m->...', q, p, precision=precision)
return p, x
p = jnp.zeros(t_slice_shape)
p, R = lax.scan(body, p, (qs, ks), unroll=unroll)
return R, (qs, ks, p)
def bwd(qkp, R_ct):
def body(carry, qkx):
p, p_ct = carry
q, k, x_ct = qkx
q_ct = jnp.einsum('...,...m->...m', x_ct, p, precision=precision)
p_ct += jnp.einsum('...,...m->...m', x_ct, q, precision=precision)
k_ct = p_ct
p -= k
return (p, p_ct), (q_ct, k_ct)
qs, ks, p = qkp
_, (qs_ct, ks_ct) = lax.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, R_ct),
reverse=True,
unroll=unroll)
return (qs_ct, ks_ct)
@jax.custom_vjp
def _denominator_impl(qs, ks):
R, _ = fwd(qs, ks)
return R
_denominator_impl.defvjp(fwd, bwd)
return _denominator_impl
class FastAttentionviaLowRankDecomposition(FastAttention):
r"""Class providing a method for fast attention via low rank decomposition.
Class is responsible for providing a method <dot_product_attention> for fast
dot-product attention with the use of low rank decomposition (e.g. with
random feature maps).
"""
def __init__(self,
matrix_creator,
kernel_feature_creator,
renormalize_attention,
numerical_stabilizer,
redraw_features,
unidirectional,
lax_scan_unroll=1): # For optimal GPU performance, set to 16.
rng = random.PRNGKey(0)
self.matrix_creator = matrix_creator
self.projection_matrix = self.draw_weights(rng)
self.kernel_feature_creator = kernel_feature_creator
self.renormalize_attention = renormalize_attention
self.numerical_stabilizer = numerical_stabilizer
self.redraw_features = redraw_features
self.unidirectional = unidirectional
self.lax_scan_unroll = lax_scan_unroll
def draw_weights(self, key):
if self.matrix_creator is None:
return None
matrixrng, _ = random.split(key)
projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array()
return projection_matrix
def dot_product_attention(self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
assert key.shape[:-1] == value.shape[:-1]
assert (query.shape[0:1] == key.shape[0:1] and
query.shape[-1] == key.shape[-1])
if axis is None:
axis = tuple(range(1, key.ndim - 2))
if not isinstance(axis, Iterable):
axis = (axis,)
assert key.ndim == query.ndim
assert key.ndim == value.ndim
for ax in axis:
if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
raise ValueError('Attention axis must be between the batch '
'axis and the last-two axes.')
n = key.ndim
# Constructing projection tensor.
if self.redraw_features:
# TODO(kchoro): Get rid of the constant below.
query_seed = lax.convert_element_type(
jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32)
rng = random.PRNGKey(query_seed)
self.projection_matrix = self.draw_weights(rng)
# batch_dims is <bs, <non-attention dims>, num_heads>
batch_dims = tuple(onp.delete(range(n), axis + (n - 1,)))
# q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
qk_perm = batch_dims + axis + (n - 1,)
k_extra_perm = axis + batch_dims + (n - 1,)
key_extra = key.transpose(k_extra_perm)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
v_perm = batch_dims + axis + (n - 1,)
value = value.transpose(v_perm)
batch_dims_t = tuple(range(len(batch_dims)))
attention_dims_t = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
# Constructing tensors Q^{'} and K^{'}.
query_prime = self.kernel_feature_creator(query, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, True)
key_prime = self.kernel_feature_creator(key, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, False)
if self.unidirectional:
index = attention_dims_t[0]
z_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],) + (value.shape[-1],)
numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll)
W = numerator_fn(
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0))
# Constructing W = (Q^{'}(K^{'})^{T})_{masked}V
W = jnp.moveaxis(W, 0, index)
if not self.renormalize_attention:
# Unidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Unidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
index = attention_dims_t[0]
t_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],)
denominator_fn = _denominator(t_slice_shape, precision,
self.lax_scan_unroll)
R = denominator_fn(
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0))
R = jnp.moveaxis(R, 0, index)
else:
contract_query = tuple(
range(len(batch_dims) + len(axis),
len(batch_dims) + len(axis) + 1))
contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1))
# Constructing Z = (K^{'})^{T}V
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
Z = lax.dot_general(
key_prime,
value,
((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)),
precision=precision)
# Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V
# q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m)
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
W = lax.dot_general(
query_prime,
Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)),
precision=precision)
if not self.renormalize_attention:
# Bidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Bidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
contract_key = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
contract_thick_all_ones = tuple(
range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim))
# Construct T = (K^{'})^{T} 1_L
# k (bs, <non-attention dims>, num_heads, <attention dims>, channels)
T = lax.dot_general(
key_prime,
thick_all_ones, ((contract_key, contract_thick_all_ones),
(batch_dims_t, batch_dims_t)),
precision=precision)
# Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L
# q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m)
# T (bs, <non-attention dims>, num_heads, channels_m)
R = lax.dot_general(
query_prime,
T, (((query_prime.ndim - 1,), (T.ndim - 1,)),
(batch_dims_t, range(0,
len(T.shape) - 1))),
precision=precision)
R = R + 2 * self.numerical_stabilizer * (
jnp.abs(R) <= self.numerical_stabilizer)
R = jnp.reciprocal(R)
R = jnp.expand_dims(R, len(R.shape))
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
# R (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel)
result = W * R
# back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
perm_inv = _invert_perm(qk_perm)
result = result.transpose(perm_inv)
return result
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
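# Worked example: _invert_perm((2, 0, 1)) == (1, 2, 0), so transposing by a
# permutation and then by its inverse restores the original axis order.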
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..core import MarsRequestHandler
class ExtraTestHandler(MarsRequestHandler):
def get(self):
self.write('Test')
web_handlers = {
'/api/extra_test': ExtraTestHandler
}
|
load(
"@rules_mono//dotnet/private:providers.bzl",
"DotnetLibrary",
)
def _make_runner_arglist(dotnet, source, output):
args = dotnet.actions.args()
args.add("/useSourcePath")
if type(source) == "Target":
args.add_all(source.files)
else:
args.add(source)
args.add(output)
return args
def emit_resx_net(
dotnet,
name = "",
src = None,
identifier = None,
out = None,
customresgen = None):
if name == "" and out == None:
fail("either name or out must be set")
if not out:
result = dotnet.declare_file(dotnet, path = name + ".resources")
else:
result = dotnet.declare_file(dotnet, path = out)
args = _make_runner_arglist(dotnet, src, result)
inputs = src.files if type(src) == "Target" else [src]
dotnet.actions.run(
inputs = inputs,
outputs = [result],
executable = dotnet.resgen,
arguments = [args],
mnemonic = "NetResxCompile",
progress_message = (
"Compiling resoources" + dotnet.label.package + ":" + dotnet.label.name
),
)
return dotnet.new_resource(
dotnet = dotnet,
name = name,
result = result,
identifier = identifier,
)
|
import pathgraph
import robotsearch
import unittest
class TestGraphMethods(unittest.TestCase):
def test_create_undirected_graph(self):
self.assertTrue(isinstance(pathgraph.graph_by_type("undirected"), pathgraph.UndirectedGraph))
def test_create_directed_graph(self):
self.assertTrue(isinstance(pathgraph.graph_by_type("directed"), pathgraph.DirectedGraph))
def test_add_duplicate_edge_undirected(self):
graph = pathgraph.graph_by_type("undirected")
destination = pathgraph.DestinationNode("B", 1)
self.assertTrue(graph.add_edge(fromKey="A", destination=destination))
self.assertFalse(graph.add_edge(fromKey="A", destination=destination))
def test_add_duplicate_edge_directed(self):
graph=pathgraph.graph_by_type("directed")
destination = pathgraph.DestinationNode("B", 1)
self.assertTrue(graph.add_edge(fromKey="A", destination=destination))
self.assertFalse(graph.add_edge(fromKey="A", destination=destination))
def main():
unittest.main()
if __name__ == "__main__":
main()
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from tempfile import mkdtemp, mkstemp
from os.path import exists, isdir, join
from os import remove, close
from shutil import rmtree
from json import dumps
from skbio.stats.distance import randdm
from skbio import OrdinationResults
from qiita_client import ArtifactInfo
from qiita_client.testing import PluginTestCase
import pandas as pd
import numpy as np
from qtp_diversity import plugin
from qtp_diversity.validate import (
_validate_distance_matrix, _validate_ordination_results,
_validate_alpha_vector, _validate_feature_data_taxonomy, validate)
class ValidateTests(PluginTestCase):
def setUp(self):
self.out_dir = mkdtemp()
self._clean_up_files = [self.out_dir]
self.metadata = {
'1.SKM4.640180': {'col': "doesn't really matters"},
'1.SKB8.640193': {'col': "doesn't really matters"},
'1.SKD8.640184': {'col': "doesn't really matters"},
'1.SKM9.640192': {'col': "doesn't really matters"},
'1.SKB7.640196': {'col': "doesn't really matters"}}
plugin('https://localhost:8383', 'register', 'ignored')
def tearDown(self):
for fp in self._clean_up_files:
if exists(fp):
if isdir(fp):
rmtree(fp)
else:
remove(fp)
def _create_distance_matrix(self, sample_ids):
dm = randdm(len(sample_ids), sample_ids)
fd, fp = mkstemp(suffix='.txt', dir=self.out_dir)
close(fd)
dm.write(fp)
return fp
def _create_ordination_results(self, sample_ids):
eigvals = [0.51236726, 0.30071909, 0.26791207, 0.20898868]
proportion_explained = [0.2675738328, 0.157044696, 0.1399118638,
0.1091402725]
axis_labels = ['PC1', 'PC2', 'PC3', 'PC4']
samples = [[-2.584, 1.739, 3.828, -1.944],
[-2.710, -1.859, -8.648, 1.180],
[2.350, 9.625, -3.457, -3.208],
[2.614, -1.114, 1.476, 2.908],
[2.850, -1.925, 6.232, 1.381]]
ord_res = OrdinationResults(
short_method_name='PCoA',
long_method_name='Principal Coordinate Analysis',
eigvals=pd.Series(eigvals, index=axis_labels),
samples=pd.DataFrame(np.asarray(samples), index=sample_ids,
columns=axis_labels),
proportion_explained=pd.Series(proportion_explained,
index=axis_labels))
fd, fp = mkstemp(suffix='.txt', dir=self.out_dir)
close(fd)
ord_res.write(fp)
return fp
def _create_alpha_vector(self, sample_ids):
fd, fp = mkstemp(suffix='.txt', dir=self.out_dir)
close(fd)
with open(fp, 'w') as f:
f.write("\tobserved_otus\n")
for s_id in sample_ids:
f.write("%s\t%d\n" % (s_id, np.random.randint(1, 200)))
return fp
def _create_job(self, a_type, files, analysis):
parameters = {'template': None,
'files': dumps(files),
'artifact_type': a_type,
'analysis': analysis}
data = {'command': dumps(['Diversity types', '0.1.1', 'Validate']),
'parameters': dumps(parameters),
'status': 'running'}
job_id = self.qclient.post(
'/apitest/processing_job/', data=data)['job']
return job_id, parameters
def test_validate_distance_matrix(self):
# Create a distance matrix
sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',
'1.SKM9.640192', '1.SKB7.640196']
dm_fp = self._create_distance_matrix(sample_ids)
# Test success
obs_success, obs_ainfo, obs_error = _validate_distance_matrix(
{'plain_text': [dm_fp]}, self.metadata, self.out_dir)
self.assertTrue(obs_success)
exp_ainfo = [ArtifactInfo(None, "distance_matrix",
[(dm_fp, 'plain_text')])]
self.assertEqual(obs_ainfo, exp_ainfo)
self.assertEqual(obs_error, "")
# Test failure
sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',
'1.SKM9.640192', 'NotASample']
dm_fp = self._create_distance_matrix(sample_ids)
obs_success, obs_ainfo, obs_error = _validate_distance_matrix(
{'plain_text': [dm_fp]}, self.metadata, self.out_dir)
self.assertFalse(obs_success)
self.assertIsNone(obs_ainfo)
self.assertEqual(obs_error, "The distance matrix contain samples not "
"present in the metadata")
def test_validate_ordination_results(self):
# Create the ordination results
sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',
'1.SKM9.640192', '1.SKB7.640196']
ord_res_fp = self._create_ordination_results(sample_ids)
# Test success
obs_success, obs_ainfo, obs_error = _validate_ordination_results(
{'plain_text': [ord_res_fp]}, self.metadata, self.out_dir)
self.assertTrue(obs_success)
exp_ainfo = [ArtifactInfo(None, "ordination_results",
[(ord_res_fp, 'plain_text')])]
self.assertEqual(obs_ainfo, exp_ainfo)
self.assertEqual(obs_error, "")
# Test failure
sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',
'1.SKM9.640192', 'NotASample']
ord_res_fp = self._create_ordination_results(sample_ids)
obs_success, obs_ainfo, obs_error = _validate_ordination_results(
{'plain_text': [ord_res_fp]}, self.metadata, self.out_dir)
self.assertFalse(obs_success)
self.assertIsNone(obs_ainfo)
self.assertEqual(obs_error, "The ordination results contain samples "
"not present in the metadata")
def test_validate_alpha_vector(self):
# Create the alpha vector
sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',
'1.SKM9.640192']
alpha_vector_fp = self._create_alpha_vector(sample_ids)
# Test success
obs_success, obs_ainfo, obs_error = _validate_alpha_vector(
{'plain_text': [alpha_vector_fp]}, self.metadata, self.out_dir)
self.assertEqual(obs_error, "")
self.assertTrue(obs_success)
exp_ainfo = [ArtifactInfo(None, "alpha_vector",
[(alpha_vector_fp, 'plain_text')])]
self.assertEqual(obs_ainfo, exp_ainfo)
# Test failure wrong ids
sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',
'NotASample']
alpha_vector_fp = self._create_alpha_vector(sample_ids)
obs_success, obs_ainfo, obs_error = _validate_alpha_vector(
{'plain_text': [alpha_vector_fp]}, self.metadata, self.out_dir)
self.assertEqual(obs_error, "The alpha vector contains samples not "
"present in the metadata")
self.assertFalse(obs_success)
self.assertIsNone(obs_ainfo)
# Test failure wrong format
fd, alpha_vector_fp = mkstemp(suffix='.txt', dir=self.out_dir)
close(fd)
with open(alpha_vector_fp, 'w') as f:
f.write("\tobserved_otus\nsample 1\n")
obs_success, obs_ainfo, obs_error = _validate_alpha_vector(
{'plain_text': [alpha_vector_fp]}, self.metadata, self.out_dir)
self.assertEqual(obs_error, "The alpha vector format is incorrect")
self.assertFalse(obs_success)
self.assertIsNone(obs_ainfo)
def test_validate(self):
# Test artifact type error
job_id, params = self._create_job(
'NotAType', {'plan_text': 'Will fail before checking this'}, 1)
obs_success, obs_ainfo, obs_error = validate(
self.qclient, job_id, params, self.out_dir)
self.assertFalse(obs_success)
self.assertIsNone(obs_ainfo)
self.assertEqual(
obs_error, "Unknown artifact type NotAType. Supported types: "
"FeatureData[Taxonomy], alpha_vector, distance_matrix, "
"ordination_results")
# Test missing metadata error - to be fair, I don't know how this error
# can happen in the live system, but better be safe than sorry
job_id, params = self._create_job(
'distance_matrix', {'plan_text': 'Will fail before checking this'},
None)
obs_success, obs_ainfo, obs_error = validate(
self.qclient, job_id, params, self.out_dir)
self.assertFalse(obs_success)
self.assertIsNone(obs_ainfo)
self.assertEqual(
obs_error, "Missing metadata information")
# Test distance matrix success
sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',
'1.SKM9.640192', '1.SKB7.640196']
dm_fp = self._create_distance_matrix(sample_ids)
job_id, params = self._create_job(
'distance_matrix', {'plain_text': [dm_fp]}, 1)
obs_success, obs_ainfo, obs_error = validate(
self.qclient, job_id, params, self.out_dir)
self.assertTrue(obs_success)
html_fp = join(self.out_dir, 'index.html')
exp_ainfo = [ArtifactInfo(None, "distance_matrix",
[(dm_fp, 'plain_text'),
(html_fp, 'html_summary')])]
self.assertEqual(obs_ainfo, exp_ainfo)
self.assertEqual(obs_error, "")
# Test ordination results success
ord_res_fp = self._create_ordination_results(sample_ids)
job_id, params = self._create_job(
'ordination_results', {'plain_text': [ord_res_fp]}, 1)
obs_success, obs_ainfo, obs_error = validate(
self.qclient, job_id, params, self.out_dir)
self.assertTrue(obs_success)
html_fp = join(self.out_dir, 'index.html')
esf_fp = join(self.out_dir, 'emperor_support_files')
exp_ainfo = [ArtifactInfo(None, "ordination_results",
[(ord_res_fp, 'plain_text'),
(html_fp, 'html_summary'),
(esf_fp, 'html_summary_dir')])]
self.assertEqual(obs_ainfo, exp_ainfo)
self.assertEqual(obs_error, "")
# Test alpha vector success
alpha_vector_fp = self._create_alpha_vector(sample_ids)
job_id, params = self._create_job(
'alpha_vector', {'plain_text': [alpha_vector_fp]}, 1)
obs_success, obs_ainfo, obs_error = validate(
self.qclient, job_id, params, self.out_dir)
self.assertTrue(obs_success)
html_fp = join(self.out_dir, 'index.html')
sf_fp = join(self.out_dir, 'support_files')
exp_ainfo = [ArtifactInfo(None, "alpha_vector",
[(alpha_vector_fp, 'plain_text'),
(html_fp, 'html_summary'),
(sf_fp, 'html_summary_dir')])]
self.assertEqual(obs_ainfo, exp_ainfo)
self.assertEqual(obs_error, "")
def test_validate_FeatureData_Taxonomy(self):
# Create the feature data
fd, taxonomy_fp = mkstemp(suffix='.txt', dir=self.out_dir)
close(fd)
with open(taxonomy_fp, 'w') as f:
f.write("Feature ID\tTaxonomy\tConfidence\n")
f.write("TACGGAGGA\tk__Bacteria;p__Bacteroidetes;c__Bacteroidia\t"
"0.9998743\n")
f.write("TACGTAGGG\tk__Bacteria;p__Firmicutes;c__Clostridia\t"
"0.9999999\n")
# Test success
obs_success, obs_ainfo, obs_error = _validate_feature_data_taxonomy(
{'plain_text': [taxonomy_fp]}, None, self.out_dir)
self.assertEqual(obs_error, "")
self.assertTrue(obs_success)
exp_ainfo = [ArtifactInfo(None, "FeatureData[Taxonomy]",
[(taxonomy_fp, 'plain_text')])]
self.assertEqual(obs_ainfo, exp_ainfo)
# Test failure wrong format
fd, taxonomy_fp = mkstemp(suffix='.txt', dir=self.out_dir)
close(fd)
with open(taxonomy_fp, 'w') as f:
f.write("Feature ID\tIt's gonna fail!\tConfidence\n")
f.write("TACGGAGGA\tk__Bacteria;p__Bacteroidetes;c__Bacteroidia\t"
"0.9998743\n")
f.write("TACGTAGGG\tk__Bacteria;p__Firmicutes;c__Clostridia\t"
"0.9999999\n")
obs_success, obs_ainfo, obs_error = _validate_feature_data_taxonomy(
{'plain_text': [taxonomy_fp]}, None, self.out_dir)
self.assertIn("The file header seems wrong", obs_error)
self.assertFalse(obs_success)
self.assertIsNone(obs_ainfo)
if __name__ == '__main__':
main()
|
# date: 2019.04.21
# https://stackoverflow.com/a/55778640/1832058
import requests
# a Session is not strictly required here; the working request below calls requests.post() directly
s = requests.Session()
s.headers.update({
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
})
#r = s.get('http://bit.do/')
#print(r.status_code)
#print(r.cookies)
# ------------------------------------
headers={
'X-Requested-With': 'XMLHttpRequest', # need it
#'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0',
#'Cookie': 'permasession=1555801674|ole2ky65f9', #
}
data = {
'action': 'shorten',
'url': 'https://onet.pl',
'url2': ' site2 ', # need spaces
'url_hash': None,
'url_stats_is_private': 0,
'permasession': '1555801674|ole2ky65f9', # need it
}
r = requests.post('http://bit.do/mod_perl/url-shortener.pl', headers=headers, data=data)
print(r.status_code)
print(r.json())
import datetime
# decode the permasession timestamp used in the payload above
print(datetime.datetime.fromtimestamp(1555801674))
|
import re
import copy
import warnings
import operator
import numpy as np
from astropy import _erfa as erfa
from astropy.utils.compat.misc import override__dir__
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.utils.data_info import MixinInfo
from astropy.utils import ShapedLikeNDArray
from astropy.time import Time
from astropy.utils.exceptions import AstropyUserWarning
from .distances import Distance
from .angles import Angle
from .baseframe import (BaseCoordinateFrame, frame_transform_graph,
GenericFrame)
from .builtin_frames import ICRS, SkyOffsetFrame
from .representation import (SphericalRepresentation,
UnitSphericalRepresentation, SphericalDifferential)
from .sky_coordinate_parsers import (_get_frame_class, _get_frame_without_data,
_parse_coordinate_data)
__all__ = ['SkyCoord', 'SkyCoordInfo']
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = set(['unit']) # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ['{0.' + compname + '.value:}' for compname
in repr_data.components]
return ','.join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ','.join(str(getattr(repr_data, comp).unit) or 'None'
for comp in repr_data.components)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if (issubclass(sc.representation_type, SphericalRepresentation)
and isinstance(sc.data, UnitSphericalRepresentation)):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type,
in_frame_units=True)
return repr_data
def _represent_as_dict(self):
obj = self._parent
attrs = (list(obj.representation_component_names) +
list(frame_transform_graph.frame_attributes.keys()))
# Don't output distance if it is all unitless 1.0
if 'distance' in attrs and np.all(obj.distance == 1.0):
attrs.remove('distance')
out = super()._represent_as_dict(attrs)
out['representation_type'] = obj.representation_type.get_name()
out['frame'] = obj.frame.name
# Note that obj.info.unit is a fake composite unit (e.g. 'deg,deg,None'
# or None,None,m) and is not stored. The individual attributes have
# units.
return out
def new_like(self, skycoords, length, metadata_conflicts='warn', name=None):
"""
Return a new SkyCoord instance which is consistent with the input
SkyCoord objects ``skycoords`` and has ``length`` rows. Being
"consistent" is defined as being able to set an item from one to each of
the rest without any exception being raised.
This is intended for creating a new SkyCoord instance whose elements can
be set in-place for table operations like join or vstack. This is used
when a SkyCoord object is used as a mixin column in an astropy Table.
The data values are not predictable and it is expected that the consumer
of the object will fill in all values.
Parameters
----------
skycoords : list
List of input SkyCoord objects
length : int
Length of the output skycoord object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output name (sets output skycoord.info.name)
Returns
-------
skycoord : SkyCoord (or subclass)
Instance of this class consistent with ``skycoords``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(skycoords, metadata_conflicts, name,
('meta', 'description'))
skycoord0 = skycoords[0]
# Make a new SkyCoord object with the desired length and attributes
# by using the _apply / __getitem__ machinery to effectively return
        # skycoord0[[0, 0, ..., 0, 0]]. This will have all the right frame
# attributes with the right shape.
indexes = np.zeros(length, dtype=np.int64)
out = skycoord0[indexes]
# Use __setitem__ machinery to check for consistency of all skycoords
for skycoord in skycoords[1:]:
try:
out[0] = skycoord[0]
except Exception as err:
raise ValueError(f'input skycoords are inconsistent: {err}')
# Set (merged) info attributes
for attr in ('name', 'meta', 'description'):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The `SkyCoord` class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: http://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a `SkyCoord`
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias. The frame classes that are built in
to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.
The string aliases are simply lower-case versions of the class name, and
allow for creating a `SkyCoord` object and transforming frames without
explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this `SkyCoord` should represent. Defaults to
        ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied ``LON`` and ``LAT`` values, respectively. If
only one unit is supplied then it applies to both ``LON`` and
``LAT``.
obstime : valid `~astropy.time.Time` initializer, optional
Time(s) of observation.
equinox : valid `~astropy.time.Time` initializer, optional
Coordinate frame equinox.
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : valid `~astropy.coordinates.Angle` initializer, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including `ICRS`,
`FK5`, `FK4`, and `FK4NoETerms`.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity`, optional
Proper motion components, in angle per time units.
l, b : valid `~astropy.coordinates.Angle` initializer, optional
            Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the `Galactic` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity`, optional
Proper motion components in the `Galactic` frame, in angle per time
units.
x, y, z : float or `~astropy.units.Quantity`, optional
Cartesian coordinates values
u, v, w : float or `~astropy.units.Quantity`, optional
Cartesian coordinates values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity`, optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
# If all that is passed in is a frame instance that already has data,
# we should bypass all of the parsing and logic below. This is here
# to make this the fastest way to create a SkyCoord instance. Many of
# the classmethods implemented for performance enhancements will use
# this as the initialization path
if (len(args) == 1 and len(kwargs) == 0
and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))):
coords = args[0]
if isinstance(coords, SkyCoord):
self._extra_frameattr_names = coords._extra_frameattr_names
self.info = coords.info
# Copy over any extra frame attributes
for attr_name in self._extra_frameattr_names:
# Setting it will also validate it.
setattr(self, attr_name, getattr(coords, attr_name))
coords = coords.frame
if not coords.has_data:
raise ValueError('Cannot initialize from a coordinate frame '
'instance without coordinate data')
if copy:
self._sky_coord_frame = coords.copy()
else:
self._sky_coord_frame = coords
else:
# Get the frame instance without coordinate data but with all frame
# attributes set - these could either have been passed in with the
# frame as an instance, or passed in as kwargs here
frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
skycoord_kwargs, components, info = _parse_coordinate_data(
frame_cls(**frame_kwargs), args, kwargs)
# In the above two parsing functions, these kwargs were identified
# as valid frame attributes for *some* frame, but not the frame that
# this SkyCoord will have. We keep these attributes as special
# skycoord frame attributes:
for attr in skycoord_kwargs:
# Setting it will also validate it.
setattr(self, attr, skycoord_kwargs[attr])
if info is not None:
self.info = info
# Finally make the internal coordinate object.
frame_kwargs.update(components)
self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError('Cannot create a SkyCoord without data')
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
# TODO: remove these in future
@property
def representation(self):
return self.frame.representation
@representation.setter
def representation(self, value):
self.frame.representation = value
@property
def shape(self):
return self.frame.shape
def __eq__(self, value):
"""Equality operator for SkyCoord
This implements strict equality and requires that the frames are
equivalent, extra frame attributes are equivalent, and that the
representation data are exactly equal.
"""
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(getattr(self, attr),
getattr(value, attr)):
raise ValueError(f"cannot compare: extra frame attribute "
f"'{attr}' is not equivalent "
f"(perhaps compare the frames directly to avoid "
f"this exception)")
return self._sky_coord_frame == value._sky_coord_frame
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method,
*args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, 'shape', ()):
value = apply_method(value)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, '_' + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
new.info = self.info
return new
def __setitem__(self, item, value):
"""Implement self[item] = value for SkyCoord
The right hand ``value`` must be strictly consistent with self:
- Identical class
- Equivalent frames
- Identical representation_types
- Identical representation differentials keys
- Identical frame attributes
- Identical "extra" frame attributes (e.g. obstime for an ICRS coord)
With these caveats the setitem ends up as effectively a setitem on
the representation data.
self.frame.data[item] = value.frame.data
"""
if self.__class__ is not value.__class__:
raise TypeError(f'can only set from object of same class: '
f'{self.__class__.__name__} vs. '
f'{value.__class__.__name__}')
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(getattr(self, attr),
getattr(value, attr)):
raise ValueError(f'attribute {attr} is not equivalent')
# Set the frame values. This checks frame equivalence and also clears
# the cache to ensure that the object is not in an inconsistent state.
self._sky_coord_frame[item] = value._sky_coord_frame
def insert(self, obj, values, axis=0):
"""
Insert coordinate values before the given indices in the object and
return a new Frame object.
The values to be inserted must conform to the rules for in-place setting
of ``SkyCoord`` objects.
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple insertion before the index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.coordinates.SkyCoord` instance
New coordinate object with inserted value(s)
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError('obj arg must be an integer')
if axis != 0:
raise ValueError('axis must be 0')
if not self.shape:
raise TypeError('cannot insert into scalar {} object'
.format(self.__class__.__name__))
if abs(idx0) > len(self):
raise IndexError('index {} is out of bounds for axis 0 with size {}'
.format(idx0, len(self)))
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name)
# Set the output values. This is where validation of `values` takes place to ensure
# that it can indeed be inserted.
out[:idx0] = self[:idx0]
out[idx0:idx0 + n_values] = values
out[idx0 + n_values:] = self[idx0:]
return out
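    # Example (hedged sketch): insert one coordinate before index 1.
    #   sc = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
    #   sc.insert(1, SkyCoord(10 * u.deg, 20 * u.deg))  # -> length-3 SkyCoord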
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
default for ``merge_attributes`` is `True`, in which the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
`SkyCoord` that are not part of the destination frame's definition are
kept (stored on the resulting `SkyCoord`), and thus one can round-trip
(e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance
The frame to transform this coordinate into. If a `SkyCoord`, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : `SkyCoord`
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if (frame_val is not None
and not (merge_attributes
and frame.is_frame_attr_default(attr))):
frame_kwargs[attr] = frame_val
elif (self_val is not None
and not self.is_frame_attr_default(attr)):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError('Transform `frame` must be a frame name, class, or instance')
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError('Cannot transform from {} to {}'
.format(self.frame.__class__, new_frame_cls))
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in (set(new_coord.get_frame_attr_names()) &
set(frame_kwargs.keys())):
frame_kwargs.pop(attr)
return self.__class__(new_coord, **frame_kwargs)
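# Added illustrative sketch (not from the original source): the target frame
# can be given as an alias string, a frame class/instance, or another SkyCoord.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord, FK5
#     >>> c = SkyCoord(10. * u.deg, 20. * u.deg, frame='icrs')
#     >>> c_gal = c.transform_to('galactic')
#     >>> c_fk5 = c.transform_to(FK5(equinox='J1975'))  # explicitly set equinox takes precedence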
def apply_space_motion(self, new_obstime=None, dt=None):
"""
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation."
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : `SkyCoord`
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
"""
if (new_obstime is None and dt is None or
new_obstime is not None and dt is not None):
raise ValueError("You must specify one of `new_obstime` or `dt`, "
"but not both.")
# Validate that we have velocity info
if 's' not in self.frame.data.differentials:
raise ValueError('SkyCoord requires velocity data to evolve the '
'position.')
if 'obstime' in self.frame.frame_attributes:
raise NotImplementedError("Updating the coordinates in a frame "
"with explicit time dependence is "
"currently not supported. If you would "
"like this functionality, please open an "
"issue on github:\n"
"https://github.com/astropy/astropy")
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
# position / velocity were measured initially
raise ValueError('This object has no associated `obstime`. '
'apply_space_motion() must receive a time '
'difference, `dt`, and not a new obstime.')
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time('J2000')
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials['s']
parallax_zero = False
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.
parallax_zero = True
try:
rv = icrsvel.d_distance.to_value(u.km/u.s)
except u.UnitConversionError: # No RV
rv = 0.
starpm = erfa.pmsafe(icrsrep.lon.radian, icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian/u.yr),
icrsvel.d_lat.to_value(u.radian/u.yr),
plx, rv, t1.jd1, t1.jd2, t2.jd1, t2.jd2)
if parallax_zero:
new_distance = None
else:
new_distance = Distance(parallax=starpm[4] << u.arcsec)
icrs2 = ICRS(ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian/u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian/u.yr, copy=False),
distance=new_distance,
radial_velocity=u.Quantity(starpm[5], u.km/u.s, copy=False),
differential_type=SphericalDifferential)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {attrnm: getattr(self, attrnm)
for attrnm in self._extra_frameattr_names}
frattrs['obstime'] = new_obstime
return self.__class__(icrs2, **frattrs).transform_to(self.frame)
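# Added illustrative sketch (not from the original source): evolving a
# coordinate with full space motion to a new epoch. Values are hypothetical.
#
#     >>> import astropy.units as u
#     >>> from astropy.time import Time
#     >>> from astropy.coordinates import SkyCoord
#     >>> sc = SkyCoord(ra=10.*u.deg, dec=20.*u.deg, distance=100.*u.pc,
#     ...               pm_ra_cosdec=50.*u.mas/u.yr, pm_dec=-30.*u.mas/u.yr,
#     ...               radial_velocity=10.*u.km/u.s, obstime=Time('J2015.5'))
#     >>> sc_2020 = sc.apply_space_motion(new_obstime=Time('2020-01-01'))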
def _is_name(self, string):
"""
Returns whether a string is one of the aliases for the frame.
"""
return (self.frame.name == string or
(isinstance(self.frame.name, list) and string in self.frame.name))
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the master transform graph.
"""
if '_sky_coord_frame' in self.__dict__:
if self._is_name(attr):
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.get_frame_attr_names():
return getattr(self.frame, attr)
else:
return getattr(self, '_' + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Fail
raise AttributeError("'{}' object has no attribute '{}'"
.format(self.__class__.__name__, attr))
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if '_sky_coord_frame' in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__('_' + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if '_sky_coord_frame' in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith('_') and hasattr(self._sky_coord_frame,
attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__('_' + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__delattr__(attr)
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include:
- Transforms available by aliases
- Attribute / methods of the underlying self.frame object
"""
# determine the aliases that this can be transformed to.
dir_values = set()
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update(set(attr for attr in dir(self.frame) if not attr.startswith('_')))
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return dir_values
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ': ' + frameattrs
data = self.frame._data_repr()
if data:
data = ': ' + data
return '<{clsnm} ({coonm}{frameattrs}){data}>'.format(**locals())
def to_string(self, style='decimal', **kwargs):
"""
A string representation of the coordinates.
The default style definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
common ways to represent coordinates. The default is `decimal`.
kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {'hmsdms': {'lonargs': {'unit': u.hour, 'pad': True},
'latargs': {'unit': u.degree, 'pad': True, 'alwayssign': True}},
'dms': {'lonargs': {'unit': u.degree},
'latargs': {'unit': u.degree}},
'decimal': {'lonargs': {'unit': u.degree, 'decimal': True},
'latargs': {'unit': u.degree, 'decimal': True}}
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]['lonargs'])
latargs.update(styles[style]['latargs'])
else:
raise ValueError('Invalid style. Valid options are: {}'.format(",".join(styles)))
lonargs.update(kwargs)
latargs.update(kwargs)
if np.isscalar(sph_coord.lon.value):
coord_string = (sph_coord.lon.to_string(**lonargs) +
" " + sph_coord.lat.to_string(**latargs))
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [(lonangle.to_string(**lonargs) +
" " + latangle.to_string(**latargs))]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
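# Added illustrative sketch (not from the original source): the three
# built-in styles applied to the same coordinate (outputs shown approximately).
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> c = SkyCoord(10.68458 * u.deg, 41.26917 * u.deg)
#     >>> c.to_string('decimal')   # '10.6846 41.2692'
#     >>> c.to_string('dms')       # '10d41m04.488s 41d16m09.012s'
#     >>> c.to_string('hmsdms')    # '00h42m44.2992s +41d16m09.012s'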
def is_equivalent_frame(self, other):
"""
Checks if this object's frame is the same as that of the ``other``
object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two `SkyCoord` objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass.
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if not BaseCoordinateFrame._frameattr_equiv(getattr(self, fattrnm),
getattr(other, fattrnm)):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't frame-like")
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {}
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
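# Added illustrative sketch (not from the original source): the on-sky
# separation between two coordinates, with automatic frame conversion.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> c1 = SkyCoord(10. * u.deg, 20. * u.deg, frame='icrs')
#     >>> c2 = SkyCoord(11. * u.deg, 21. * u.deg, frame='fk5')
#     >>> sep = c1.separation(c2)  # Angle in degrees; c2 is first transformed to ICRS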
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
if not self.is_equivalent_frame(other):
try:
kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {}
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction (i.e., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction (i.e., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation : for the *total* angular offset (not broken out into components).
position_angle : for the direction of the offset.
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError('Tried to use spherical_offsets_to with two non-matching frames!')
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
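# Added illustrative sketch (not from the original source): both coordinates
# must already share the same frame. Values are hypothetical.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> c1 = SkyCoord(10. * u.deg, 20. * u.deg, frame='icrs')
#     >>> c2 = SkyCoord(10.1 * u.deg, 20.1 * u.deg, frame='icrs')
#     >>> dra, ddec = c1.spherical_offsets_to(c2)  # two Angle objects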
def directional_offset_by(self, position_angle, separation):
"""
Computes coordinates at the given offset from this coordinate.
Parameters
----------
position_angle : `~astropy.coordinates.Angle`
position_angle of offset
separation : `~astropy.coordinates.Angle`
offset angular separation
Returns
-------
newpoints : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
the given `position_angle` and `separation`.
Notes
-----
Returned SkyCoord frame retains only the frame attributes that are for
the resulting frame type. (e.g. if the input frame is
`~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
an ``obstime`` will not.)
For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
`~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
create a spherical frame with (lat=0, lon=0) at a reference point,
approximating an xy cartesian system for small offsets. This method
is distinct in that it is accurate on the sphere.
See Also
--------
position_angle : inverse operation for the ``position_angle`` component
separation : inverse operation for the ``separation`` component
"""
from . import angle_utilities
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
newlon, newlat = angle_utilities.offset_by(
lon=slon, lat=slat,
posang=position_angle, distance=separation)
return SkyCoord(newlon, newlat, frame=self.frame)
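# Added illustrative sketch (not from the original source): stepping 1 arcmin
# from a coordinate along position angle 45 deg (East of North).
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> c = SkyCoord(10. * u.deg, 20. * u.deg, frame='icrs')
#     >>> c_off = c.directional_offset_by(45 * u.deg, 1 * u.arcmin)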
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
"""
from .matching import match_coordinates_sky
if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
self_in_catalog_frame = self.transform_to(catalogcoord)
else:
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_sky(self_in_catalog_frame, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_sky')
return res
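# Added illustrative sketch (not from the original source): matching a small
# set of coordinates against a catalog (requires SciPy). Values are hypothetical.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> catalog = SkyCoord([10., 11., 12.] * u.deg, [20., 21., 22.] * u.deg)
#     >>> targets = SkyCoord([10.01, 11.99] * u.deg, [20.01, 21.98] * u.deg)
#     >>> idx, sep2d, dist3d = targets.match_to_catalog_sky(catalog)
#     >>> idx  # array([0, 2])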
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
"""
from .matching import match_coordinates_3d
if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
self_in_catalog_frame = self.transform_to(catalogcoord)
else:
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_3d(self_in_catalog_frame, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_3d')
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` with angle units
The on-sky separation to search within.
Returns
-------
idxsearcharound : integer array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : integer array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
"""
from .matching import search_around_sky
return search_around_sky(searcharoundcoords, self, seplimit,
storekdtree='_kdtree_sky')
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` with distance units
The physical radius to search within.
Returns
-------
idxsearcharound : integer array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : integer array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
"""
from .matching import search_around_3d
return search_around_3d(searcharoundcoords, self, distlimit,
storekdtree='_kdtree_3d')
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
`SkyCoord` and another.
Parameters
----------
other : `SkyCoord`
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get position_angle to another '
'SkyCoord or a coordinate frame with data')
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this `SkyCoord` at the origin.
Parameters
----------
rotation : `~astropy.coordinates.Angle` or `~astropy.units.Quantity` with angle units
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Returns
-------
astrframe : `~astropy.coordinates.SkyOffsetFrame`
A sky offset frame of the same type as this `SkyCoord` (e.g., if
this object has an ICRS coordinate, the resulting frame is
SkyOffsetICRS, with the origin set to this object)
"""
return SkyOffsetFrame(origin=self, rotation=rotation)
def get_constellation(self, short_name=False, constellation_list='iau'):
"""
Determines the constellation(s) of the coordinates this `SkyCoord`
contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array `SkyCoord`, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
"""
from .funcs import get_constellation
# because of issue #7028, the conversion to a PrecessedGeocentric
# system fails in some cases. The workaround is to drop the velocities;
# they are not needed here since only position information is used
extra_frameattrs = {nm: getattr(self, nm)
for nm in self._extra_frameattr_names}
novel = SkyCoord(self.realize_frame(self.data.without_differentials()),
**extra_frameattrs)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
# return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode='all'):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to use for the conversion
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
"""
from astropy.wcs.utils import skycoord_to_pixel
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode='all'):
"""
Create a new `SkyCoord` from pixel coordinates using an
`~astropy.wcs.WCS` object.
Parameters
----------
xp, yp : float or `numpy.ndarray`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS to use for the conversion
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
Returns
-------
coord : an instance of this class
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
"""
from astropy.wcs.utils import pixel_to_skycoord
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
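# Added illustrative sketch (not from the original source): round-tripping
# through pixel coordinates, assuming ``sc`` is an existing SkyCoord and
# ``wcs`` an existing `~astropy.wcs.WCS` instance.
#
#     >>> x, y = sc.to_pixel(wcs)
#     >>> sc_back = SkyCoord.from_pixel(x, y, wcs)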
def contained_by(self, wcs, image=None, **kwargs):
"""
Determines if the SkyCoord is contained in the given wcs footprint.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS footprint against which to check the coordinate.
image : array
Optional. The image associated with the wcs object that the coordinate
is being checked against. If not given, the naxis keywords will be used
to determine if the coordinate falls within the wcs footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
if image is not None:
ymax, xmax = image.shape
else:
xmax, ymax = wcs._naxis
import warnings
with warnings.catch_warnings():
# Suppress warnings since they just mean we didn't find the coordinate
warnings.simplefilter("ignore")
try:
x, y = self.to_pixel(wcs, **kwargs)
except Exception:
return False
return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)
def radial_velocity_correction(self, kind='barycentric', obstime=None,
location=None):
"""
Compute the correction required to convert a radial velocity at a given
time and place on the Earth's Surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the `SkyCoord` will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the `SkyCoord` will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this `SkyCoord`.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` with velocity units
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
heliocentric correction and includes additional physics (e.g. time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. The barycentric correction returned uses the optical
approximation v = z * c. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord, EarthLocation
>>> from astropy.constants import c
>>> t = Time(56370.5, format='mjd', scale='utc')
>>> loc = EarthLocation('149d33m00.5s','-30d18m46.385s',236.87*u.m)
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc) # doctest: +REMOTE_DATA
>>> rv = rv + vcorr + rv * vcorr / c # doctest: +SKIP
Also note that this method returns the correction velocity in the so-called
*optical convention*::
>>> vcorr = zb * c # doctest: +SKIP
where ``zb`` is the barycentric correction redshift as defined in section 3
of Wright & Eastman (2014). The application formula given above follows from their
equation (11) under the assumption that the radial velocity ``rv`` has also been defined
using the same optical convention. Note, this can be regarded as a matter of
velocity definition and does not by itself imply any loss of accuracy, provided
sufficient care has been taken during interpretation of the results. If you need
the barycentric correction expressed as the full relativistic velocity (e.g., to provide
it as the input to another software which performs the application), the
following recipe can be used::
>>> zb = vcorr / c # doctest: +REMOTE_DATA
>>> zb_plus_one_squared = (zb + 1) ** 2 # doctest: +REMOTE_DATA
>>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1) # doctest: +REMOTE_DATA
or alternatively using just equivalencies::
>>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz)) # doctest: +REMOTE_DATA
See also `~astropy.units.equivalencies.doppler_optical`,
`~astropy.units.equivalencies.doppler_radio`, and
`~astropy.units.equivalencies.doppler_relativistic` for more information on
the velocity conventions.
The default is for this method to use the builtin ephemeris for
computing the sun and earth location. Other ephemerides can be chosen
by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
either directly or via ``with`` statement. For example, to use the JPL
ephemeris, do::
>>> from astropy.coordinates import solar_system_ephemeris
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> with solar_system_ephemeris.set('jpl'): # doctest: +REMOTE_DATA
... rv += sc.radial_velocity_correction(obstime=t, location=loc) # doctest: +SKIP
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel
# location validation
timeloc = getattr(obstime, 'location', None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError('`location` cannot be in both the '
'passed-in `obstime` and this `SkyCoord` '
'because it is ambiguous which is meant '
'for the radial_velocity_correction.')
elif timeloc is not None:
location = timeloc
else:
raise TypeError('Must provide a `location` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute, as an attribute on '
'the passed in `obstime`, or in the method '
'call.')
elif self.location is not None or timeloc is not None:
raise ValueError('Cannot compute radial velocity correction if '
'`location` argument is passed in and there is '
'also a `location` attribute on this SkyCoord or '
'the passed-in `obstime`.')
# obstime validation
coo_at_rv_obstime = self # assume we need no space motion for now
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError('Must provide an `obstime` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute or in the method '
'call.')
elif self.obstime is not None and self.frame.data.differentials:
# we do need space motion after all
coo_at_rv_obstime = self.apply_space_motion(obstime)
elif self.obstime is None:
# warn the user if the object has differentials set
if 's' in self.data.differentials:
warnings.warn(
"SkyCoord has space motion, and therefore the specified "
"position of the SkyCoord may not be the same as "
"the `obstime` for the radial velocity measurement. "
"This may affect the rv correction at the order of km/s"
"for very high proper motions sources. If you wish to "
"apply space motion of the SkyCoord to correct for this"
"the `obstime` attribute of the SkyCoord must be set",
AstropyUserWarning
)
pos_earth, v_earth = get_body_barycentric_posvel('earth', obstime)
if kind == 'barycentric':
v_origin_to_earth = v_earth
elif kind == 'heliocentric':
v_sun = get_body_barycentric_posvel('sun', obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError("`kind` argument to radial_velocity_correction must "
"be 'barycentric' or 'heliocentric', but got "
"'{}'".format(kind))
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
# transforming to GCRS is not the correct thing to do here, since we don't want to
# include aberration (or light deflection)? Instead, only apply parallax if necessary
icrs_cart = coo_at_rv_obstime.icrs.cartesian
icrs_cart_novel = icrs_cart.without_differentials()
if self.data.__class__ is UnitSphericalRepresentation:
targcart = icrs_cart_novel
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
targcart = icrs_cart_novel - obs_icrs_cart
targcart /= targcart.norm()
if kind == 'barycentric':
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm()**2)
gr = location.gravitational_redshift(obstime)
# barycentric redshift according to eq 28 in Wright & Eastmann (2014),
# neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr/speed_of_light)
# try and get terms corresponding to stellar motion.
if icrs_cart.differentials:
try:
ro = self.icrs.cartesian
beta_star = ro.differentials['s'].to_cartesian() / speed_of_light
# ICRS unit vector at coordinate epoch
ro = ro.without_differentials()
ro /= ro.norm()
zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
except u.UnitConversionError:
warnings.warn("SkyCoord contains some velocity information, but not enough to "
"calculate the full space motion of the source, and so this has "
"been ignored for the purposes of calculating the radial velocity "
"correction. This can lead to errors on the order of metres/second.",
AstropyUserWarning)
zb = zb - 1
return zb * speed_of_light
else:
# do a simpler correction ignoring time dilation and gravitational redshift
# this is adequate since Heliocentric corrections shouldn't be used if
# cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new `SkyCoord` from the data
in an astropy Table.
This method matches table columns that start with the case-insensitive
names of the components of the requested frames, if they are also
followed by a non-alphanumeric character. It will also match columns
that *end* with the component name if a non-alphanumeric character is
*before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
``'gal_l'`` will be used as the ``l`` component, but ``gall`` or
``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
table : astropy.Table
The table to load data from.
coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : same as this class
The new `SkyCoord` (or subclass) object.
"""
_frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
frame = _frame_cls(**_frame_kwargs)
coord_kwargs['frame'] = coord_kwargs.get('frame', frame)
comp_kwargs = {}
for comp_name in frame.representation_component_names:
# this matches things like 'ra[...]' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r'(\W|\b|_)'
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r'.*(\W|\b|_)' + comp_name + r'\b'
# the final regex ORs together the two patterns
rex = re.compile('(' + starts_with_comp + ')|(' + ends_with_comp + ')',
re.IGNORECASE | re.UNICODE)
for col_name in table.colnames:
if rex.match(col_name):
if comp_name in comp_kwargs:
oldname = comp_kwargs[comp_name].name
msg = ('Found at least two matches for component "{0}"'
': "{1}" and "{2}". Cannot continue with this '
'ambiguity.')
raise ValueError(msg.format(comp_name, oldname, col_name))
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError('Found column "{}" in table, but it was '
'already provided as "{}" keyword to '
'guess_from_table function.'.format(v.name, k))
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
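# Added illustrative sketch (not from the original source): column names are
# matched against the frame component names ('ra', 'dec' for the default ICRS
# frame). The table below is hypothetical.
#
#     >>> import astropy.units as u
#     >>> from astropy.table import Table
#     >>> t = Table()
#     >>> t['RA[J2000]'] = [10., 11.] * u.deg
#     >>> t['DEC[J2000]'] = [20., 21.] * u.deg
#     >>> sc = SkyCoord.guess_from_table(t)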
# Name resolve
@classmethod
def from_name(cls, name, frame='icrs', parse=False, cache=True):
"""
Given a name, query the CDS name resolver to attempt to retrieve
coordinate information for that object. The search database, sesame
url, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
parse : bool
Whether to attempt extracting the coordinates from the name by
parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.,
'CRTS SSS100805 J194428-420209', this may be much faster than a
Sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, optional
Determines whether to cache the results or not. To update or
overwrite an existing value, pass ``cache='update'``.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
icrs_sky_coord = cls(icrs_coord)
if frame in ('icrs', icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
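# Added illustrative sketch (not from the original source): resolving an
# object name via the Sesame service (requires network access).
#
#     >>> m42 = SkyCoord.from_name('M42')                      # ICRS by default
#     >>> m42_gal = SkyCoord.from_name('M42', frame='galactic')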
|
# Copyright (c) 2012 Adi Roiban.
# See LICENSE for details.
"""
Unit tests for empirical package.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.table import Table
from gammapy.catalog.fermi import SourceCatalog3FGL
from gammapy.estimators import FluxPoints
from gammapy.modeling.models import SpectralModel
from gammapy.utils.scripts import make_path
from gammapy.utils.testing import (
assert_quantity_allclose,
mpl_plot_check,
requires_data,
requires_dependency,
)
FLUX_POINTS_FILES = [
"diff_flux_points.ecsv",
"diff_flux_points.fits",
"flux_points.ecsv",
"flux_points.fits",
]
class LWTestModel(SpectralModel):
@staticmethod
def evaluate(x):
return 1e4 * np.exp(-6 * x)
def integral(self, xmin, xmax, **kwargs):
return -1.0 / 6 * 1e4 * (np.exp(-6 * xmax) - np.exp(-6 * xmin))
def inverse(self, y):
return -1.0 / 6 * np.log(y * 1e-4)
class XSqrTestModel(SpectralModel):
@staticmethod
def evaluate(x):
return x ** 2
def integral(self, xmin, xmax, **kwargs):
return 1.0 / 3 * (xmax ** 3 - xmin ** 3)
def inverse(self, y):
return np.sqrt(y)
class ExpTestModel(SpectralModel):
@staticmethod
def evaluate(x):
return np.exp(x * u.Unit("1 / TeV"))
def integral(self, xmin, xmax, **kwargs):
return np.exp(xmax * u.Unit("1 / TeV")) - np.exp(xmin * u.Unit("1 / TeV"))
def inverse(self, y):
return np.log(y * u.TeV) * u.TeV
def test_energy_ref_lafferty():
"""
Tests Lafferty & Wyatt x-point method.
Using input function g(x) = 10^4 exp(-6x) against
check values from paper Lafferty & Wyatt. Nucl. Instr. and Meth. in Phys.
Res. A 355 (1995) 541-547, p. 542 Table 1
"""
# These are the results from the paper
desired = np.array([0.048, 0.190, 0.428, 0.762])
model = LWTestModel()
energy_min = np.array([0.0, 0.1, 0.3, 0.6])
energy_max = np.array([0.1, 0.3, 0.6, 1.0])
actual = FluxPoints._energy_ref_lafferty(model, energy_min, energy_max)
assert_allclose(actual, desired, atol=1e-3)
@pytest.mark.xfail
def test_dnde_from_flux():
"""Tests y-value normalization adjustment method.
"""
table = Table()
table["e_min"] = np.array([10, 20, 30, 40])
table["e_max"] = np.array([20, 30, 40, 50])
table["flux"] = np.array([42, 52, 62, 72]) # 'True' integral flux in this test bin
# Get values
model = XSqrTestModel()
table["e_ref"] = FluxPoints._energy_ref_lafferty(model, table["e_min"], table["e_max"])
dnde = FluxPoints.from_table(table, reference_model=model)
# Set up test case comparison
dnde_model = model(table["e_ref"])
# Test comparison result
desired = model.integral(table["e_min"], table["e_max"])
# Test output result
actual = table["flux"] * (dnde_model / dnde)
# Compare
assert_allclose(actual, desired, rtol=1e-6)
@pytest.mark.xfail
@pytest.mark.parametrize("method", ["table", "lafferty", "log_center"])
def test_compute_flux_points_dnde_exp(method):
"""
Tests against analytical result or result from a powerlaw.
"""
model = ExpTestModel()
energy_min = [1.0, 10.0] * u.TeV
energy_max = [10.0, 100.0] * u.TeV
table = Table()
table.meta["SED_TYPE"] = "flux"
table["e_min"] = energy_min
table["e_max"] = energy_max
flux = model.integral(energy_min, energy_max)
table["flux"] = flux
if method == "log_center":
energy_ref = np.sqrt(energy_min * energy_max)
elif method == "table":
energy_ref = [2.0, 20.0] * u.TeV
elif method == "lafferty":
energy_ref = FluxPoints._energy_ref_lafferty(model, energy_min, energy_max)
table["e_ref"] = energy_ref
result = FluxPoints.from_table(table, reference_model=model)
# Test energy
actual = result.energy_ref
assert_quantity_allclose(actual, energy_ref, rtol=1e-8)
# Test flux
actual = result.dnde
desired = model(energy_ref)
assert_quantity_allclose(actual, desired, rtol=1e-8)
@requires_data()
def test_fermi_to_dnde():
from gammapy.catalog import SourceCatalog4FGL
catalog_4fgl = SourceCatalog4FGL("$GAMMAPY_DATA/catalogs/fermi/gll_psc_v20.fit.gz")
src = catalog_4fgl["FGES J1553.8-5325"]
fp = src.flux_points
assert_allclose(
fp.dnde.quantity[1, 0, 0],
4.567393e-10 * u.Unit("cm-2 s-1 MeV-1"),
rtol=1e-5,
)
@pytest.fixture(params=FLUX_POINTS_FILES, scope="session")
def flux_points(request):
path = "$GAMMAPY_DATA/tests/spectrum/flux_points/" + request.param
return FluxPoints.read(path)
@pytest.fixture(scope="session")
def flux_points_likelihood():
path = "$GAMMAPY_DATA/tests/spectrum/flux_points/binlike.fits"
return FluxPoints.read(path)
@requires_data()
class TestFluxPoints:
def test_info(self, flux_points):
info = str(flux_points)
assert "geom" in info
assert "axes" in info
assert "ref. model" in info
assert "quantities" in info
def test_energy_ref(self, flux_points):
actual = flux_points.energy_ref
desired = np.sqrt(flux_points.energy_min * flux_points.energy_max)
assert_quantity_allclose(actual, desired)
def test_energy_min(self, flux_points):
actual = flux_points.energy_min
desired = 299530.97 * u.MeV
assert_quantity_allclose(actual.sum(), desired)
def test_energy_max(self, flux_points):
actual = flux_points.energy_max
desired = 399430.975 * u.MeV
assert_quantity_allclose(actual.sum(), desired)
def test_write_fits(self, tmp_path, flux_points):
flux_points.write(tmp_path / "tmp.fits", sed_type=flux_points.sed_type_init)
actual = FluxPoints.read(tmp_path / "tmp.fits")
assert str(flux_points) == str(actual)
def test_write_ecsv(self, tmp_path, flux_points):
flux_points.write(tmp_path / "flux_points.ecsv", sed_type=flux_points.sed_type_init)
actual = FluxPoints.read(tmp_path / "flux_points.ecsv")
assert str(flux_points) == str(actual)
def test_quantity_access(self, flux_points_likelihood):
assert flux_points_likelihood.sqrt_ts
assert flux_points_likelihood.ts
assert flux_points_likelihood.stat
assert_allclose(flux_points_likelihood.n_sigma_ul, 2)
assert flux_points_likelihood.sed_type_init == "likelihood"
@requires_dependency("matplotlib")
def test_plot(self, flux_points):
with mpl_plot_check():
flux_points.plot()
@requires_dependency("matplotlib")
def test_plot_likelihood(self, flux_points_likelihood):
with mpl_plot_check():
flux_points_likelihood.plot_ts_profiles()
@requires_dependency("matplotlib")
def test_plot_likelihood_error(self, flux_points_likelihood):
del flux_points_likelihood._data["stat_scan"]
with pytest.raises(AttributeError):
flux_points_likelihood.plot_ts_profiles()
@requires_data()
def test_compute_flux_points_dnde_fermi():
"""
Test compute_flux_points_dnde on fermi source.
"""
fermi_3fgl = SourceCatalog3FGL()
source = fermi_3fgl["3FGL J0835.3-4510"]
flux_points = source.flux_points
table = source.flux_points_table
for column in ["e2dnde", "e2dnde_errn", "e2dnde_errp", "e2dnde_ul"]:
actual = table[column].quantity
desired = getattr(flux_points, column).quantity.squeeze()
assert_quantity_allclose(actual[:-1], desired[:-1], rtol=0.05)
@requires_data()
@requires_dependency("matplotlib")
def test_plot_fp_no_ul():
path = make_path("$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits")
table = Table.read(path)
table.remove_column('dnde_ul')
fp = FluxPoints.from_table(table, sed_type='dnde')
with mpl_plot_check():
fp.plot()
|
import os
from os.path import join, dirname
from dotenv import load_dotenv
from urllib.parse import urlparse
# loading .env file
env_path = join(dirname(__file__), '.env')
load_dotenv(env_path)
# use function
def url_path_check(path):
sample_host = 'http://localhost'
sample_url = sample_host + path
if urlparse(sample_url) and urlparse(sample_url).path == path:
return path
return None
def number_check(num=None):
try:
    return int(num)
except (TypeError, ValueError):
    return None
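# Added illustrative sketch of the helpers above (hypothetical values):
#   url_path_check('/docs')  -> '/docs'
#   url_path_check('docs')   -> None   (no leading slash, so the parsed path differs)
#   number_check('5432')     -> 5432
#   number_check(None)       -> None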
# Register Env Param
try:
API_AUTH_FEATURE = os.environ.get('API_AUTH_FEATURE', 'False').lower() == 'true'
DEFAULT_LANGUAGE = os.environ.get('DEFAULT_LANGUAGE') or 'ja'
VERSION = os.environ.get('VERSION') or '1.0.0'
SHOW_SWAGGER_PATH = url_path_check(os.environ.get('SHOW_SWAGGER_PATH') or "") or None
SHOW_REDOC_PATH = url_path_check(os.environ.get('SHOW_REDOC_PATH') or "") or None
SHOW_OPENAPI_PATH = url_path_check(os.environ.get('SHOW_OPENAPI_PATH') or "") or None
DB_HOST = os.environ.get('DB_HOST') or 'pgsql'
DB_PORT = number_check(os.environ.get('DB_PORT')) or 5432
DB_USER = os.environ.get('DB_USER') or 'postgres'
DB_PASSWORD = os.environ.get('DB_PASSWORD') or 'postgres'
DATABASE = os.environ.get('DATABASE') or 'postgres'
except Exception:
print("defined param error: check .env file")
raise
|
import numpy as np
from util import util
from config.draco3_lb_config import PnCConfig, WBCConfig
from pnc.wbc.ihwbc.ihwbc import IHWBC
from pnc.wbc.ihwbc.joint_integrator import JointIntegrator
class Draco3LBController(object):
def __init__(self, tci_container, robot):
self._tci_container = tci_container
self._robot = robot
# Initialize WBC
l_jp_idx, l_jd_idx, r_jp_idx, r_jd_idx = self._robot.get_q_dot_idx(
['l_knee_fe_jp', 'l_knee_fe_jd', 'r_knee_fe_jp', 'r_knee_fe_jd'])
act_list = [False] * robot.n_floating + [True] * robot.n_a
act_list[l_jd_idx] = False
act_list[r_jd_idx] = False
n_q_dot = len(act_list)
n_active = np.count_nonzero(np.array(act_list))
n_passive = n_q_dot - n_active - 6
self._sa = np.zeros((n_active, n_q_dot))
self._sv = np.zeros((n_passive, n_q_dot))
j, k = 0, 0
for i in range(n_q_dot):
if i >= 6:
if act_list[i]:
self._sa[j, i] = 1.
j += 1
else:
self._sv[k, i] = 1.
k += 1
self._sf = np.zeros((6, n_q_dot))
self._sf[0:6, 0:6] = np.eye(6)
self._ihwbc = IHWBC(self._sf, self._sa, self._sv, PnCConfig.SAVE_DATA)
if WBCConfig.B_TRQ_LIMIT:
self._ihwbc.trq_limit = np.dot(self._sa[:, 6:],
self._robot.joint_trq_limit)
self._ihwbc.lambda_q_ddot = WBCConfig.LAMBDA_Q_DDOT
self._ihwbc.lambda_rf = WBCConfig.LAMBDA_RF
# Initialize Joint Integrator
self._joint_integrator = JointIntegrator(robot.n_a,
PnCConfig.CONTROLLER_DT)
self._joint_integrator.pos_cutoff_freq = WBCConfig.POS_CUTOFF_FREQ
self._joint_integrator.vel_cutoff_freq = WBCConfig.VEL_CUTOFF_FREQ
self._joint_integrator.max_pos_err = WBCConfig.MAX_POS_ERR
self._joint_integrator.joint_pos_limit = self._robot.joint_pos_limit
self._joint_integrator.joint_vel_limit = self._robot.joint_vel_limit
self._b_first_visit = True
def get_command(self):
if self._b_first_visit:
self.first_visit()
# Dynamics properties
mass_matrix = self._robot.get_mass_matrix()
mass_matrix_inv = np.linalg.inv(mass_matrix)
coriolis = self._robot.get_coriolis()
gravity = self._robot.get_gravity()
self._ihwbc.update_setting(mass_matrix, mass_matrix_inv, coriolis,
gravity)
# Task, Contact, and Internal Constraint Setup
w_hierarchy_list = []
for task in self._tci_container.task_list:
task.update_jacobian()
task.update_cmd()
w_hierarchy_list.append(task.w_hierarchy)
self._ihwbc.w_hierarchy = np.array(w_hierarchy_list)
for contact in self._tci_container.contact_list:
contact.update_contact()
for internal_constraint in self._tci_container.internal_constraint_list:
internal_constraint.update_internal_constraint()
# WBC commands
joint_trq_cmd, joint_acc_cmd, rf_cmd = self._ihwbc.solve(
self._tci_container.task_list, self._tci_container.contact_list,
self._tci_container.internal_constraint_list)
joint_trq_cmd = np.dot(self._sa[:, 6:].transpose(), joint_trq_cmd)
joint_acc_cmd = np.dot(self._sa[:, 6:].transpose(), joint_acc_cmd)
# Double integration
joint_vel_cmd, joint_pos_cmd = self._joint_integrator.integrate(
joint_acc_cmd, self._robot.joint_velocities,
self._robot.joint_positions)
command = self._robot.create_cmd_ordered_dict(joint_pos_cmd,
joint_vel_cmd,
joint_trq_cmd)
return command
def first_visit(self):
joint_pos_ini = self._robot.joint_positions
self._joint_integrator.initialize_states(np.zeros(self._robot.n_a),
joint_pos_ini)
self._b_first_visit = False
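# Illustrative sketch (not part of the original controller): how the selection matrices
# built in __init__ split a generalized-velocity vector into actuated and passive joint
# parts. The dimensions below are made up for the example; the real ones come from the
# robot model.
if __name__ == "__main__":
    n_floating, n_joints = 6, 4
    act_list = [False] * n_floating + [True, False, True, True]  # one passive joint
    n_q_dot = len(act_list)
    sa = np.zeros((sum(act_list), n_q_dot))  # actuated-joint selector
    sv = np.zeros((n_q_dot - sum(act_list) - n_floating, n_q_dot))  # passive-joint selector
    j = k = 0
    for i in range(n_floating, n_q_dot):
        if act_list[i]:
            sa[j, i], j = 1., j + 1
        else:
            sv[k, i], k = 1., k + 1
    q_dot = np.arange(n_q_dot, dtype=float)
    print("actuated velocities:", sa.dot(q_dot))  # picks indices 6, 8, 9
    print("passive velocities:", sv.dot(q_dot))   # picks index 7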
|
#!/usr/local/bin/python
# encoding: utf-8
"""
*Convert the HTML export of kindle notebooks (from kindle apps) to markdown*
:Author:
David Young
:Date Created:
October 17, 2016
"""
################# GLOBAL IMPORTS ####################
import sys
import os
import re
import collections
os.environ['TERM'] = 'vt100'
from fundamentals import tools
# THESE ARE THE 4 KINDLE COLORS AND HOW THEY TRANSLATE TO MD
colorCode = {
"blue": "code",
"yellow": "text",
"orange": "quote",
"pink": "header"
}
class kindle_notebook():
"""
*convert the HTML export of kindle notebooks (from kindle apps) to markdown*
**Key Arguments:**
- ``log`` -- logger
- ``kindleExportPath`` -- path to the exported kindle HTML file
- ``outputPath`` -- the output path to the md file.
**Usage:**
To convert the exported HTML file of annotation and notes from a kindle book or document to markdown, run the code:
.. code-block:: python
from polyglot.markdown import kindle_notebook
nb = kindle_notebook(
log=log,
kindleExportPath="/path/to/kindle_export.html",
outputPath="/path/to/coverted_annotations.md"
)
nb.convert()
The colours of the annotations convert to markdown attributes via the following key:
    .. code-block:: json
colorCode = {
"blue": "code",
"yellow": "text",
"orange": "quote",
"pink": "header"
}
"""
# Initialisation
def __init__(
self,
log,
kindleExportPath,
outputPath
):
self.log = log
log.debug("instansiating a new 'kindle_notebook' object")
self.kindleExportPath = kindleExportPath
self.outputPath = outputPath
# xt-self-arg-tmpx
# Initial Actions
return None
def convert(self):
"""
*convert the kindle_notebook object*
**Return:**
            - the path to the written markdown file, or ``False`` if no annotations were found
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- update the package tutorial if needed
.. code-block:: python
            nb.convert()
"""
self.log.debug('starting the ``convert`` method')
import codecs
pathToReadFile = self.kindleExportPath
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
readFile = codecs.open(pathToReadFile, encoding='utf-8', mode='r')
annotations = readFile.read()
readFile.close()
        except IOError:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
annotations = annotations.replace(u"’", "'").replace(
u"“ ", '"').replace(u"“", '"').replace(u"”", '"').replace(u"–", "-").replace(u"—", "-")
# COLLECT KEY COMPONENTS
try:
title = self.find_component("bookTitle", annotations)
        except AttributeError:
            # no bookTitle div was found in the export
            return False
regex = re.compile(r'_xx\d*xx$')
title = regex.sub("", title)
authors = self.find_component("authors", annotations)
citation = self.find_component("citation", annotations)
# CLEAN THE CITATION
regex = re.compile(r'</?i>', re.S)
citation = regex.sub('*', citation)
regex = re.compile(r'Citation \(.*?\): ', re.S)
citation = regex.sub('', citation).replace(" Kindle edition.", "")
# COLLECT ANNOTATIONS
annotationDict = {}
matchObject = re.finditer(
r"""<div class="noteHeading">\s+Highlight\(<span.*?>(?P<color>.*?)</span>\)((?P<section>.*?)Page (?P<page>\d+))?.*?Location (?P<location>\d+)\s+</div>\s+<div class="noteText">(?P<note>.*?)</div>""",
annotations,
flags=re.S
)
for match in matchObject:
location = int(match.group("location"))
location = "%(location)09d" % locals()
if match.group("page"):
try:
annotationDict[location] = {"color": match.group("color"), "page": match.group(
"page"), "section": self.clean(match.group("section"))[3:-2], "note": self.clean(match.group("note"))}
                except Exception:
                    print(match.group("note"))
sys.exit(0)
else:
try:
annotationDict[location] = {"color": match.group(
"color"), "note": self.clean(match.group("note"))}
                except Exception:
                    print(match.group("note"))
sys.exit(0)
# COLLECT PERSONAL NOTES
matchObject = re.finditer(
r"""<div class="noteHeading">\s+Note -( Page (?P<page>\d+))?.*?Location (?P<location>\d+)\s+</div>\s+<div class="noteText">(?P<note>.*?)</div>""",
annotations,
flags=re.S
)
for match in matchObject:
location = int(match.group("location"))
location = "%(location)09dnote" % locals()
if match.group("page"):
annotationDict[location] = {"color": None, "page": match.group(
"page"), "note": self.clean(match.group("note"))}
else:
annotationDict[location] = {
"color": None, "note": self.clean(match.group("note"))}
annotationDict = collections.OrderedDict(
sorted(annotationDict.items()))
mdContent = "\n# %(title)s\n\nAuthors: **%(authors)s**\n\n" % locals()
        for k, v in annotationDict.items():
mdContent += self.convertToMD(v) + "\n\n"
if len(annotationDict) == 0:
return False
pathToWriteFile = self.outputPath
try:
self.log.debug("attempting to open the file %s" %
(pathToWriteFile,))
writeFile = codecs.open(
pathToWriteFile, encoding='utf-8', mode='w')
        except IOError:
message = 'could not open the file %s' % (pathToWriteFile,)
self.log.critical(message)
raise IOError(message)
writeFile.write(mdContent)
writeFile.close()
self.log.debug('completed the ``convert`` method')
return pathToWriteFile
def clean(self, text):
return text.strip().replace(u"’", "'").replace(u"“ ", '"').replace(u"“", '"').replace(u"”", '"').replace(u"–", "-").replace(u"—", "-")
def find_component(self, divtag, annotations):
component = re.search(
r"""<div class="%(divtag)s">(.*?)</div>""" % locals(), annotations, re.S)
return self.clean(component.group(1))
def convertToMD(self, kindleNote):
if kindleNote["color"] == None:
return "**NOTE**\n: " + kindleNote["note"].replace("\n", " ")
mdType = colorCode[kindleNote["color"]]
if mdType == "code":
return "```\n" + kindleNote["note"] + "\n```"
elif mdType == "text":
return kindleNote["note"]
elif mdType == "header":
regex = re.compile(r'_xx\d*xx$')
kindleNote["note"] = regex.sub("", kindleNote["note"])
return "## " + kindleNote["note"].replace("\n", " ").replace(" ", " ").replace(" ", " ").replace(" ", " ")
elif mdType == "quote":
return "> " + kindleNote["note"].replace("\n", "> ")
# xt-class-method
    # 5. @flagged: what actions of the base class(es) need amending? amend them here
# Override Method Attributes
# method-override-tmpx
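# Illustrative sketch (not part of the original module): minimal usage with a plain
# standard-library logger instead of the project's own logging setup. The file paths
# below are placeholders.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.DEBUG)
    nb = kindle_notebook(
        log=logging.getLogger(__name__),
        kindleExportPath="/path/to/kindle_export.html",
        outputPath="/path/to/converted_annotations.md"
    )
    print(nb.convert())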
|
from collections import defaultdict
from threading import local
import pymantic.primitives
class BaseParser(object):
"""Common base class for all parsers
Provides shared utilities for creating RDF objects, handling IRIs, and
tracking parser state.
"""
def __init__(self, environment=None):
self.env = environment or pymantic.primitives.RDFEnvironment()
self.profile = self.env.createProfile()
self._call_state = local()
def make_datatype_literal(self, value, datatype):
return self.env.createLiteral(value=value, datatype=datatype)
def make_language_literal(self, value, lang=None):
if lang:
return self.env.createLiteral(value=value, language=lang)
else:
return self.env.createLiteral(value=value)
def make_named_node(self, iri):
return self.env.createNamedNode(iri)
def make_blank_node(self, label=None):
if label:
return self._call_state.bnodes[label]
else:
return self.env.createBlankNode()
def make_triple(self, subject, predicate, object):
return self.env.createTriple(subject, predicate, object)
def make_quad(self, subject, predicate, object, graph):
return self.env.createQuad(subject, predicate, object, graph)
def _prepare_parse(self, graph):
self._call_state.bnodes = defaultdict(self.env.createBlankNode)
self._call_state.graph = graph
def _cleanup_parse(self):
del self._call_state.bnodes
del self._call_state.graph
def _make_graph(self):
return self.env.createGraph()
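# Illustrative sketch (not part of the original module): how a concrete parser might use
# the helpers above. Only methods defined in this class (and the environment calls they
# wrap) are exercised; storing triples in the graph is left to the real subclasses.
class _ExampleParser(BaseParser):
    def parse_single_statement(self, subject_iri, predicate_iri, value):
        graph = self._make_graph()
        self._prepare_parse(graph)
        triple = self.make_triple(
            self.make_named_node(subject_iri),
            self.make_named_node(predicate_iri),
            self.make_language_literal(value, "en"),
        )
        self._cleanup_parse()
        return triple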
|
from huobi.exception.huobiapiexception import HuobiApiException
from huobi.impl.restapiinvoker import call_sync
from huobi.model.user import User
class AccountInfoMap:
user_map = dict()
account_id_type_map = dict()
account_type_id_map = dict()
def update_user_info(self, api_key, request_impl):
accounts = call_sync(request_impl.get_accounts())
user = User()
user.accounts = accounts
self.user_map[api_key] = user
        if accounts:
self.account_id_type_map[api_key] = {}
self.account_type_id_map[api_key] = {}
for account_item in accounts:
self.account_id_type_map[api_key][account_item.id] = account_item.account_type
self.account_type_id_map[api_key][account_item.account_type] = account_item.id
def get_user(self, api_key):
if api_key is None or api_key == "":
raise HuobiApiException(HuobiApiException.KEY_MISSING, "[User] Key is empty or null")
if api_key not in self.user_map:
raise HuobiApiException(HuobiApiException.RUNTIME_ERROR, "[User] Cannot found user by key: " + api_key)
return self.user_map[api_key]
def get_account_by_id(self, api_key, account_id):
user = self.get_user(api_key)
account = user.get_account_by_id(account_id)
if account is None:
raise HuobiApiException(HuobiApiException.RUNTIME_ERROR,
"[User] Cannot find the account, key: " +
api_key + ", account id: " + str(account_id))
return account
def get_all_accounts(self, api_key):
user = self.get_user(api_key)
return user.accounts
def get_account_type_by_id(self, api_key, account_id):
if api_key is None or api_key == "":
raise HuobiApiException(HuobiApiException.KEY_MISSING, "[User] Key is empty or null")
if api_key not in self.account_id_type_map:
raise HuobiApiException(HuobiApiException.RUNTIME_ERROR, "[User] Cannot found account_id by key: " + api_key)
return self.account_id_type_map.get(api_key, {}).get(account_id, None)
def get_account_id_by_type(self, api_key, account_type):
if api_key is None or api_key == "":
raise HuobiApiException(HuobiApiException.KEY_MISSING, "[User] Key is empty or null")
if api_key not in self.account_type_id_map:
raise HuobiApiException(HuobiApiException.RUNTIME_ERROR, "[User] Cannot found account_type by key: " + api_key)
return self.account_type_id_map.get(api_key, {}).get(account_type, None)
def get_all_accounts_without_check(self, api_key):
if api_key is None or api_key == "":
raise HuobiApiException(HuobiApiException.KEY_MISSING, "[User] Key is empty or null")
user = self.user_map.get(api_key, None)
return None if (user is None) else user.accounts
account_info_map = AccountInfoMap()
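# Illustrative sketch (not part of the original module): intended call order for the
# module-level singleton. ``request_impl`` stands for the request-builder object the
# caller already holds (as used in update_user_info above); the API key is a placeholder.
#
#     account_info_map.update_user_info("my-api-key", request_impl)   # populate caches
#     accounts = account_info_map.get_all_accounts("my-api-key")      # cached User.accounts
#     spot_id = account_info_map.get_account_id_by_type("my-api-key", "spot")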
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr
from bin_data import bin_data
# import pixel data
right_z_pixel_change = np.load("right_z_pixel_change.npy")
left_z_pixel_change = np.load("left_z_pixel_change.npy")
front_z_pixel_change = np.load("front_z_pixel_change.npy")
# average pixel change across front, left & right fovs
pixel_change = np.vstack((left_z_pixel_change, front_z_pixel_change, right_z_pixel_change)).mean(axis=0)
# import rate change data
dat = pd.read_pickle("df_population_vector_change.p")
# Clean the data (sequential data points are 1cm apart along trajectory)
dat = dat[dat.environment == 'D']
df = dat.filter(['animal', 'x_coord', 'y_coord', 'direction', 'timestamp'], axis=1)
dat = dat[~df.isnull().any(axis=1)]
good_pixel_ids = np.array(np.diff(dat.x_coord)**2 + np.diff(dat.y_coord)**2 < 1.01, dtype=bool)
pixel_change = pixel_change[good_pixel_ids]
good_rate_ids = np.append(False, good_pixel_ids)
turning_rate = np.abs(np.diff(dat['direction'])) % 360
turning_rate = turning_rate[good_pixel_ids]
dat = dat[good_rate_ids]
# z-score data
dat['rate change\n(euclidean)'] = (dat['rate change\n(euclidean)'] - np.mean(dat['rate change\n(euclidean)']))/np.std(dat['rate change\n(euclidean)'])
pixel_change = (pixel_change - np.mean(pixel_change))/np.std(pixel_change)
# Plot Occupancy
occupancy = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)])
plt.imshow(occupancy.T, origin='upper', cmap=plt.get_cmap('jet'))
plt.title('Occupancy')
plt.show()
# Plot pixel change across space
pixel_change_map = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change) / occupancy
plt.imshow(pixel_change_map.T, origin='upper', cmap=plt.get_cmap('jet'))
plt.axis('off')
plt.clim([-1.5,1.5])
plt.title('Pixel Change Map')
plt.show()
# Plot firing rate change across space
rate_change_map = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)']) / occupancy
plt.imshow(rate_change_map.T, origin='upper', cmap=plt.get_cmap('jet'))
plt.axis('off')
plt.clim([-1.5,1.5])
plt.title('Rate Change Map')
plt.show()
corr, _ = pearsonr(pixel_change, dat['rate change\n(euclidean)'])
print('Rate Change vs Pixel Change Pearson r = %.3f' % corr)
# Filter bits of trajectory by head direction
north_ids = (np.degrees(dat.direction) % 360 >= 315) | (np.degrees(dat.direction) % 360 < 45)
north_occupancy = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
south_ids = (np.degrees(dat.direction) % 360 >= 135) & (np.degrees(dat.direction) % 360 < 225)
south_occupancy = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
east_ids = (np.degrees(dat.direction) % 360 >= 45) & (np.degrees(dat.direction) % 360 < 135)
east_occupancy = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
west_ids = (np.degrees(dat.direction) % 360 >= 225) & (np.degrees(dat.direction) % 360 < 315)
west_occupancy = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
cmap = plt.get_cmap('jet')
cmap.set_bad('w',1.)
# Calculate pixel and rate change maps by heading direction
north_pix_map = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[north_ids]) / north_occupancy
south_pix_map = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[south_ids]) / south_occupancy
east_pix_map = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[east_ids]) / east_occupancy
west_pix_map = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[west_ids]) / west_occupancy
north_rat_map = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][north_ids]) / north_occupancy
south_rat_map = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][south_ids]) / south_occupancy
east_rat_map = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][east_ids]) / east_occupancy
west_rat_map = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][west_ids]) / west_occupancy
c_lo = -1.5
c_hi = 1.5
# Plot change maps filtered by direction
plt.subplot(3,3,2)
plt.title('Unfolded Pixel Change Map')
plt.imshow(west_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,4)
plt.imshow(south_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,5)
plt.imshow(pixel_change_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,6)
plt.imshow(north_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,8)
plt.imshow(east_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.show()
plt.subplot(3,3,2)
plt.title('Unfolded Rate Change Map')
plt.imshow(west_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,4)
plt.imshow(south_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,5)
plt.imshow(rate_change_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,6)
plt.imshow(north_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,8)
plt.imshow(east_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.show()
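# Illustrative extension (not part of the original analysis): correlate the two binned
# maps directly, ignoring bins that were never visited (division by zero occupancy above
# leaves those bins as nan/inf).
map_a = pixel_change_map.flatten()
map_b = rate_change_map.flatten()
valid = np.isfinite(map_a) & np.isfinite(map_b)
map_corr, _ = pearsonr(map_a[valid], map_b[valid])
print('Binned map correlation Pearson r = %.3f' % map_corr)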
|
"""A set of classes used during the parsing of VB code"""
StopSearch = -9999 # Used to terminate searches for parent properties
class VBElement(object):
"""An element of VB code"""
def __init__(self, details, text):
"""Initialize from the details"""
# import pdb; pdb.set_trace()
self.name = details[0]
self.text = makeUnicodeFromSafe(text[details[1]:details[2]])
self.elements = convertToElements(details[3], text)
def printTree(self, offset=0):
"""Print out this tree"""
print "%s%s : '%s'" % (
" " * offset, self.name, self.text.split("\n")[:20])
for subelement in self.elements:
subelement.printTree(offset + 1)
class VBFailedElement(object):
"""An failed element of VB code"""
def __init__(self, name, text):
"""Initialize from the details"""
self.name = name
self.text = text
self.elements = []
class VBNamespace(object):
"""Handles a VB Namespace"""
auto_handlers = []
auto_class_handlers = None
#
# Skip handlers are automatically by-passed. This is useful for quickly ignoring a
# handler in a base class
skip_handlers = []
#
# Used to translate () into [] under certain circumstances (LHS of an
# assign)
brackets_are_indexes = 0
default_scope = "Private"
#
# Set this to 1 if the object is a function (ie requires () when accessing)
is_function = 0
#
# Set to 1 for types which would mark the end of the docstrings
would_end_docstring = 1
#
# Intrinsic VB functions - we need to know these to be able to convert
# bare references (eg Dir) to function references (Dir())
intrinsic_functions = [
"Dir", "FreeFile", "Rnd", "Timer",
]
def __init__(self, scope="Private"):
"""Initialize the namespace"""
self.locals = []
self.local_default_scope = self.default_scope
self.auto_class_handlers = {
"object_definition": (VBVariableDefinition, self.locals),
"const_definition": (VBConstant, self.locals),
"user_type_definition": (VBUserType, self.locals),
"event_definition": (VBUnrendered, self.locals),
}
#
# This dictionary stores names which are to be substituted if found
self.name_substitution = {}
char_spec = Config["General", "IndentCharacter"]
if char_spec == "Space":
self._indent_char = " "
elif char_spec == "Tab":
self._indent_char = "\t"
else:
raise InvalidOption(
"Indent character option not understood: '%s'" % char_spec)
self._indent_amount = int(Config["General", "IndentAmount"])
def amGlobal(self, scope):
"""Decide if a variable will be considered a global
The algorithm works by asking our parent for a 'public_is_global' flag.
If this is true and the scope is either 'public' or 'global' then we
are a global. It is up to each parent to decide if publics are global.
Things like code modules will have this set whereas things like
subroutines will not.
"""
#
# First throw out anything which is private
log.info("Checking if global: '%s' scope is '%s'" % (self, scope))
if scope in ("Public", "Global"):
if self.getParentProperty("public_is_global", 0):
log.info("We are global!")
return 1
return 0
def assignParent(self, parent):
"""Set our parent
This is kept as a separate method because it is a useful hook for
subclasses. Once this method is called, the object is fully
initialized.
"""
self.parent = parent
def asString(self):
"""Convert to a nice representation"""
return repr(self)
def checkIfFunction(self, name):
"""Check if the name is a function or not"""
for loc in self.locals:
if loc.identifier == name:
return loc.is_function
raise UnresolvableName("Name '%s' is not known in this context" % name)
def checkOptionChoice(self, section, name, choices):
"""Return the index of a config option in a list of choices
We return the actual choice name which may seem odd but is done to
make the code readable. The main purpose of this method is to allow
the choice to be selected with the error trapping hidden.
"""
value = Config[section, name]
try:
return choices[list(choices).index(value)]
except ValueError:
raise InvalidOption("Invalid option for %s.%s, must be one of %s" % (
section, name, choices))
def checkOptionYesNo(self, section, name):
"""Return the yes/no value of an option checking for invalid answers"""
return self.checkOptionChoice(section, name, ("Yes", "No"))
def containsStatements(self):
"""Check if we contain statements"""
#
# TODO: This needs refactoring - it is horrible
if isinstance(self, NonCodeBlocks):
return 0
if not hasattr(self, "blocks"):
return 1
elif self.blocks:
for item in self.blocks:
if item.containsStatements():
return 1
return 0
else:
return 1
def createExtractHandler(self, token):
"""Create a handler which will extract a certain token value"""
def handler(element):
log.info("Grabbed attribute '%s' for %s as '%s'" %
(token, self, element.text))
setattr(self, token, element.text)
return handler
def filterListByClass(self, sequence, cls):
"""Return all elements of sequence that are an instance of the given class"""
return [item for item in sequence if isinstance(item, cls)]
def finalizeObject(self):
"""Finalize the object
This method is called once the object has been completely parsed and can
be used to do any processing required.
"""
def findParentOfClass(self, cls):
"""Return our nearest parent who is a subclass of cls"""
try:
parent = self.parent
except AttributeError:
raise NestingError(
"Reached outer layer when looking for parent of class")
if isinstance(parent, cls):
return parent
else:
return parent.findParentOfClass(cls)
def getHandler(self, element):
"""Find a handler for the element"""
if element.name in self.skip_handlers:
return None
elif element.name in self.auto_handlers:
log.info("Found auto handler for '%s' ('%s')" %
(element.name, self))
return self.createExtractHandler(element.name)
elif element.name in self.auto_class_handlers:
log.info("Found auto handler for '%s' ('%s')" %
(element.name, self))
obj_class, add_to = self.auto_class_handlers[element.name]
if obj_class == self.__class__:
# Ooops, recursive handling - we should handle the sub elements
def class_handler(element):
for sub_element in element.elements:
self.handleSubObject(sub_element, obj_class, add_to)
else:
def class_handler(element):
self.handleSubObject(element, obj_class, add_to)
return class_handler
try:
return getattr(self, "handle_%s" % element.name)
except AttributeError:
return None
def getIndent(self, indent):
"""Return some spaces to do indenting"""
return self._indent_char * indent * self._indent_amount
def getLocalNameFor(self, name):
"""Get the local version of a name
We look for any ancestor with a name conversion in operation for this name and
return the first one that has it. If there are none then we just use the name
"""
try:
return self.name_substitution[name]
except KeyError:
try:
return self.parent.getLocalNameFor(name)
except AttributeError:
return name
def getParentProperty(self, name, default=None):
"""Get a property from our nearest ancestor who has it"""
try:
return getattr(self, name)
except AttributeError:
try:
parent = self.parent
return parent.getParentProperty(name)
except AttributeError:
if default is not None:
return default
raise NestingError(
"Reached outer level when trying to access a parent property: "
"'%s'" % name)
def getWarning(self, warning_type, text, indent=0, crlf=0):
"""Construct a warning comment"""
ret = "%s# %s (%s) %s" % (
self.getIndent(indent),
Config["General", "AttentionMarker"],
warning_type,
text)
if crlf:
ret += "\n"
return ret
def handleSubObject(self, element, obj_class, add_to):
"""Handle an object which creates a sub object"""
v = obj_class(self.local_default_scope)
v.processElement(element)
v.assignParent(self)
v.finalizeObject()
#
# Assume that we are supposed to add this to a list of items
# if this fails then perhaps this is an attribute we are supposed to
# set
try:
add_to.append(v)
except AttributeError:
setattr(self, add_to, v)
#
log.info("Added new %s to %s" % (obj_class, self.asString()))
def isAFunction(self, name):
"""Check if the name is a function or not
We traverse up through the nested namespaces until someone knows
the name and then see if they are a function.
"""
if name in self.intrinsic_functions:
return 1
try:
return self.checkIfFunction(name)
except UnresolvableName:
try:
return self.parent.isAFunction(name)
except (AttributeError, UnresolvableName):
return 0 # Nobody knew the name so we can't know if it is or not
def processElement(self, element):
"""Process our tree"""
handler = self.getHandler(element)
if handler:
handler(element)
else:
if element.elements:
for subelement in element.elements:
self.processElement(subelement)
else:
log.info("Unhandled element '%s' from %s\n%s" %
(element.name, self, element.text))
def registerAsGlobal(self):
"""Register ourselves as a global object
        We try to add ourselves to our parent's "global_objects" table. This may fail
        if we are not owned by anything that has a global_objects table, as would be
the case for converting a simple block of text.
"""
try:
global_objects = self.getParentProperty("global_objects")
except NestingError:
log.warn(
"Tried to register global object but there was no suitable object table")
else:
global_objects[self.identifier] = self
log.info("Registered a new global object: '%s'" % self)
def registerImportRequired(self, modulename):
"""Register a need to import a certain module
When we need to use a variable from another module we need to tell our
        module-like container to add an 'import' statement. So we search for
such a container and try to add the module name to the import list.
It is possible (but unlikely) that we need the import but we are not in
        a container. If this happens we just warn and carry on.
"""
try:
module_imports = self.getParentProperty("module_imports")
except NestingError:
log.warn(
"Tried to request a module import (%s)"
" but couldn't find a suitable container" %
modulename)
else:
if modulename not in module_imports:
module_imports.append(modulename)
log.info("Registered a new module import: '%s'" % modulename)
def renderAsCode(self, indent=0):
"""Render this element as code"""
return self.getIndent(indent) + "# Unrendered object %s\n" % (self.asString(), )
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name"""
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
def resolveName(self, name, rendering_locals=None, requestedby=None):
"""Convert a local name to a fully resolved name
We traverse up through the nested namespaces until someone knows
        what to do with the name. If nobody knows then it must be
a local so it keeps the same name.
"""
if rendering_locals is None:
rendering_locals = self.getParentProperty("rendering_locals")
if not requestedby:
requestedby = self
try:
return self.resolveLocalName(name, rendering_locals, requestedby=requestedby)
except UnresolvableName:
try:
return self.parent.resolveName(
name, rendering_locals, requestedby=requestedby)
except AttributeError:
return name # Nobody knew the name so it must be local
def searchParentProperty(self, name):
"""Search for any ancestor who has the named parameter set to true
Stop searching if someone has the property set to StopSearch
"""
try:
if getattr(self, name) == StopSearch:
return 0
elif getattr(self, name):
return 1
except AttributeError:
pass
try:
parent = self.parent
return parent.searchParentProperty(name)
except AttributeError:
return 0
def handle_scope(self, element):
"""Handle a scope definition"""
self.local_default_scope = element.text
log.info("Changed default scope to %s" % self.local_default_scope)
def handle_line_end(self, element):
"""Handle the end of a line"""
self.local_default_scope = self.default_scope
class VBConsumer(VBNamespace):
"""Consume and store elements"""
def processElement(self, element):
"""Eat this element"""
self.element = element
log.info("Consumed element: %s" % element)
class VBUnrendered(VBConsumer):
"""Represents an unrendered statement"""
would_end_docstring = 0
def renderAsCode(self, indent):
"""Render the unrendrable!"""
if self.checkOptionYesNo("General", "WarnAboutUnrenderedCode") == "Yes":
return self.getWarning(
"UntranslatedCode",
self.element.text.replace("\n", "\\n"), indent, crlf=1)
else:
return ""
class VBMessage(VBUnrendered):
"""Allows a message to be placed in the python output"""
def __init__(self, scope="Private", message="No message", messagetype="Unknown"):
"""Initialise the message"""
super(VBMessage, self).__init__(scope)
self.message = message
self.messagetype = messagetype
def renderAsCode(self, indent=0):
"""Render the message"""
return self.getWarning(self.messagetype,
self.message, indent, crlf=1)
class VBMissingArgument(VBConsumer):
"""Represents an missing argument"""
def renderAsCode(self, indent=0):
"""Render the unrendrable!"""
return "VBMissingArgument"
class VBCodeBlock(VBNamespace):
"""A block of VB code"""
def __init__(self, scope="Private"):
"""Initialize the block"""
super(VBCodeBlock, self).__init__()
self.blocks = []
self.auto_class_handlers.update({
"assignment_statement": (VBAssignment, self.blocks),
"lset_statement": (VBLSet, self.blocks),
"rset_statement": (VBRSet, self.blocks),
"set_statement": (VBSet, self.blocks),
"comment_body": (VBComment, self.blocks),
"vb2py_directive": (VB2PYDirective, self.blocks),
"if_statement": (VBIf, self.blocks),
"inline_if_statement": (VBInlineIf, self.blocks),
"select_statement": (VBSelect, self.blocks),
"exit_statement": (VBExitStatement, self.blocks),
"while_statement": (VBWhile, self.blocks),
"do_statement": (VBDo, self.blocks),
"redim_statement": (VBReDim, self.blocks),
"explicit_call_statement": (VBExplicitCall, self.blocks),
"implicit_call_statement": (VBCall, self.blocks),
"inline_implicit_call": (VBCall, self.blocks),
"label_statement": (VBLabel, self.blocks),
"with_statement": (VBWith, self.blocks),
"end_statement": (VBEnd, self.blocks),
"for_statement": (VBFor, self.blocks),
"inline_for_statement": (VBFor, self.blocks),
"for_each_statement": (VBForEach, self.blocks),
"open_statement": (VBOpen, self.blocks),
"close_statement": (VBClose, self.blocks),
"input_statement": (VBInput, self.blocks),
"print_statement": (VBPrint, self.blocks),
"line_input_statement": (VBLineInput, self.blocks),
"seek_statement": (VBSeek, self.blocks),
"name_statement": (VBName, self.blocks),
"attribute_statement": (VBUnrendered, self.blocks),
"resume_statement": (VBUnrendered, self.blocks),
"goto_statement": (VBUnrendered, self.blocks),
"on_statement": (VBUnrendered, self.blocks),
"external_declaration": (VBUnrendered, self.blocks),
"get_statement": (VBUnrendered, self.blocks),
"put_statement": (VBUnrendered, self.blocks),
"option_statement": (VBUnrendered, self.blocks),
"class_header_block": (VBUnrenderedBlock, self.blocks),
"parser_failure": (VBParserFailure, self.blocks),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
# Watch out for the block not containing any statements (could be all
# comments!)
if not self.containsStatements():
self.blocks.append(VBPass())
#
return "".join([block.renderAsCode(indent) for block in self.blocks])
class VBUnrenderedBlock(VBCodeBlock):
"""Represents an unrendered block"""
would_end_docstring = 0
def renderAsCode(self, indent):
"""Render the unrendrable!"""
return ""
class VBOptionalCodeBlock(VBCodeBlock):
"""A block of VB code which can be empty and still sytactically correct"""
def containsStatements(self, indent=0):
"""Return true if this block contains statements
        We always return 1 here because it doesn't matter if we contain statements or not
"""
return 1
class VBVariable(VBNamespace):
"""Handles a VB Variable"""
auto_handlers = [
"scope",
"type",
"string_size_indicator",
"value",
"identifier",
"optional",
"new_keyword",
"preserve_keyword",
"implicit_object",
]
skip_handlers = [
"const_statement",
]
def __init__(self, scope="Private"):
"""Initialize the variable"""
super(VBVariable, self).__init__(scope)
self.identifier = None
self.scope = scope
self.type = "Variant"
self.size_definitions = []
self.value = None
self.optional = None
self.expression = VBMissingArgument()
self.new_keyword = None
self.preserve_keyword = None
self.string_size_indicator = None
self.object = None
self.implicit_object = None
self.unsized_definition = None
self.auto_class_handlers = {
"expression": (VBExpression, "expression"),
"size": (VBSizeDefinition, self.size_definitions),
"size_range": (VBSizeDefinition, self.size_definitions),
"unsized_definition": (VBConsumer, "unsized_definition"),
}
def finalizeObject(self):
"""We can use this opportunity to now determine if we are a global"""
if self.amGlobal(self.scope):
self.registerAsGlobal()
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.optional:
return "%s=%s" % (self.identifier, self.expression.renderAsCode())
else:
return self.identifier
class VBSizeDefinition(VBNamespace):
"""Handles a VB Variable size definition"""
def __init__(self, scope="Private"):
"""Initialize the size definition"""
super(VBSizeDefinition, self).__init__(scope)
#
self.expression = None
self.sizes = []
self.size_ranges = []
#
self.auto_class_handlers = {
"size": (VBExpression, self.sizes),
"size_range": (VBSizeDefinition, self.size_ranges),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.sizes:
return ", ".join([item.renderAsCode() for item in self.sizes])
else:
return "(%s)" % ", ".join([item.renderAsCode() for item in self.size_ranges])
class VBObject(VBNamespace):
"""Handles a VB Object"""
am_on_lhs = 0 # Set to 1 if the object is on the LHS of an assignment
def __init__(self, scope="Private"):
"""Initialize the object"""
super(VBObject, self).__init__(scope)
self.primary = None
self.modifiers = []
self.implicit_object = None
self.auto_class_handlers.update({
"primary": (VBConsumer, "primary"),
"attribute": (VBAttribute, self.modifiers),
"parameter_list": (VBParameterList, self.modifiers),
})
self.auto_handlers = (
"implicit_object",
)
def renderAsCode(self, indent=0):
"""Render this subroutine"""
return self._renderPartialObject(indent)
def finalizeObject(self):
"""Finalize the object
Check for any type markers.
"""
for obj in [self.primary] + self.modifiers:
try:
ending = obj.element.text[-1:] or " "
except AttributeError:
pass # It isn't a consumer so we can't check it
else:
if ending in "#$%&":
log.info(
"Removed type identifier from '%s'" % obj.element.text)
obj.element.text = obj.element.text[:-1]
def asString(self):
"""Return a string representation"""
if self.implicit_object:
log.info("Ooops an implicit object in definition")
ret = [self.primary.element.text] + \
[item.asString() for item in self.modifiers]
return ".".join(ret)
def fnPart(self):
"""Return the function part of this object (ie without any parameters"""
return self._renderPartialObject(indent=0, modifier=VBAttribute)
def _renderPartialObject(self, indent=0, modifier=None):
"""Render this object but only including modifiers of a certain class"""
#
# Check for implicit object and if we are one then find the nearest
# "With"
if self.implicit_object:
implicit_name = "%s." % self.getParentProperty("with_object")
else:
implicit_name = ""
#
# For the LHS objects we need to look for the local name for Function
# return arguments
if self.am_on_lhs:
obj_name = self.getLocalNameFor(self.primary.element.text)
else:
obj_name = self.primary.element.text
#
resolved_name = self.resolveName(obj_name)
#
# Check if this looks like a function
# TODO: This isn't very rigorous
if not self.modifiers:
if self.isAFunction(obj_name):
resolved_name += "()"
#
if modifier is None:
valid_modifiers = self.modifiers
else:
valid_modifiers = self.filterListByClass(self.modifiers, modifier)
#
return "%s%s%s" % (implicit_name,
resolved_name,
"".join([item.renderAsCode() for item in valid_modifiers]))
class VBLHSObject(VBObject):
"""Handles a VB Object appearing on the LHS of an assignment"""
am_on_lhs = 1 # Set to 1 if the object is on the LHS of an assignment
class VBAttribute(VBConsumer):
"""An attribute of an object"""
def renderAsCode(self, indent=0):
"""Render this attribute"""
return ".%s" % self.element.text
class VBParameterList(VBCodeBlock):
"""An parameter list for an object"""
def __init__(self, scope="Private"):
"""Initialize the object"""
super(VBParameterList, self).__init__(scope)
self.expressions = []
self.auto_class_handlers.update({
"expression": (VBExpression, self.expressions),
"missing_positional": (VBMissingPositional, self.expressions),
})
def renderAsCode(self, indent=0):
"""Render this attribute"""
#
# Check if we should replace () with [] - needed on the LHS of an
# assignment but not elsewhere since __call__ is mapped to __getitem__
# for array types
if self.searchParentProperty("brackets_are_indexes"):
fmt = "[%s]"
# Prevents double accounting in a(b(5)) expressions where b is a function
self.brackets_are_indexes = StopSearch
else:
fmt = "(%s)"
#
# Construct the list of parameters - this is harder than it looks because
# for any missing positional parameters we have to do some introspection
# to dig out the default value
param_list = []
        for idx, element in enumerate(self.expressions):
# Needed so that the element can get its default
element.parameter_index_position = idx
param_list.append(element.renderAsCode())
content = ", ".join(param_list)
return fmt % content
class VBMissingPositional(VBCodeBlock):
"""A positional argument that is missing from the argument list"""
def __init__(self, scope="Private"):
"""Initialize the object"""
super(VBMissingPositional, self).__init__(scope)
def renderAsCode(self, indent=0):
"""Render this attribute"""
#
# The parameter_index_position attribute will be set
# by our parent. We also need to look for the function name
# which depends on our context
try:
function_name = self.findParentOfClass(VBObject).fnPart()
except NestingError:
try:
function_name = self.getParentProperty("object").fnPart()
except NestingError:
raise UnresolvableName(
"Could not locate function name when supplying missing argument")
#
return "VBGetMissingArgument(%s, %d)" % (
function_name,
self.parameter_index_position)
class VBExpression(VBNamespace):
"""Represents an comment"""
def __init__(self, scope="Private"):
"""Initialize the assignment"""
super(VBExpression, self).__init__(scope)
self.parts = []
self.auto_class_handlers.update({
"sign": (VBExpressionPart, self.parts),
"pre_not": (VBExpressionPart, self.parts),
"par_expression": (VBParExpression, self.parts),
"point": (VBPoint, self.parts),
"operation": (VBOperation, self.parts),
"pre_named_argument": (VBExpressionPart, self.parts),
"pre_typeof": (VBUnrendered, self.parts),
})
# operators who requested regrouping (eg 'a Like b' -> 'Like(a,b)')
self.operator_groupings = []
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.checkForOperatorGroupings()
return " ".join([item.renderAsCode(indent) for item in self.parts])
def checkForOperatorGroupings(self):
"""Look for operators who requested regrouping
Some operator cannot be translated in place (eg Like) since they must
be converted to functions. This means that we have to re-order the
parts of the expression.
"""
for item in self.operator_groupings:
idx = self.parts.index(item)
rh, lh = self.parts.pop(idx + 1), self.parts.pop(idx - 1)
item.rh, item.lh = rh, lh
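# Illustrative note (not part of the original module): the regrouping above turns a
# three-part expression such as ``a Like b`` into a single callable part, so that
# renderAsCode() emits ``Like(a, b)`` instead of the invalid ``a Like b``. The operands
# are popped from ``parts`` and attached to the operator as ``lh`` and ``rh``.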
class VBParExpression(VBNamespace):
"""A block in an expression"""
auto_handlers = [
"l_bracket",
"r_bracket",
]
def __init__(self, scope="Private"):
"""Initialize"""
super(VBParExpression, self).__init__(scope)
self.parts = []
self.named_argument = ""
self.auto_class_handlers.update({
"integer": (VBExpressionPart, self.parts),
"hexinteger": (VBExpressionPart, self.parts),
"stringliteral": (VBStringLiteral, self.parts),
"dateliteral": (VBDateLiteral, self.parts),
"floatnumber": (VBExpressionPart, self.parts),
"longinteger": (VBExpressionPart, self.parts),
"object": (VBObject, self.parts),
"par_expression": (VBParExpression, self.parts),
"operation": (VBOperation, self.parts),
"named_argument": (VBConsumer, "named_argument"),
"pre_not": (VBExpressionPart, self.parts),
"pre_typeof": (VBUnrendered, self.parts),
"point": (VBPoint, self.parts),
"sign": (VBExpressionPart, self.parts),
})
self.l_bracket = self.r_bracket = ""
# operators who requested regrouping (eg 'a Like b' -> 'Like(a,b)')
self.operator_groupings = []
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.checkForOperatorGroupings()
if self.named_argument:
arg = "%s=" % self.named_argument.element.text
else:
arg = ""
ascode = " ".join([item.renderAsCode(indent) for item in self.parts])
return "%s%s%s%s" % (arg, self.l_bracket, ascode, self.r_bracket)
def checkForOperatorGroupings(self):
"""Look for operators who requested regrouping
Some operator cannot be translated in place (eg Like) since they must
be converted to functions. This means that we have to re-order the
parts of the expression.
"""
# Destructively scan the list so we don't try this a second time later!
while self.operator_groupings:
item = self.operator_groupings.pop()
idx = self.parts.index(item)
rh, lh = self.parts.pop(idx + 1), self.parts.pop(idx - 1)
item.rh, item.lh = rh, lh
class VBPoint(VBExpression):
"""A block in an expression"""
skip_handlers = [
"point",
]
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "(%s)" % ", ".join([item.renderAsCode() for item in self.parts])
class VBExpressionPart(VBConsumer):
"""Part of an expression"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.element.name == "object":
#
# Check for implicit object (inside a with)
if self.element.text.startswith("."):
return "%s%s" % (self.getParentProperty("with_object"),
self.element.text)
elif self.element.text.lower() == "like":
return "Like(%s, %s)" % (self.lh.renderAsCode(), self.rh.renderAsCode())
elif self.element.name == "pre_named_argument":
return "%s=" % (self.element.text.split(":=")[0],)
elif self.element.name == "pre_not":
self.element.text = "not"
elif self.element.name == "hexinteger":
if self.element.text.endswith("&"):
return "0x%s" % self.element.text[2:-1]
else:
return "0x%s" % self.element.text[2:]
return self.element.text
def finalizeObject(self):
"""Finalize the object
Check for any type markers.
"""
ending = self.element.text[-1:] or " "
if ending in "#$%&":
log.info("Removed type identifier from '%s'" % self.element.text)
self.element.text = self.element.text[:-1]
class VBOperation(VBExpressionPart):
"""An operation in an expression"""
translation = {
"&": "+",
"^": "**",
"=": "==",
"\\": "//", # TODO: Is this right?
"is": "is",
"or": "or",
"and": "and", # TODO: are there any more?
"xor": "^",
"mod": "%",
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.element.text.lower() in self.translation:
return self.translation[self.element.text.lower()]
else:
return super(VBOperation, self).renderAsCode(indent)
def finalizeObject(self):
"""Finalize the object"""
if self.element.text.lower() in ("like", ):
log.info("Found regrouping operator, reversing order of operands")
self.parent.operator_groupings.append(self)
class VBStringLiteral(VBExpressionPart):
"""Represents a string literal"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
# Remember to replace the double quotes with single ones
body = self.element.text[1:-1]
body = body.replace('""', '"')
#
if self.checkOptionYesNo("General", "AlwaysUseRawStringLiterals") == "Yes":
body = body.replace("'", "\'")
return "r'%s'" % body
else:
body = body.replace('\\', '\\\\')
body = body.replace("'", "\\'")
return "'%s'" % body
class VBDateLiteral(VBParExpression):
"""Represents a date literal"""
skip_handlers = [
"dateliteral",
]
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "MakeDate(%s)" % ", ".join([item.renderAsCode() for item in self.parts])
class VBProject(VBNamespace):
"""Handles a VB Project"""
def __init__(self, scope="Private"):
"""Initialize the module"""
super(VBProject, self).__init__(scope)
self.global_objects = {} # This is where global variables live
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name
We search our local modules to see if they have a matching global variable
and if they do then we can construct the local name from it.
"""
# import pdb; pdb.set_trace()
if name in self.global_objects:
# Found as another module's public var - so mark it up and request
# an import
modulename = self.global_objects[
name].getParentProperty("modulename")
if requestedby:
requestedby.registerImportRequired(modulename)
return "%s.%s" % (modulename,
name)
else:
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
class VBModule(VBCodeBlock):
"""Handles a VB Module"""
skip_handlers = [
]
# If this is 1 then local functions will become methods
convert_functions_to_methods = 0
indent_all_blocks = 0
    # Can be used to disallow new style classes
allow_new_style_class = 1
# Public objects defined here will not be globals
public_is_global = 0
# Put methods and attribute names in here which always need to be public
# like Class_Initialize and Class_Terminate for classes
always_public_attributes = []
def __init__(
self, scope="Private", modulename="unknownmodule", classname="MyClass",
superclasses=None):
"""Initialize the module"""
super(VBModule, self).__init__(scope)
self.auto_class_handlers.update({
"sub_definition": (VBSubroutine, self.locals),
"fn_definition": (VBFunction, self.locals),
"property_definition": (VBProperty, self.locals),
"enumeration_definition": (VBEnum, self.locals),
})
self.local_names = []
self.modulename = modulename
self.classname = classname
self.superclasses = superclasses or []
#
self.rendering_locals = 0
self.docstrings = []
self.module_imports = [] # The additional modules we need to import
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.setCustomModulesAsGlobals()
if self.checkOptionYesNo("General", "TryToExtractDocStrings") == "Yes":
self.extractDocStrings()
#
        # Pre-render the following before the import statements in case any
# of them ask us to do additional imports
header = self.renderModuleHeader(indent)
docstrings = self.renderDocStrings(indent)
declarations = self.renderDeclarations(indent + self.indent_all_blocks)
blocks = self.renderBlocks(indent + self.indent_all_blocks)
#
return "%s\n\n%s%s\n%s\n%s" % (
self.importStatements(indent),
header,
docstrings,
declarations,
blocks)
def importStatements(self, indent=0):
"""Render the standard import statements for this block"""
# Leading [""] gives a newline
other = [""] + ["import %s" % item for item in self.module_imports]
if self.checkOptionYesNo("General", "IncludeDebugCode") == "Yes":
debug = "\nfrom vb2py.vbdebug import *"
else:
debug = ""
return "from vb2py.vbfunctions import *%s%s" % (debug, "\n".join(other))
def renderDeclarations(self, indent):
"""Render the declarations as code
Most of the rendering is delegated to the individual declaration
classes. However, we cannot do this with properties since they need to
be grouped into a single assignment. We do the grouping here and
delegate the rendering to them.
"""
#
ret = []
self.rendering_locals = 1 # Used for switching behaviour (eg adding 'self')
#
# Handle non-properties and group properties together
properties = {}
for declaration in self.locals:
# Check for property
if isinstance(declaration, VBProperty):
log.info("Collected property '%s', decorator '%s'" % (
declaration.identifier, declaration.property_decorator_type))
decorators = properties.setdefault(declaration.identifier, {})
decorators[declaration.property_decorator_type] = declaration
else:
ret.append(declaration.renderAsCode(indent))
#
# Now render all the properties
        for prop in properties:
            if properties[prop]:
                ret.append(list(properties[prop].values())[0].renderPropertyGroup(
                    indent, prop, **properties[prop]))
#
self.rendering_locals = 0
#
return "".join(ret)
def renderBlocks(self, indent=0):
"""Render this module's blocks"""
return "".join([block.renderAsCode(indent) for block in self.blocks])
def extractDocStrings(self, indent=0):
"""Extract doc strings from this module
We look for comments in the body of the module and take all the ones before
anything that isn't a comment.
"""
for line in self.blocks[:]:
if isinstance(line, VBComment):
self.docstrings.append(line)
self.blocks.remove(line)
elif line.would_end_docstring:
break
def renderDocStrings(self, indent=0):
"""Render this module's docstrings"""
local_indent = indent + self.indent_all_blocks
if not self.docstrings:
return ""
elif len(self.docstrings) == 1:
return '%s"""%s"""\n' % (
self.getIndent(local_indent),
self.docstrings[0].asString())
else:
joiner = "\n%s" % self.getIndent(local_indent)
body_lines = [item.asString() for item in self.docstrings[1:]]
return '%s"""%s\n%s%s\n%s"""\n' % (
self.getIndent(local_indent),
self.docstrings[0].asString(),
self.getIndent(local_indent),
joiner.join(body_lines),
self.getIndent(local_indent))
def renderModuleHeader(self, indent=0):
"""Render a header for the module"""
return ""
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name
We search our local variables to see if we know the name. If we do then we
just report it.
"""
if name in self.local_names:
return name
for obj in self.locals:
if obj.identifier == name:
return self.enforcePrivateName(obj)
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
def enforcePrivateName(self, obj):
"""Enforce the privacy for this object name if required"""
if obj.scope == "Private" and self.checkOptionYesNo("General", "RespectPrivateStatus") == "Yes" \
and obj.identifier not in self.always_public_attributes:
return "%s%s" % (Config["General", "PrivateDataPrefix"], obj.identifier)
else:
return obj.identifier
def setCustomModulesAsGlobals(self):
"""Set all the custom import modules as global modules
If the user has specified custom imports (eg Comctllib) then
we need to import these as globals in the project. We force
them into the project (if there is one) global object
table so that they can be resolved at run time.
"""
#
# Get global object table if there is one
try:
global_objects = self.getParentProperty("global_objects")
except NestingError:
return
#
log.info("Processing custom modules now")
custom_modules = Config.getItemNames("CustomIncludes")
#
# Do for all custom modules
for module_id in custom_modules:
#
# Import this module
module_name = Config["CustomIncludes", module_id]
log.info("Processing custom module %s (%s)" %
(module_id, module_name))
module = __import__("vb2py.custom.%s" %
module_name, globals(), locals(), ["custom"])
#
# Get a container to store the values in
vbmodule = VBCodeModule(modulename="vb2py.custom.%s" % module_name)
#
# Now set all items in the module to be global (if they don't seem to be
# hidden)
for item_name in dir(module):
if not item_name.startswith("_"):
log.info("Registered new custom global '%s'" % item_name)
global_objects[item_name] = vbmodule
class VBClassModule(VBModule):
"""Handles a VB Class"""
# If this is 1 then local functions will become methods
convert_functions_to_methods = 1
indent_all_blocks = 1
# Put methods and attribute names in here which always need to be public
# like Class_Initialize and Class_Terminate for classes
always_public_attributes = ["Class_Initialize", "Class_Terminate"]
def __init__(self, *args, **kw):
"""Initialize the class module"""
super(VBClassModule, self).__init__(*args, **kw)
self.name_substitution = {"Me": "self"}
def renderModuleHeader(self, indent=0):
"""Render this element as code"""
supers = self.superclasses[:]
if self.checkOptionYesNo("Classes", "UseNewStyleClasses") == "Yes" and \
self.allow_new_style_class:
supers.insert(0, "Object")
if supers:
return "class %s(%s):\n" % (self.classname, ", ".join(supers))
else:
return "class %s:\n" % self.classname
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name
We search our local variables to see if we know the name. If we do then we
need to add a self.
"""
# Don't do anything for locals
if rendering_locals:
prefix = ""
else:
prefix = "self."
#
if name in self.local_names:
return "%s%s" % (prefix, name)
for obj in self.locals:
if obj.identifier == name:
return "%s%s" % (prefix, self.enforcePrivateName(obj))
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
def assignParent(self, parent):
"""Set our parent"""
super(VBClassModule, self).assignParent(parent)
self.identifier = self.classname
self.registerAsGlobal()
class VBCodeModule(VBModule):
"""Handles a VB Code module"""
public_is_global = 1 # Public objects defined here will be globals
def enforcePrivateName(self, obj):
"""Enforce the privacy for this object name if required
In a code module this is not required. Private variables and
definitions in a code module are not really hidden in the same way as
in a class module. They are accessible still. The main thing is that
they are not global.
"""
return obj.identifier
class VBFormModule(VBClassModule):
"""Handles a VB Form module"""
# If this is 1 then local functions will become methods
convert_functions_to_methods = 1
class VBCOMExternalModule(VBModule):
"""Handles external COM references"""
def __init__(self, *args, **kw):
"""Initialize the COM module
We always need win32com.client to be imported
"""
super(VBCOMExternalModule, self).__init__(*args, **kw)
self.module_imports.append("win32com.client")
docstring = VBRenderDirect(
"Automatically generated file based on project references")
self.docstrings.append(docstring)
def renderDeclarations(self, indent):
"""Render all the declarations
We have a list of libraries and objects in our names attribute
so we create a series of dummy classes with callable
attributes which return COM objects.
"""
library_code = []
        for library, members in self.names.items():
member_code = []
for member in members:
member_code.append(
' def %s(self):\n'
' """Create the %s.%s object"""\n'
' return win32com.client.Dispatch("%s.%s")\n'
'\n' % (member, library, member, library, member))
library_code.append('class _%s:\n'
' """COM Library"""\n\n'
'%s'
'%s = _%s()\n' % (
library,
''.join(member_code),
library,
library))
return '\n\n'.join(library_code)
class VBVariableDefinition(VBVariable):
"""Handles a VB Dim of a Variable"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
local_name = self.resolveName(self.identifier)
#
# TODO: Can't handle implicit objects yet
if self.implicit_object:
warning = self.getWarning(
"UnhandledDefinition",
"Dim of implicit 'With' object (%s) is not supported" % local_name,
indent=indent, crlf=1)
else:
warning = ""
#
if self.string_size_indicator:
size = self.string_size_indicator
self.type = "FixedString"
else:
size = ""
#
# Make sure we resolve the type properly
local_type = self.resolveName(self.type)
#
if self.unsized_definition: # This is a 'Dim a()' statement
return "%s%s%s = vbObjectInitialize(objtype=%s)\n" % (
warning,
self.getIndent(indent),
local_name,
local_type)
elif self.size_definitions: # There is a size 'Dim a(10)'
if self.preserve_keyword:
preserve = ", %s" % (local_name, )
else:
preserve = ""
if size:
size = ", stringsize=" + size
rendered_size_definitions = [
item.renderAsCode() for item in self.size_definitions]
return "%s%s%s = vbObjectInitialize((%s,), %s%s%s)\n" % (
warning,
self.getIndent(indent),
local_name,
", ".join(rendered_size_definitions),
local_type,
preserve,
size)
elif self.new_keyword: # It is a 'Dim a as new ...'
return "%s%s%s = %s(%s)\n" % (
warning,
self.getIndent(indent),
local_name,
local_type,
size)
else: # This is just 'Dim a as frob'
return "%s%s%s = %s(%s)\n" % (
warning,
self.getIndent(indent),
local_name,
local_type,
size)
def finalizeObject(self):
"""Finalize the object
Check for any type markers.
"""
ending = self.identifier[-1:] or " "
if ending in "#$%&":
log.info("Removed type identifier from '%s'" % self.identifier)
self.identifier = self.identifier[:-1]
class VBConstant(VBVariableDefinition):
"""Represents a constant in VB"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
# local_name = self.getLocalNameFor(self.identifier)
local_name = self.resolveName(self.identifier)
return "%s%s = %s\n" % (
self.getIndent(indent),
local_name,
self.expression.renderAsCode())
class VBReDim(VBCodeBlock):
"""Represents a Redim statement"""
def __init__(self, scope="Private"):
"""Initialize the Redim"""
super(VBReDim, self).__init__(scope)
#
self.variables = []
self.preserve = None
#
self.auto_class_handlers = {
"object_definition": (VBVariableDefinition, self.variables),
"preserve_keyword": (VBConsumer, "preserve"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
for var in self.variables:
var.preserve_keyword = self.preserve
return "".join([var.renderAsCode(indent) for var in self.variables])
class VBAssignment(VBNamespace):
"""An assignment statement"""
auto_handlers = [
]
def __init__(self, scope="Private"):
"""Initialize the assignment"""
super(VBAssignment, self).__init__(scope)
self.parts = []
self.object = None
self.auto_class_handlers.update({
"expression": (VBExpression, self.parts),
"object": (VBLHSObject, "object")
})
def asString(self):
"""Convert to a nice representation"""
return "%s = %s" % (self.object, self.parts)
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.checkForModuleGlobals()
self.object.brackets_are_indexes = 1 # Convert brackets on LHS to []
return "%s%s = %s\n" % (self.getIndent(indent),
self.object.renderAsCode(),
self.parts[0].renderAsCode(indent))
def checkForModuleGlobals(self):
"""Check if this assignment requires a global statement
We can use this opportunity to now check if we need to append a
'global' statement to our container. If we are in a CodeModule, and the
LHS of the assignment is a module level variable which is not locally
shadowed, then we need a global statement.
So the procedure is,
- look for our parent who is a subroutine type
- if we don't have one then skip out
- see if this parent knows us, if so then we are a subroutine local
- also see if we are the subroutine name
- look for our parent who is a module type
- see if this parent knows us, if so then we are a module local
- if we are then tell our subroutine parent that we need a global statement
"""
log.info("Checking whether to use a global statement for '%s'" %
self.object.primary.element.text)
# import pdb; pdb.set_trace()
try:
enclosing_sub = self.findParentOfClass(VBSubroutine)
except NestingError:
return # We are not in a subroutine
log.info("Found sub")
try:
name = enclosing_sub.resolveLocalName(
self.object.primary.element.text)
except UnresolvableName:
if enclosing_sub.identifier == self.object.primary.element.text:
return
else:
return # We are a subroutine local
log.info("Am not local")
try:
enclosing_module = self.findParentOfClass(VBCodeModule)
except NestingError:
return # We are not in a module
log.info("Found code module")
try:
name = enclosing_module.resolveLocalName(
self.object.primary.element.text)
except UnresolvableName:
return # We are not known at the module level
# If we get to here then we are a module level local!
enclosing_sub.globals_required[
self.resolveName(self.object.primary.element.text)] = 1
log.info("Added a module level global: '%s'" %
self.resolveName(self.object.primary.element.text))
class VBSpecialAssignment(VBAssignment):
"""A special assignment eg LSet, RSet where the assignment ends up as a
function call"""
fn_name = None
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.checkForModuleGlobals()
self.object.brackets_are_indexes = 1 # Convert brackets on LHS to []
return "%s%s = %s(%s, %s)\n" % (self.getIndent(indent),
self.object.renderAsCode(),
self.fn_name,
self.object.renderAsCode(),
self.parts[0].renderAsCode(indent))
class VBLSet(VBSpecialAssignment):
"""An LSet statement"""
fn_name = "LSet"
class VBRSet(VBSpecialAssignment):
"""An RSet statement"""
fn_name = "RSet"
class VBSet(VBAssignment):
"""A set statement"""
auto_handlers = [
"new_keyword",
]
new_keyword = ""
def renderAsCode(self, indent=0):
"""Render this element as code"""
if not self.new_keyword:
return super(VBSet, self).renderAsCode(indent)
else:
return "%s%s = %s()\n" % (
self.getIndent(indent),
self.object.renderAsCode(),
self.parts[0].renderAsCode(indent))
class VBEnd(VBAssignment):
"""An end statement"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "%ssys.exit(0)\n" % self.getIndent(indent)
class VBCall(VBCodeBlock):
"""A call statement"""
auto_handlers = [
]
def __init__(self, scope="Private"):
"""Initialize the assignment"""
super(VBCall, self).__init__(scope)
self.parameters = []
self.object = None
self.auto_class_handlers = ({
"expression": (VBParExpression, self.parameters),
"missing_positional": (VBMissingPositional, self.parameters),
"object": (VBObject, "object")
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.parameters:
#
# Construct the list of parameters - this is harder than it looks because
# for any missing positional parameters we have to do some introspection
# to dig out the default value
param_list = []
for idx, element in zip(xrange(1000), self.parameters):
# Needed so that the element can get its default
element.parameter_index_position = idx
param_list.append(element.renderAsCode())
params = ", ".join(param_list)
else:
params = ""
#
self.object.am_on_lhs = 1
#
return "%s%s(%s)\n" % (self.getIndent(indent),
self.object.renderAsCode(),
params)
class VBExplicitCall(VBCodeBlock):
"""A call statement on a single line with parenthesis
This is illegal in VB but can be found in VBScript
"""
auto_handlers = [
]
def __init__(self, scope="Private"):
"""Initialize the assignment"""
super(VBExplicitCall, self).__init__(scope)
self.parameters = []
self.object = None
self.auto_class_handlers = ({
"expression": (VBParExpression, self.parameters),
"missing_positional": (VBMissingPositional, self.parameters),
"qualified_object": (VBObject, "object")
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.parameters:
#
# Something has gone wrong here because there shouldn't be any parameters
# in the call. These should be encapsulated in the object.
raise VBParserError(
'Unexpected parameters (%s) in explicit call' % self.parameters)
#
self.object.am_on_lhs = 1
#
return "%s%s\n" % (self.getIndent(indent),
self.object.renderAsCode())
class VBExitStatement(VBConsumer):
"""Represents an exit statement"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
indenter = self.getIndent(indent)
rv_name = Config["Functions", "ReturnVariableName"]
if self.element.text == "Exit Function":
return "%sreturn %s\n" % (indenter, rv_name)
elif self.element.text == "Exit Sub":
return "%sreturn\n" % indenter
elif self.element.text == "Exit Property":
if self.getParentProperty("property_decorator_type") == "Get":
return "%sreturn %s\n" % (indenter, rv_name)
else:
return "%sreturn\n" % indenter
else:
return "%sbreak\n" % indenter
class VBComment(VBConsumer):
"""Represents an comment"""
#
# Used to indicate if this is a valid statement
not_a_statement = 0
def renderAsCode(self, indent=0):
"""Render this element as code"""
return self.getIndent(indent) + "#%s\n" % self.element.text
def asString(self):
"""Render this element as a string"""
return self.element.text
class VBLabel(VBUnrendered):
"""Represents a label"""
def renderAsCode(self, indent):
"""Render the label"""
if Config["Labels", "IgnoreLabels"] == "Yes":
return ""
else:
return super(VBLabel, self).renderAsCode(indent)
class VBOpen(VBCodeBlock):
"""Represents an open statement"""
def __init__(self, scope="Private"):
"""Initialize the open"""
super(VBOpen, self).__init__(scope)
#
self.filename = None
self.open_modes = []
self.channel = None
self.access_length = None
#
self.auto_class_handlers = ({
"filename": (VBParExpression, "filename"),
"open_mode": (VBConsumer, self.open_modes),
"channel": (VBParExpression, "channel"),
"access_length": (VBParExpression, "access_length"),
})
#
self.open_mode_lookup = {
"Input": "r",
"Output": "w",
"Append": "a",
"Binary": "b",
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
file_mode = ""
todo = []
for mode in self.open_modes:
m = mode.element.text.strip()
try:
file_mode += self.open_mode_lookup[m.strip()]
except KeyError:
todo.append("'%s'" % m.strip())
if self.access_length is not None:
todo.append("Access length is not supported (%s)" %
self.access_length.renderAsCode())
if todo:
todo_warning = self.getWarning(
"UnknownFileMode", ", ".join(todo), indent, crlf=1)
else:
todo_warning = ""
#
return "%s%sVBFiles.openFile(%s, %s, '%s')\n" % (
todo_warning,
self.getIndent(indent),
self.channel.renderAsCode(),
self.filename.renderAsCode(),
file_mode)
class VBClose(VBCodeBlock):
"""Represents a close statement"""
def __init__(self, scope="Private"):
"""Initialize the open"""
super(VBClose, self).__init__(scope)
#
self.channels = []
#
self.auto_class_handlers = ({
"expression": (VBParExpression, self.channels),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
if not self.channels:
return "%sVBFiles.closeFile()\n" % (
self.getIndent(indent))
else:
ret = []
for channel in self.channels:
ret.append("%sVBFiles.closeFile(%s)\n" % (
self.getIndent(indent),
channel.renderAsCode()))
return "".join(ret)
class VBSeek(VBCodeBlock):
"""Represents a seek statement"""
def __init__(self, scope="Private"):
"""Initialize the seek"""
super(VBSeek, self).__init__(scope)
#
self.expressions = []
#
self.auto_class_handlers = ({
"expression": (VBParExpression, self.expressions),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "%sVBFiles.seekFile(%s, %s)\n" % (
self.getIndent(indent),
self.expressions[0].renderAsCode(),
self.expressions[1].renderAsCode(),)
class VBInput(VBCodeBlock):
"""Represents an input statement"""
input_type = "Input"
def __init__(self, scope="Private"):
"""Initialize the open"""
super(VBInput, self).__init__(scope)
#
self.channel = None
self.variables = []
#
self.auto_class_handlers = ({
"channel_id": (VBParExpression, "channel"),
"expression": (VBExpression, self.variables),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
# Make sure variables are converted as if they are on the LHS of an
# assignment
for var in self.variables:
var.brackets_are_indexes = 1
#
return "%s%s = VBFiles.get%s(%s, %d)\n" % (
self.getIndent(indent),
", ".join([var.renderAsCode() for var in self.variables]),
self.input_type,
self.channel.renderAsCode(),
len(self.variables))
class VBLineInput(VBInput):
"""Represents an input statement"""
input_type = "LineInput"
class VBPrint(VBCodeBlock):
"""Represents a print statement"""
def __init__(self, scope="Private"):
"""Initialize the print"""
super(VBPrint, self).__init__(scope)
#
self.channel = VBRenderDirect("None")
self.variables = []
self.hold_cr = None
#
self.auto_class_handlers = ({
"channel_id": (VBParExpression, "channel"),
"expression": (VBExpression, self.variables),
"print_separator": (VBPrintSeparator, self.variables),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
print_list = ", ".join([var.renderAsCode()
for var in self.variables if var.renderAsCode()])
if self.variables:
if self.variables[-1].renderAsCode() not in (None, "\t"):
print_list += ", '\\n'"
return "%sVBFiles.writeText(%s, %s)\n" % (
self.getIndent(indent),
self.channel.renderAsCode(),
print_list)
class VBPrintSeparator(VBConsumer):
"""Represents a print statement separator"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.element.text == ";":
return None
elif self.element.text == ",":
return '"\\t"'
else:
raise UnhandledStructureError(
"Unknown print separator '%s'" % self.element.text)
class VBName(VBCodeBlock):
"""Represents a name statement"""
def __init__(self, scope="Private"):
"""Initialize the print"""
super(VBName, self).__init__(scope)
#
self.channel = VBRenderDirect("None")
self.files = []
#
self.auto_class_handlers = ({
"expression": (VBExpression, self.files),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.registerImportRequired("os")
file_list = ", ".join([fle.renderAsCode() for fle in self.files])
return "%sName(%s)\n" % (
self.getIndent(indent),
file_list)
class VBUserType(VBCodeBlock):
"""Represents a select block"""
auto_handlers = [
]
select_variable_index = 0
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBUserType, self).__init__(scope)
#
self.variables = []
self.identifier = None
#
self.auto_class_handlers = {
"identifier": (VBConsumer, "identifier"),
"object_definition": (VBVariable, self.variables),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
vars = []
if not self.variables:
vars.append(VBPass().renderAsCode(indent + 2))
else:
for var in self.variables:
vars.append("%sself.%s = %s()" % (
self.getIndent(indent + 2),
var.identifier,
var.type))
#
return ("%sclass %s:\n"
"%sdef __init__(self):\n%s\n\n" % (
self.getIndent(indent),
self.identifier.element.text,
self.getIndent(indent + 1),
"\n".join(vars)))
class VBSubroutine(VBCodeBlock):
"""Represents a subroutine"""
public_is_global = 0 # Public objects defined here will not be globals
def __init__(self, scope="Private"):
"""Initialize the subroutine"""
super(VBSubroutine, self).__init__(scope)
self.identifier = None
self.scope = scope
self.block = VBPass()
self.parameters = []
self.globals_required = {}
# A list of objects required in a global statement
self.type = None
self.static = None
#
self.auto_class_handlers.update({
"formal_param": (VBVariable, self.parameters),
"block": (VBCodeBlock, "block"),
"type_definition": (VBUnrendered, "type"),
})
self.auto_handlers = [
"identifier",
"scope",
"static",
]
self.skip_handlers = [
"sub_definition",
]
self.rendering_locals = 0
def renderAsCode(self, indent=0):
"""Render this subroutine"""
code_block = self.block.renderAsCode(indent + 1)
locals = [declaration.renderAsCode(indent + 1)
for declaration in self.block.locals]
if self.static:
log.warn("Static function detected - static is not supported")
ret = "\n%sdef %s(%s):\n%s%s%s" % (
self.getIndent(indent),
self.getParentProperty("enforcePrivateName")(self),
self.renderParameters(),
self.renderGlobalStatement(indent + 1),
"\n".join(locals),
code_block)
return ret
def renderParameters(self):
"""Render the parameter list"""
params = [param.renderAsCode() for param in self.parameters]
if self.getParentProperty("convert_functions_to_methods"):
params.insert(0, "self")
return ", ".join(params)
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name
We search our local variables and parameters to see if we know the
name. If we do then we return the original name.
"""
names = [obj.identifier for obj in self.block.locals + self.parameters]
if name in names:
return name
else:
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
def renderGlobalStatement(self, indent=0):
"""Render the global statement if we need it"""
if self.globals_required:
return "%sglobal %s\n" % (self.getIndent(indent),
", ".join(self.globals_required.keys()))
else:
return ""
def assignParent(self, *args, **kw):
"""Assign our parent
We can use this opportunity to now determine if we are a global
"""
super(VBSubroutine, self).assignParent(*args, **kw)
#
# Check if we will be considered a global for the project
if hasattr(self, "parent"):
if self.parent.amGlobal(self.scope):
self.registerAsGlobal()
class VBFunction(VBSubroutine):
"""Represents a function"""
is_function = 1 # We need () if we are accessed directly
def renderAsCode(self, indent=0):
"""Render this subroutine"""
#
# Set a name conversion to capture the function name
# Assignments to this function name should go to the _ret parameter
return_var = Config["Functions", "ReturnVariableName"]
self.name_substitution[self.identifier] = return_var
#
if self.block:
block = self.block.renderAsCode(indent + 1)
else:
block = self.getIndent(indent + 1) + "pass\n"
#
locals = [declaration.renderAsCode(indent + 1)
for declaration in self.block.locals]
#
if Config["Functions", "PreInitializeReturnVariable"] == "Yes":
pre_init = "%s%s = None\n" % (
self.getIndent(indent + 1),
return_var)
else:
pre_init = ""
ret = "\n%sdef %s(%s):\n%s%s%s%s%sreturn %s\n" % (
self.getIndent(indent),
self.getParentProperty("enforcePrivateName")(self),
self.renderParameters(),
self.renderGlobalStatement(indent + 1),
pre_init,
"\n".join(locals),
block,
self.getIndent(indent + 1),
return_var)
return ret
class VBIf(VBCodeBlock):
"""Represents an if block"""
auto_handlers = [
]
skip_handlers = [
"if_statement",
]
def __init__(self, scope="Private"):
"""Initialize the If"""
super(VBIf, self).__init__(scope)
#
self.condition = None
self.if_block = VBPass()
self.elif_blocks = []
self.else_block = None
#
self.auto_class_handlers = {
"condition": (VBExpression, "condition"),
"if_block": (VBCodeBlock, "if_block"),
"else_if_statement": (VBElseIf, self.elif_blocks),
"else_block": (VBCodeBlock, "else_block"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
ret = self.getIndent(indent) + \
"if %s:\n" % self.condition.renderAsCode()
ret += self.if_block.renderAsCode(indent + 1)
if self.elif_blocks:
for elif_block in self.elif_blocks:
ret += elif_block.renderAsCode(indent)
if self.else_block:
ret += self.getIndent(indent) + "else:\n"
ret += self.else_block.renderAsCode(indent + 1)
return ret
class VBElseIf(VBIf):
"""Represents an ElseIf statement"""
def __init__(self, scope="Private"):
"""Initialize the If"""
super(VBIf, self).__init__(scope)
#
self.condition = None
self.elif_block = VBPass()
#
self.auto_class_handlers = {
"condition": (VBExpression, "condition"),
"else_if_block": (VBCodeBlock, "elif_block"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
ret = self.getIndent(indent) + \
"elif %s:\n" % self.condition.renderAsCode()
ret += self.elif_block.renderAsCode(indent + 1)
return ret
class VBInlineIf(VBCodeBlock):
"""Represents an if block"""
auto_handlers = [
]
skip_handlers = [
"if_statement",
]
def __init__(self, scope="Private"):
"""Initialize the If"""
super(VBInlineIf, self).__init__(scope)
#
self.condition = None
self.statements = []
#
self.auto_class_handlers = {
"condition": (VBExpression, "condition"),
"statement": (VBCodeBlock, self.statements),
"inline_implicit_call": (VBCodeBlock, self.statements), # TODO: remove me
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
assert self.statements, "Inline If has no statements!"
ret = "%sif %s:\n%s" % (
self.getIndent(indent),
self.condition.renderAsCode(),
self.statements[0].renderAsCode(indent + 1),)
#
if len(self.statements) == 2:
ret += "%selse:\n%s" % (
self.getIndent(indent),
self.statements[1].renderAsCode(indent + 1))
elif len(self.statements) > 2:
raise VBParserError(
"Inline if with more than one clause not supported")
#
return ret
class VBSelect(VBCodeBlock):
"""Represents a select block"""
auto_handlers = [
]
_select_variable_index = 0
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBSelect, self).__init__(scope)
#
self.blocks = []
self.comment_block = VBNothing()
#
self.auto_class_handlers = {
"expression": (VBExpression, "expression"),
"case_item_block": (VBCaseItem, self.blocks),
"case_else_block": (VBCaseElse, self.blocks),
"case_comment_block": (VBOptionalCodeBlock, "comment_block"),
}
#
# Change the variable index if we are a select
if self.__class__ == VBSelect:
self.select_variable_index = VBSelect._select_variable_index
VBSelect._select_variable_index = VBSelect._select_variable_index + 1
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
# Change if/elif status on the first child
if self.blocks:
self.blocks[0].if_or_elif = "if"
#
if Config["Select", "EvaluateVariable"] != "EachTime":
ret = "%s%s = %s\n" % (self.getIndent(indent),
self.getSelectVariable(),
self.expression.renderAsCode())
else:
ret = ""
ret += self.comment_block.renderAsCode()
ret += "".join([item.renderAsCode(indent) for item in self.blocks])
return ret
def getSelectVariable(self):
"""Return the name of the select variable"""
eval_variable = Config["Select", "EvaluateVariable"]
if eval_variable == "Once":
if Config["Select", "UseNumericIndex"] == "Yes":
select_var = "%s%d" % (Config["Select", "SelectVariablePrefix"],
self.getParentProperty("select_variable_index"))
else:
select_var = Config["Select", "SelectVariablePrefix"]
elif eval_variable == "EachTime":
select_var = "%s" % self.getParentProperty(
"expression").renderAsCode()
else:
raise InvalidOption(
"Evaluate variable option not understood: '%s'" % eval_variable)
return select_var
class VBCaseBlock(VBSelect):
"""Represents a select block"""
if_or_elif = "elif" # Our parent will change this if we are the first
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBCaseBlock, self).__init__(scope)
#
self.lists = []
self.expressions = []
self.block = VBPass()
#
self.auto_class_handlers = {
"case_list": (VBCaseItem, self.lists),
"expression": (VBExpression, self.expressions),
"block": (VBCodeBlock, "block"),
}
class VBCaseItem(VBCaseBlock):
"""Represents a select block"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
select_variable_index = self.getParentProperty("select_variable_index")
if self.lists:
expr = " or ".join(["(%s)" % item.renderAsCode()
for item in self.lists])
return "%s%s %s:\n%s" % (
self.getIndent(indent),
self.if_or_elif,
expr,
self.block.renderAsCode(indent + 1))
elif len(self.expressions) == 1:
expression_text = self.expressions[0].renderAsCode()
# Now check for "Is"
if expression_text.startswith("Is "):
# This has "Is" - replace it and use the rest of the expression
return "%s %s" % (
self.getSelectVariable(),
expression_text[3:])
else:
# Standard case
return "%s == %s" % (
self.getSelectVariable(),
expression_text)
elif len(self.expressions) == 2:
return "%s <= %s <= %s" % (
self.expressions[0].renderAsCode(),
self.getSelectVariable(),
self.expressions[1].renderAsCode())
raise VBParserError("Error rendering case item")
class VBCaseElse(VBCaseBlock):
"""Represents a select block"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "%selse:\n%s" % (self.getIndent(indent),
self.block.renderAsCode(indent + 1))
class VBFor(VBCodeBlock):
"""Represents a for statement"""
_for_variable_index = 0
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBFor, self).__init__(scope)
#
self.block = VBPass()
self.expressions = []
#
self.auto_class_handlers = {
"expression": (VBExpression, self.expressions),
"block": (VBCodeBlock, "block"), # Used for full 'for'
"body": (VBCodeBlock, "block"), # Used for inline 'for'
"object": (VBObject, "object"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
range_statement = ", ".join(
[item.renderAsCode() for item in self.expressions])
# Watch out for the weird dotted name in the for
self.handleDottedName(indent)
return "%sfor %s in vbForRange(%s):\n%s%s" % (
self.getIndent(indent),
self.loopname,
range_statement,
self.copiedname,
self.block.renderAsCode(indent + 1))
def handleDottedName(self, indent):
"""Handle a dotted name as the identifier
The For can reference a dotted name, which presumably changes the
value of that attribute. We can only do this by a local re-assignment
"""
name = self.object.renderAsCode()
if "." not in name:
# Ok, normal case
self.loopname = name
self.copiedname = ""
else:
# Ooops, assigning to a dotted name in the loop
self.loopname = "_idx%s" % VBFor._for_variable_index
VBFor._for_variable_index += 1
self.copiedname = "%s%s = %s\n" % (
self.getIndent(indent + 1),
name,
self.loopname
)
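# Illustrative sketch (hypothetical VB input): 'For obj.idx = 1 To 10' cannot be
# assigned to directly because the loop variable is a dotted name, so it renders
# roughly as
#   for _idx0 in vbForRange(1, 10):
#       obj.idx = _idx0
#       ...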
class VBForEach(VBFor):
"""Represents a for each statement"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
# Watch out for the weird dotted name in the for
self.handleDottedName(indent)
return "%sfor %s in %s:\n%s%s" % (
self.getIndent(indent),
self.loopname,
self.expressions[0].renderAsCode(),
self.copiedname,
self.block.renderAsCode(indent + 1))
class VBWhile(VBCodeBlock):
"""Represents a while statement"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBWhile, self).__init__(scope)
#
self.block = VBPass()
self.expression = None
#
self.auto_class_handlers = {
"expression": (VBExpression, "expression"),
"block": (VBCodeBlock, "block"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "%swhile %s:\n%s" % (
self.getIndent(indent),
self.expression.renderAsCode(),
self.block.renderAsCode(indent + 1))
class VBDo(VBCodeBlock):
"""Represents a do statement"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBDo, self).__init__(scope)
#
self.block = VBPass()
self.pre_while = None
self.pre_until = None
self.post_while = None
self.post_until = None
#
self.auto_class_handlers = {
"while_clause": (VBExpression, "pre_while"),
"until_clause": (VBExpression, "pre_until"),
"post_while_clause": (VBExpression, "post_while"),
"post_until_clause": (VBExpression, "post_until"),
"block": (VBCodeBlock, "block"),
}
def renderAsCode(self, indent=0):
"""Render this element as code
There are five different kinds of do loop
pre_while
pre_until
post_while
post_until
no conditions
"""
if self.pre_while:
return "%swhile %s:\n%s" % (
self.getIndent(indent),
self.pre_while.renderAsCode(),
self.block.renderAsCode(indent + 1))
elif self.pre_until:
return "%swhile not (%s):\n%s" % (
self.getIndent(indent),
self.pre_until.renderAsCode(),
self.block.renderAsCode(indent + 1))
elif self.post_while:
return "%swhile 1:\n%s%sif not (%s):\n%sbreak\n" % (
self.getIndent(indent),
self.block.renderAsCode(indent + 1),
self.getIndent(indent + 1),
self.post_while.renderAsCode(),
self.getIndent(indent + 2))
elif self.post_until:
return "%swhile 1:\n%s%sif %s:\n%sbreak\n" % (
self.getIndent(indent),
self.block.renderAsCode(indent + 1),
self.getIndent(indent + 1),
self.post_until.renderAsCode(),
self.getIndent(indent + 2))
else:
return "%swhile 1:\n%s" % (
self.getIndent(indent),
self.block.renderAsCode(indent + 1))
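# Illustrative sketch (assumed VB input): a post-tested loop such as
#   Do
#       ...
#   Loop Until x > 10
# has no direct Python equivalent, so it renders roughly as
#   while 1:
#       ...
#       if x > 10:
#           break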
class VBWith(VBCodeBlock):
"""Represents a with statement"""
_with_variable_index = 0
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBWith, self).__init__(scope)
#
self.block = None
self.expression = None
#
self.auto_class_handlers = {
"expression": (VBExpression, "expression"),
"block": (VBCodeBlock, "block"),
}
#
self.with_variable_index = VBWith._with_variable_index
VBWith._with_variable_index = VBWith._with_variable_index + 1
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
# Don't even do anything if there is no body to the With
if self.block:
#
# Before we render the expression we change its parent to our parent because
# we don't want any ".implicit" objects to be evaluated using our
# With object
self.expression.parent = self.parent
#
if self._evaluateVariableOption() == "EveryTime":
self.with_object = self.expression.renderAsCode()
return self.block.renderAsCode(indent)
else:
if self.checkOptionYesNo("With", "UseNumericIndex") == "Yes":
varname = "%s%d" % (
Config["With", "WithVariablePrefix"],
self.with_variable_index)
else:
varname = Config["With", "WithVariablePrefix"]
self.with_object = varname
return "%s%s = %s\n%s" % (
self.getIndent(indent),
varname,
self.expression.renderAsCode(),
self.block.renderAsCode(indent))
else:
return ""
def _evaluateVariableOption(self):
return self.checkOptionChoice(
"With", "EvaluateVariable", ("EveryTime", "Once"))
class VBProperty(VBSubroutine):
"""Represents a property definition"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBProperty, self).__init__(scope)
self.property_decorator_type = None
#
self.auto_handlers.append("property_decorator_type")
def renderPropertyGroup(self, indent, name, Let=None, Set=None, Get=None):
"""Render a group of property statements"""
if Let and Set:
raise UnhandledStructureError(
"Cannot handle both Let and Set properties for an object")
log.info("Rendering property group '%s'" % name)
ret = []
params = []
pset = Let or Set
pget = Get
#
# Get the name for this property - respecting the hidden status
obj = pset or pget # Need at least one!
proper_name = self.getParentProperty("enforcePrivateName")(obj)
if pset:
self.getParentProperty("local_names").append(
pset.identifier) # Store property name for namespace analysis
pset.identifier = "%s%s" % (
Config["Properties", "LetSetVariablePrefix"], pset.identifier)
ret.append(pset.renderAsCode(indent))
params.append("fset=%s" %
self.getParentProperty("enforcePrivateName")(pset))
if pget:
self.getParentProperty("local_names").append(
pget.identifier) # Store property name for namespace analysis
pget.__class__ = VBFunction # Needs to be a function
pget.name_substitution[pget.identifier] = Config[
"Functions", "ReturnVariableName"]
pget.identifier = "%s%s" % (
Config["Properties", "GetVariablePrefix"], pget.identifier)
ret.append(pget.renderAsCode(indent))
params.append("fget=%s" %
self.getParentProperty("enforcePrivateName")(pget))
return "\n%s%s%s = property(%s)\n" % (
"".join(ret),
self.getIndent(indent),
proper_name,
", ".join(params))
class VBEnum(VBCodeBlock):
"""Represents an enum definition"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBEnum, self).__init__(scope)
self.enumerations = []
self.identifier = None
#
self.auto_class_handlers = {
"enumeration_item": (VBEnumItem, self.enumerations),
}
self.auto_handlers = ["identifier"]
def renderAsCode(self, indent=0):
"""Render a group of property statements"""
count = 0
ret = []
for enumeration in self.enumerations:
if enumeration.expression:
cnt = enumeration.expression.renderAsCode()
else:
cnt = count
count += 1
ret.append("%s%s = %s" % (self.getIndent(indent),
enumeration.identifier.element.text,
cnt))
return "%s# Enumeration '%s'\n%s\n" % (
self.getIndent(indent),
self.identifier,
"\n".join(ret),
)
class VBEnumItem(VBCodeBlock):
"""Represents an enum item"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBEnumItem, self).__init__(scope)
self.identifier = None
self.expression = None
#
self.auto_class_handlers = {
"identifier": (VBConsumer, "identifier"),
"expression": (VBExpression, "expression"),
}
class VB2PYDirective(VBCodeBlock):
"""Handles a vb2py directive"""
skip_handlers = [
"vb2py_directive",
]
would_end_docstring = 0
def __init__(self, scope="Private"):
"""Initialize the module"""
super(VB2PYDirective, self).__init__(scope)
self.auto_handlers = (
"directive_type",
"config_name",
"config_section",
"expression",
)
self.directive_type = "Set"
self.config_name = None
self.config_section = None
self.expression = None
def renderAsCode(self, indent=0):
"""We use the rendering to do our stuff"""
if self.directive_type == "Set":
Config.setLocalOveride(
self.config_section, self.config_name, self.expression)
log.info("Doing a set: %s" %
str((self.config_section, self.config_name, self.expression)))
elif self.directive_type == "Unset":
Config.removeLocalOveride(self.config_section, self.config_name)
log.info("Doing an uset: %s" %
str((self.config_section, self.config_name)))
elif self.directive_type in ("GlobalSet", "GlobalAdd"):
pass # already handled this
elif self.directive_type == "Add":
Config.addLocalOveride(
self.config_section, self.config_name, self.expression)
log.info("Adding a setting: %s" %
str((self.config_section, self.config_name, self.expression)))
else:
raise DirectiveError(
"Directive not understood: '%s'" % self.directive_type)
return ""
def assignParent(self, *args, **kw):
"""Assign our parent
We can use this opportunity to now determine if we are a global
"""
super(VB2PYDirective, self).assignParent(*args, **kw)
#
# Check if we are a global level option - if so we set it now
if self.directive_type == "GlobalSet":
Config.setLocalOveride(
self.config_section, self.config_name, self.expression)
elif self.directive_type == "GlobalAdd":
Config.addLocalOveride(
self.config_section, self.config_name, self.expression)
class VBPass(VBCodeBlock):
"""Represents an empty statement"""
def renderAsCode(self, indent=0):
"""Render it!"""
return "%spass\n" % (self.getIndent(indent),)
class VBRenderDirect(VBCodeBlock):
"""Represents a pre-rendered statement"""
def __init__(self, text, indent=0, crlf=0):
"""Initialize"""
super(VBRenderDirect, self).__init__()
self.identifier = text
self.indent = indent
self.crlf = crlf
def renderAsCode(self, indent=0):
"""Render it!"""
s = ""
if self.indent:
s += self.getIndent(indent)
s += self.identifier
if self.crlf:
s += "\n"
return s
def asString(self):
"""Return string representation"""
return self.identifier
class VBNothing(VBCodeBlock):
"""Represents a block which renders to nothing at all"""
def renderAsCode(self, indent=0):
"""Render it!"""
return ""
class VBParserFailure(VBConsumer):
"""Represents a block which failed to parse"""
def renderAsCode(self, indent=0):
"""Render it!"""
fail_option = Config["General", "InsertIntoFailedCode"].lower()
warn = self.getWarning("ParserError", self.element.text, indent, crlf=1)
warn += self.getWarning(
"ParserStop", "Conversion of VB code halted", indent, crlf=1)
indentation = self.getIndent(indent)
message = 'VB2PY Code conversion failed at this point'
if fail_option == "exception":
warn += "%sraise NotImplemented('%s')" % (indentation, message)
elif fail_option == "warning":
warn += "%simport warnings;warnings.warn('%s')" % (indentation, message)
#
return warn
# FIXME: Circular import!
from vb2py.vbparser import *
# Blocks which do not contain valid statements
# If a block contains only these then it needs a pass
# statement to be a valid Python suite
NonCodeBlocks = (VBComment, VBUnrendered, VB2PYDirective)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable
import hydra
import numpy as np
import omegaconf
import torch
import mbrl.algorithms.mbpo as mbpo
import mbrl.algorithms.pets as pets
import mbrl.algorithms.planet as planet
import mbrl.algorithms.dreamer as dreamer #added April 2022 for project
import mbrl.util.env
import pandas as pd
from collections.abc import Iterable
import wandb
def flatten_config(cfg, curr_nested_key):
"""The nested config file provided by Hydra cannot be parsed by wandb. This recursive function flattens the config file, separating the nested keys and their parents via an underscore. Allows for easier configuration using wandb.
Args:
cfg (Hydra config): The nested config file used by Hydra.
curr_nested_key (str): The current parent key (used for recursive calls).
Returns:
(dict): A flatt configuration dictionary.
"""
flat_cfg = {}
for curr_key in cfg.keys():
# deal with missing values
try:
curr_item = cfg[curr_key]
except Exception:
curr_item = 'NA'
# deal with lists
if type(curr_item) == list or type(curr_item) == omegaconf.listconfig.ListConfig:
for nested_idx, nested_item in enumerate(curr_item):
list_nested_key = f"{curr_nested_key}_{curr_key}_{nested_idx}"
flat_cfg[list_nested_key] = nested_item
# check if item is also a config
# recurse
elif isinstance(curr_item, Iterable) and type(curr_item) != str:
flat_cfg.update(flatten_config(curr_item, f"{curr_nested_key}_{curr_key}"))
# otherwise just add to return dict
else:
flat_cfg[f"{curr_nested_key}_{curr_key}"] = curr_item
return flat_cfg
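# Minimal usage sketch (added for illustration; the keys and values below are
# made up): flatten_config joins nested keys with underscores so that
# wandb.config can store them as a flat mapping.
def _flatten_config_example():
    cfg = {"algorithm": {"name": "pets", "num_epochs": 50}, "seed": 0}
    flat = flatten_config(cfg, "")
    # flat == {"_algorithm_name": "pets", "_algorithm_num_epochs": 50, "_seed": 0}
    return flat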
@hydra.main(config_path="conf", config_name="main")
def run(cfg: omegaconf.DictConfig):
env, term_fn, reward_fn = mbrl.util.env.EnvHandler.make_env(cfg)
for config_item in cfg:
wandb.config[config_item] = cfg[config_item]
flat_cfg = flatten_config(cfg, "")
for config_item in flat_cfg:
wandb.config[config_item] = flat_cfg[config_item]
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
if cfg.algorithm.name == "pets":
return pets.train(env, term_fn, reward_fn, cfg)
if cfg.algorithm.name == "mbpo":
test_env, *_ = mbrl.util.env.EnvHandler.make_env(cfg)
return mbpo.train(env, test_env, term_fn, cfg)
if cfg.algorithm.name == "planet":
return planet.train(env, cfg)
if cfg.algorithm.name == "dreamer": #added for project
return dreamer.train(env, cfg)
if __name__ == "__main__":
wandb.init(project="MBRL_Duckyt", entity="mbrl_ducky", monitor_gym=True)
run()
|
__copyright__ = \
"""
Copyright (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.
All rights reserved.
This software is covered by US patents and copyright.
This source code is to be used for academic research purposes only, and no commercial use is allowed.
For any questions, please contact Edward J. Delp (ace@ecn.purdue.edu) at Purdue University.
Last Modified: 10/02/2019
"""
__license__ = "CC BY-NC-SA 4.0"
__authors__ = "Javier Ribera, David Guera, Yuhao Chen, Edward J. Delp"
__version__ = "1.6.0"
import h5py
import numpy as np
import torch
import shutil
def save_net(fname, net):
with h5py.File(fname, 'w') as h5f:
for k, v in net.state_dict().items():
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
with h5py.File(fname, 'r') as h5f:
for k, v in net.state_dict().items():
param = torch.from_numpy(np.asarray(h5f[k]))
v.copy_(param)
def save_checkpoint(state, is_best, task_id, filename='checkpoint.pth.tar'):
torch.save(state, task_id+filename)
if is_best:
shutil.copyfile(task_id+filename, task_id+'model_best.pth.tar')
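# Illustrative usage sketch (not part of the original utilities; the names are
# hypothetical): build a checkpoint dict and keep a copy of the best model.
def _checkpoint_usage_example(net, optimizer, epoch, is_best):
    state = {
        'epoch': epoch,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    # Writes 'task0_checkpoint.pth.tar' and, when is_best is True, copies it
    # to 'task0_model_best.pth.tar'.
    save_checkpoint(state, is_best, task_id='task0_')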
"""
Copyright (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.
All rights reserved.
This software is covered by US patents and copyright.
This source code is to be used for academic research purposes only, and no commercial use is allowed.
For any questions, please contact Edward J. Delp (ace@ecn.purdue.edu) at Purdue University.
Last Modified: 10/02/2019
"""
|
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from copy import deepcopy
from functools import partial
import pytest
import torch
from nncf.common.utils.logger import logger as nncf_logger
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.base_handler import SEHBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_depth import EDBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_kernel import EKBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_width import EWBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elasticity_dim import ElasticityDim
from nncf.torch.model_creation import create_nncf_network
from tests.torch.helpers import BasicConvTestModel
from tests.torch.helpers import get_empty_config
from tests.torch.nas.creators import build_elastic_model_from_handler
from tests.torch.nas.descriptors import ElasticityDesc
from tests.torch.nas.helpers import do_conv2d
from tests.torch.nas.helpers import move_model_to_cuda_if_available
from tests.torch.nas.test_elastic_depth import BASIC_ELASTIC_DEPTH_PARAMS
from tests.torch.nas.test_elastic_depth import BasicTestSuperNet
from tests.torch.nas.test_elastic_depth import DepthBasicConvTestModel
from tests.torch.nas.test_elastic_kernel import BASIC_ELASTIC_KERNEL_PARAMS
from tests.torch.nas.test_elastic_width import BASIC_ELASTIC_WIDTH_PARAMS
from tests.torch.nas.test_elastic_width import TwoConvAddConvTestModel
from tests.torch.nas.test_elastic_width import TwoSequentialConvBNTestModel
@pytest.fixture()
def _nncf_caplog(caplog):
nncf_logger.propagate = True
yield caplog
nncf_logger.propagate = False
def ref_width_output_fn(model, x):
return model.get_minimal_subnet_output_without_reorg(x)
COMMON_WIDTH_STATE_DESCS = [
ElasticityDesc(
ElasticityDim.WIDTH,
model_cls=TwoConvAddConvTestModel,
params=BASIC_ELASTIC_WIDTH_PARAMS,
ref_state={
'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
'grouped_node_names_to_prune': [
['TwoConvAddConvTestModel/NNCFConv2d[conv1]/conv2d_0',
'TwoConvAddConvTestModel/NNCFConv2d[conv2]/conv2d_0']
]
},
ref_output_fn=ref_width_output_fn
),
ElasticityDesc(
ElasticityDim.WIDTH,
model_cls=TwoSequentialConvBNTestModel,
params=BASIC_ELASTIC_WIDTH_PARAMS,
ref_state={
'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
'grouped_node_names_to_prune': [
['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[0]/conv2d_0'],
['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[3]/conv2d_0']
]
},
ref_output_fn=ref_width_output_fn
),
]
def ref_kernel_output_fn(model, x):
conv = model.conv
ref_padding = 1
ref_weights = conv.weight[:, :, 1:4, 1:4]
return do_conv2d(conv, x, weight=ref_weights, padding=ref_padding)
COMMON_KERNEL_DESC = ElasticityDesc(
ElasticityDim.KERNEL,
model_cls=partial(BasicConvTestModel, 1, out_channels=1, kernel_size=5),
params=BASIC_ELASTIC_KERNEL_PARAMS,
ref_output_fn=ref_kernel_output_fn,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: ['BasicConvTestModel/NNCFConv2d[conv]/conv2d_0']
},
input_size=[1, 1, 5, 5]
)
COMMON_DEPTH_SUPERNET_DESC = ElasticityDesc(
ElasticityDim.DEPTH,
model_cls=BasicTestSuperNet,
params={
'mode': 'auto',
'min_block_size': 2
},
ref_state={
'elasticity_params': {
'allow_linear_combination': False,
'allow_nested_blocks': False,
'max_block_size': 50,
'min_block_size': 2,
'skipped_blocks': None
},
EDBuilderStateNames.SKIPPED_BLOCKS: [
{
'start_node_name': 'BasicTestSuperNet/NNCFConv2d[conv1]/conv2d_0',
'end_node_name': 'BasicTestSuperNet/__add___0'
}
],
EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: {0: [0]},
EDBuilderStateNames.OrdinalIds: [[1, 3]],
},
ref_search_space=[[0], []]
)
def ref_depth_output_fn(model, x):
model.set_skipped_layers(['conv1'])
return model(x)
COMMON_DEPTH_BASIC_DESC = ElasticityDesc(
ElasticityDim.DEPTH,
model_cls=DepthBasicConvTestModel,
params=BASIC_ELASTIC_DEPTH_PARAMS,
ref_output_fn=ref_depth_output_fn,
ref_search_space=[[0], []],
ref_state={
'elasticity_params': {
'allow_linear_combination': False,
'allow_nested_blocks': False,
'max_block_size': 50,
'min_block_size': 6,
'skipped_blocks': [['DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv0]/conv2d_0',
'DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv1]/conv2d_0']]
},
EDBuilderStateNames.SKIPPED_BLOCKS: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_state'],
EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_dependencies'],
EDBuilderStateNames.OrdinalIds: None,
}
)
LIST_STATE_AFTER_BUILD_DESCS = [
*COMMON_WIDTH_STATE_DESCS,
COMMON_DEPTH_SUPERNET_DESC,
COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_AFTER_BUILD_DESCS, ids=map(str, LIST_STATE_AFTER_BUILD_DESCS))
def test_can_get_builder_state_after_build(desc):
_, builder = desc.build_handler()
actual_state = builder.get_state()
assert actual_state == desc.ref_state
ELASTIC_WIDTH_PARAMS_BB = {'filter_importance': 'L2', **BASIC_ELASTIC_WIDTH_PARAMS}
LIST_STATE_BEFORE_BUILD_DESCS = [
ElasticityDesc(
ElasticityDim.WIDTH,
params=ELASTIC_WIDTH_PARAMS_BB,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: ELASTIC_WIDTH_PARAMS_BB,
EWBuilderStateNames.GROUPED_NODE_NAMES_TO_PRUNE: []
}
),
ElasticityDesc(
ElasticityDim.KERNEL,
params=BASIC_ELASTIC_KERNEL_PARAMS,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: []
}
),
COMMON_DEPTH_BASIC_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_BEFORE_BUILD_DESCS, ids=map(str, LIST_STATE_BEFORE_BUILD_DESCS))
class TestBeforeBuild:
def test_can_get_builder_state_before_build(self, desc: ElasticityDesc):
builder = desc.create_builder()
actual_state = builder.get_state()
assert actual_state == desc.ref_state
def test_output_warning_when_state_overrides_params(self, desc: ElasticityDesc, _nncf_caplog):
old_builder = desc.create_builder_with_config({})
old_state = old_builder.get_state()
new_params = desc.params
new_builder = desc.create_builder_with_config(new_params)
new_builder.load_state(old_state)
record = next(iter(_nncf_caplog.records))
assert record.levelno == logging.WARNING
def test_no_warning_when_state_and_params_are_the_same(self, desc: ElasticityDesc, _nncf_caplog):
old_builder = desc.create_builder()
old_state = old_builder.get_state()
new_params = desc.params.copy()
new_builder = desc.create_builder_with_config(new_params)
new_builder.load_state(old_state)
assert not _nncf_caplog.records
LIST_LOAD_STATE_DESCS = [
COMMON_DEPTH_BASIC_DESC,
*COMMON_WIDTH_STATE_DESCS,
COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_LOAD_STATE_DESCS, ids=map(str, LIST_LOAD_STATE_DESCS))
def test_can_load_handler_state(desc: ElasticityDesc):
model = desc.model_cls()
move_model_to_cuda_if_available(model)
model_copy = deepcopy(model)
device = next(iter(model.parameters())).device
dummy_input = torch.ones(model.INPUT_SIZE).to(device)
input_size = desc.input_size
if not input_size:
input_size = model.INPUT_SIZE
config = get_empty_config(input_sample_sizes=input_size)
old_nncf_network = create_nncf_network(model, config)
old_builder = desc.create_builder()
old_handler = old_builder.build(old_nncf_network)
elastic_model = build_elastic_model_from_handler(old_nncf_network, old_handler)
old_handler.activate_minimum_subnet()
old_output = elastic_model(dummy_input)
ref_output = desc.ref_output_fn(model, dummy_input)
assert torch.allclose(old_output, ref_output)
new_nncf_network = create_nncf_network(model_copy, config)
builder_state = old_builder.get_state()
# no need in config to restore builder state
new_builder = desc.create_builder_with_config({})
new_builder.load_state(builder_state)
new_handler = new_builder.build(new_nncf_network)
elastic_model = build_elastic_model_from_handler(new_nncf_network, new_handler)
new_handler.activate_minimum_subnet()
new_output = elastic_model(dummy_input)
assert torch.allclose(old_output, new_output)
|
{
'includes': [
'common.gypi',
],
'targets': [
{
'target_name': 'animator',
'type': 'static_library',
'include_dirs': [
'../include/config',
'../include/core',
'../include/effects',
'../include/animator',
'../include/views',
'../include/xml',
'../include/utils',
'../include/images',
'../src/utils',
],
'sources': [
'../include/animator/SkAnimator.h',
'../include/animator/SkAnimatorView.h',
'../src/animator/SkAnimate.h',
'../src/animator/SkAnimateActive.cpp',
'../src/animator/SkAnimateActive.h',
'../src/animator/SkAnimateBase.cpp',
'../src/animator/SkAnimateBase.h',
'../src/animator/SkAnimateField.cpp',
'../src/animator/SkAnimateMaker.cpp',
'../src/animator/SkAnimateMaker.h',
'../src/animator/SkAnimateProperties.h',
'../src/animator/SkAnimateSet.cpp',
'../src/animator/SkAnimateSet.h',
'../src/animator/SkAnimator.cpp',
'../src/animator/SkAnimatorScript.cpp',
'../src/animator/SkAnimatorScript.h',
#'../src/animator/SkAnimatorScript2.cpp', fails on windows
#'../src/animator/SkAnimatorScript2.h',
'../src/animator/SkBoundable.cpp',
'../src/animator/SkBoundable.h',
'../src/animator/SkBuildCondensedInfo.cpp',
#'../src/animator/SkCondensedDebug.cpp', fails on windows
#'../src/animator/SkCondensedRelease.cpp',
'../src/animator/SkDisplayable.cpp',
'../src/animator/SkDisplayable.h',
'../src/animator/SkDisplayAdd.cpp',
'../src/animator/SkDisplayAdd.h',
'../src/animator/SkDisplayApply.cpp',
'../src/animator/SkDisplayApply.h',
'../src/animator/SkDisplayBounds.cpp',
'../src/animator/SkDisplayBounds.h',
'../src/animator/SkDisplayEvent.cpp',
'../src/animator/SkDisplayEvent.h',
'../src/animator/SkDisplayEvents.cpp',
'../src/animator/SkDisplayEvents.h',
'../src/animator/SkDisplayInclude.cpp',
'../src/animator/SkDisplayInclude.h',
'../src/animator/SkDisplayInput.cpp',
'../src/animator/SkDisplayInput.h',
'../src/animator/SkDisplayList.cpp',
'../src/animator/SkDisplayList.h',
'../src/animator/SkDisplayMath.cpp',
'../src/animator/SkDisplayMath.h',
'../src/animator/SkDisplayMovie.cpp',
'../src/animator/SkDisplayMovie.h',
'../src/animator/SkDisplayNumber.cpp',
'../src/animator/SkDisplayNumber.h',
'../src/animator/SkDisplayPost.cpp',
'../src/animator/SkDisplayPost.h',
'../src/animator/SkDisplayRandom.cpp',
'../src/animator/SkDisplayRandom.h',
'../src/animator/SkDisplayScreenplay.cpp',
'../src/animator/SkDisplayScreenplay.h',
'../src/animator/SkDisplayType.cpp',
'../src/animator/SkDisplayType.h',
'../src/animator/SkDisplayTypes.cpp',
'../src/animator/SkDisplayTypes.h',
'../src/animator/SkDisplayXMLParser.cpp',
'../src/animator/SkDisplayXMLParser.h',
'../src/animator/SkDraw3D.cpp',
'../src/animator/SkDraw3D.h',
'../src/animator/SkDrawable.cpp',
'../src/animator/SkDrawable.h',
'../src/animator/SkDrawBitmap.cpp',
'../src/animator/SkDrawBitmap.h',
'../src/animator/SkDrawBlur.cpp',
'../src/animator/SkDrawBlur.h',
'../src/animator/SkDrawClip.cpp',
'../src/animator/SkDrawClip.h',
'../src/animator/SkDrawColor.cpp',
'../src/animator/SkDrawColor.h',
'../src/animator/SkDrawDash.cpp',
'../src/animator/SkDrawDash.h',
'../src/animator/SkDrawDiscrete.cpp',
'../src/animator/SkDrawDiscrete.h',
'../src/animator/SkDrawEmboss.cpp',
'../src/animator/SkDrawEmboss.h',
'../src/animator/SkDrawExtraPathEffect.cpp',
'../src/animator/SkDrawFull.cpp',
'../src/animator/SkDrawFull.h',
'../src/animator/SkDrawGradient.cpp',
'../src/animator/SkDrawGradient.h',
'../src/animator/SkDrawGroup.cpp',
'../src/animator/SkDrawGroup.h',
'../src/animator/SkDrawLine.cpp',
'../src/animator/SkDrawLine.h',
'../src/animator/SkDrawMatrix.cpp',
'../src/animator/SkDrawMatrix.h',
'../src/animator/SkDrawOval.cpp',
'../src/animator/SkDrawOval.h',
'../src/animator/SkDrawPaint.cpp',
'../src/animator/SkDrawPaint.h',
'../src/animator/SkDrawPath.cpp',
'../src/animator/SkDrawPath.h',
'../src/animator/SkDrawPoint.cpp',
'../src/animator/SkDrawPoint.h',
'../src/animator/SkDrawRectangle.cpp',
'../src/animator/SkDrawRectangle.h',
'../src/animator/SkDrawSaveLayer.cpp',
'../src/animator/SkDrawSaveLayer.h',
'../src/animator/SkDrawShader.cpp',
'../src/animator/SkDrawShader.h',
'../src/animator/SkDrawText.cpp',
'../src/animator/SkDrawText.h',
'../src/animator/SkDrawTextBox.cpp',
'../src/animator/SkDrawTextBox.h',
'../src/animator/SkDrawTo.cpp',
'../src/animator/SkDrawTo.h',
'../src/animator/SkDrawTransparentShader.cpp',
'../src/animator/SkDrawTransparentShader.h',
'../src/animator/SkDump.cpp',
'../src/animator/SkDump.h',
'../src/animator/SkExtras.h',
'../src/animator/SkGetCondensedInfo.cpp',
'../src/animator/SkHitClear.cpp',
'../src/animator/SkHitClear.h',
'../src/animator/SkHitTest.cpp',
'../src/animator/SkHitTest.h',
'../src/animator/SkIntArray.h',
'../src/animator/SkMatrixParts.cpp',
'../src/animator/SkMatrixParts.h',
'../src/animator/SkMemberInfo.cpp',
'../src/animator/SkMemberInfo.h',
'../src/animator/SkOpArray.cpp',
'../src/animator/SkOpArray.h',
'../src/animator/SkOperand.h',
'../src/animator/SkOperand2.h',
'../src/animator/SkOperandInterpolator.h',
'../src/animator/SkOperandIterpolator.cpp',
'../src/animator/SkPaintParts.cpp',
'../src/animator/SkPaintParts.h',
'../src/animator/SkParseSVGPath.cpp',
'../src/animator/SkPathParts.cpp',
'../src/animator/SkPathParts.h',
'../src/animator/SkPostParts.cpp',
'../src/animator/SkPostParts.h',
'../src/animator/SkScript.cpp',
'../src/animator/SkScript.h',
'../src/animator/SkScript2.h',
'../src/animator/SkScriptCallBack.h',
'../src/animator/SkScriptDecompile.cpp',
'../src/animator/SkScriptRuntime.cpp',
'../src/animator/SkScriptRuntime.h',
'../src/animator/SkScriptTokenizer.cpp',
'../src/animator/SkSnapshot.cpp',
'../src/animator/SkSnapshot.h',
'../src/animator/SkTDArray_Experimental.h',
'../src/animator/SkTextOnPath.cpp',
'../src/animator/SkTextOnPath.h',
'../src/animator/SkTextToPath.cpp',
'../src/animator/SkTextToPath.h',
'../src/animator/SkTime.cpp',
'../src/animator/SkTypedArray.cpp',
'../src/animator/SkTypedArray.h',
'../src/animator/SkXMLAnimatorWriter.cpp',
'../src/animator/SkXMLAnimatorWriter.h',
],
'direct_dependent_settings': {
'include_dirs': [
'../include/animator',
],
},
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import collections
import enum
import functools
import itertools
import operator
import string
from typing import (Any, Callable, List, NamedTuple, Optional, Sequence, Union,
Tuple, Type)
import warnings
import numpy as onp
from ..util import partial, prod
from .. import core
from .. import ad_util
from .. import api
from .. import linear_util as lu
from .. import dtypes
from .. import lazy
from .. import lib
from ..config import flags
from ..core import Primitive
from ..abstract_arrays import (UnshapedArray, ShapedArray, ConcreteArray,
AbstractToken, array_types, make_shaped_array,
raise_to_shaped, abstract_token, canonicalize_shape)
from ..interpreters import partial_eval as pe
from ..interpreters import xla
from ..interpreters import pxla
from ..interpreters import ad
from ..interpreters import batching
from ..interpreters import masking
from ..util import curry, cache, safe_zip, unzip2, prod
from ..tree_util import build_tree, tree_unflatten, tree_map
from ..lib import pytree
from ..lib import xla_bridge
from ..lib import xla_client
xb = xla_bridge
xc = xla_client
xops = xla_client.ops
FLAGS = flags.FLAGS
_max = builtins.max
_min = builtins.min
_reduce = functools.reduce
Array = Any
DType = Any
Shape = Sequence[int]
@cache()
def broadcast_shapes(*shapes):
"""Returns the shape that results from NumPy broadcasting of `shapes`."""
if len(shapes) == 1:
return shapes[0]
ndim = _max(len(shape) for shape in shapes)
shapes = onp.array([(1,) * (ndim - len(shape)) + shape for shape in shapes])
is_zero = onp.any(shapes == 0, axis=0)
max_shape = onp.max(shapes, axis=0)
result_shape = onp.where(is_zero, 0, max_shape)
if not onp.all((shapes == result_shape) | (shapes == 1)):
raise ValueError("Incompatible shapes for broadcasting: {}"
.format(tuple(map(tuple, shapes))))
return canonicalize_shape(result_shape)
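# Minimal illustration of the broadcasting rule implemented above (added as a
# sketch; not part of the original module). Shapes are aligned from the right,
# size-1 axes stretch to match, a zero-sized axis propagates, and any other
# mismatch raises ValueError.
def _broadcast_shapes_example():
  assert broadcast_shapes((3, 1), (1, 4)) == (3, 4)
  assert broadcast_shapes((2, 0, 5), (1, 1, 5)) == (2, 0, 5)
  # broadcast_shapes((2, 3), (4, 3)) would raise ValueError.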
def _identity(x): return x
### traceables
def neg(x: Array) -> Array:
r"""Elementwise negation: :math:`-x`."""
return neg_p.bind(x)
def sign(x: Array) -> Array:
r"""Elementwise sign.
For floating-point inputs, returns
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
-0 & x = -0\\
\mathit{NaN} & x = \mathit{NaN}\\
+0 & x = +0\\
1 & x > 0
\end{cases}`
For signed integer inputs, returns
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
0 & x = 0\\
1 & x > 0
\end{cases}`
For complex inputs, returns the complex phase, i.e.
:math:`\mathrm{sign}(x) = \frac{x}{|x|}`.
"""
return sign_p.bind(x)
def nextafter(x1: Array, x2: Array) -> Array:
r"""Returns the next representable value after `x1` in the direction of `x2`."""
return nextafter_p.bind(_brcast(x1, x2), _brcast(x2, x1))
def floor(x: Array) -> Array:
r"""Elementwise floor: :math:`\left\lfloor x \right\rfloor`."""
return floor_p.bind(x)
def ceil(x: Array) -> Array:
r"""Elementwise ceiling: :math:`\left\lceil x \right\rceil`."""
return ceil_p.bind(x)
def round(x: Array) -> Array:
r"""Elementwise round.
Rounds values to the nearest integer. Halfway values (e.g., `0.5`) are rounded
away from zero."""
return round_p.bind(x)
def is_finite(x: Array) -> Array:
r"""Elementwise :math:`\mathrm{isfinite}`.
  For each element x returns `True` if and only if x is neither
  :math:`\pm\infty` nor :math:`\mathit{NaN}`.
"""
return is_finite_p.bind(x)
def exp(x: Array) -> Array:
r"""Elementwise exponential: :math:`e^x`."""
return exp_p.bind(x)
def expm1(x: Array) -> Array:
r"""Elementwise :math:`e^{x} - 1`."""
return expm1_p.bind(x)
def log(x: Array) -> Array:
r"""Elementwise natural logarithm: :math:`\mathrm{log}(x)`."""
return log_p.bind(x)
def log1p(x: Array) -> Array:
r"""Elementwise :math:`\mathrm{log}(1 + x)`."""
return log1p_p.bind(x)
def tanh(x: Array) -> Array:
r"""Elementwise hyperbolic tangent: :math:`\mathrm{tanh}(x)`."""
return tanh_p.bind(x)
def sin(x: Array) -> Array:
r"""Elementwise sine: :math:`\mathrm{sin}(x)`."""
return sin_p.bind(x)
def cos(x: Array) -> Array:
r"""Elementwise cosine: :math:`\mathrm{cos}(x)`."""
return cos_p.bind(x)
def atan2(x: Array, y: Array) -> Array:
r"""Elementwise arc tangent of two variables:
:math:`\mathrm{atan}({x \over y})`."""
return atan2_p.bind(x, y)
def betainc(a: Array, b: Array, x: Array) -> Array:
r"""Elementwise regularized incomplete beta integral."""
return regularized_incomplete_beta_p.bind(a, b, x)
def lgamma(x: Array) -> Array:
r"""Elementwise log gamma: :math:`\mathrm{log}(\Gamma(x))`."""
return lgamma_p.bind(x)
def digamma(x: Array) -> Array:
r"""Elementwise digamma: :math:`\psi(x)`."""
return digamma_p.bind(x)
def igamma(a: Array, x: Array) -> Array:
r"""Elementwise regularized incomplete gamma function."""
return igamma_p.bind(a, x)
def igammac(a: Array, x: Array) -> Array:
r"""Elementwise complementary regularized incomplete gamma function."""
return igammac_p.bind(a, x)
def igamma_grad_a(a: Array, x: Array) -> Array:
r"""Elementwise derivative of the regularized incomplete gamma function."""
return igamma_grad_a_p.bind(a, x)
def bessel_i0e(x: Array) -> Array:
r"""Exponentially scaled modified Bessel function of order 0:
:math:`\mathrm{i0e}(x) = e^{-|x|} \mathrm{i0}(x)`
"""
return bessel_i0e_p.bind(x)
def bessel_i1e(x: Array) -> Array:
r"""Exponentially scaled modified Bessel function of order 1:
:math:`\mathrm{i1e}(x) = e^{-|x|} \mathrm{i1}(x)`
"""
return bessel_i1e_p.bind(x)
def erf(x: Array) -> Array:
r"""Elementwise error function: :math:`\mathrm{erf}(x)`."""
return erf_p.bind(x)
def erfc(x: Array) -> Array:
r"""Elementwise complementary error function:
:math:`\mathrm{erfc}(x) = 1 - \mathrm{erf}(x)`."""
return erfc_p.bind(x)
def erf_inv(x: Array) -> Array:
r"""Elementwise inverse error function: :math:`\mathrm{erf}^{-1}(x)`."""
return erf_inv_p.bind(x)
def real(x: Array) -> Array:
r"""Elementwise extract real part: :math:`\mathrm{Re}(x)`.
Returns the real part of a complex number.
"""
return real_p.bind(x)
def imag(x: Array) -> Array:
r"""Elementwise extract imaginary part: :math:`\mathrm{Im}(x)`.
Returns the imaginary part of a complex number.
"""
return imag_p.bind(x)
def complex(x: Array, y: Array) -> Array:
r"""Elementwise make complex number: :math:`x + jy`.
Builds a complex number from real and imaginary parts.
"""
return complex_p.bind(_brcast(x, y), _brcast(y, x))
def conj(x: Array) -> Array:
r"""Elementwise complex conjugate function: :math:`\overline{x}`."""
return conj_p.bind(x, input_dtype=_dtype(x))
def abs(x: Array) -> Array:
r"""Elementwise absolute value: :math:`|x|`."""
return abs_p.bind(x)
def pow(x: Array, y: Array) -> Array:
r"""Elementwise power: :math:`x^y`."""
return pow_p.bind(x, y)
def integer_pow(x: Array, y: int) -> Array:
r"""Elementwise power: :math:`x^y`, where :math:`y` is a fixed integer."""
if y == 0:
return _ones(x)
elif y == 1:
return x
else:
return integer_pow_p.bind(x, y=y)
def sqrt(x: Array) -> Array:
r"""Elementwise square root: :math:`\sqrt{x}`."""
return sqrt_p.bind(x)
def rsqrt(x: Array) -> Array:
r"""Elementwise reciprocal square root: :math:`1 \over \sqrt{x}."""
return rsqrt_p.bind(x)
def bitwise_not(x: Array) -> Array:
r"""Elementwise NOT: :math:`\neg x`."""
return not_p.bind(x)
def bitwise_and(x: Array, y: Array) -> Array:
r"""Elementwise AND: :math:`x \wedge y`."""
return and_p.bind(x, y)
def bitwise_or(x: Array, y: Array) -> Array:
r"""Elementwise OR: :math:`x \vee y`."""
return or_p.bind(x, y)
def bitwise_xor(x: Array, y: Array) -> Array:
r"""Elementwise exclusive OR: :math:`x \oplus y`."""
return xor_p.bind(x, y)
def population_count(x: Array) -> Array:
r"""Elementwise popcount, count the number of set bits in each element."""
return population_count_p.bind(x)
def add(x: Array, y: Array) -> Array:
r"""Elementwise addition: :math:`x + y`."""
return add_p.bind(x, y)
def sub(x: Array, y: Array) -> Array:
r"""Elementwise subtraction: :math:`x - y`."""
return sub_p.bind(x, y)
def mul(x: Array, y: Array) -> Array:
r"""Elementwise multiplication: :math:`x \times y`."""
return mul_p.bind(x, y)
def div(x: Array, y: Array) -> Array:
r"""Elementwise division: :math:`x \over y`."""
return div_p.bind(x, y)
def rem(x: Array, y: Array) -> Array:
r"""Elementwise remainder: :math:`x \bmod y`."""
return rem_p.bind(x, y)
def max(x: Array, y: Array) -> Array:
r"""Elementwise maximum: :math:`\mathrm{max}(x, y)`
For complex numbers, uses a lexicographic comparison on the
`(real, imaginary)` pairs."""
return max_p.bind(x, y)
def min(x: Array, y: Array) -> Array:
r"""Elementwise minimum: :math:`\mathrm{min}(x, y)`
For complex numbers, uses a lexicographic comparison on the
`(real, imaginary)` pairs."""
return min_p.bind(x, y)
def shift_left(x: Array, y: Array) -> Array:
r"""Elementwise left shift: :math:`x \ll y`."""
return shift_left_p.bind(x, y)
def shift_right_arithmetic(x: Array, y: Array) -> Array:
r"""Elementwise arithmetic right shift: :math:`x \gg y`."""
return shift_right_arithmetic_p.bind(x, y)
def shift_right_logical(x: Array, y: Array) -> Array:
r"""Elementwise logical right shift: :math:`x \gg y`."""
return shift_right_logical_p.bind(x, y)
def eq(x: Array, y: Array) -> Array:
r"""Elementwise equals: :math:`x = y`."""
return eq_p.bind(x, y)
def ne(x: Array, y: Array) -> Array:
r"""Elementwise not-equals: :math:`x \neq y`."""
return ne_p.bind(x, y)
def ge(x: Array, y: Array) -> Array:
r"""Elementwise greater-than-or-equals: :math:`x \geq y`."""
return ge_p.bind(x, y)
def gt(x: Array, y: Array) -> Array:
r"""Elementwise greater-than: :math:`x > y`."""
return gt_p.bind(x, y)
def le(x: Array, y: Array) -> Array:
r"""Elementwise less-than-or-equals: :math:`x \leq y`."""
return le_p.bind(x, y)
def lt(x: Array, y: Array) -> Array:
r"""Elementwise less-than: :math:`x < y`."""
return lt_p.bind(x, y)
def convert_element_type(operand: Array, new_dtype: DType) -> Array:
"""Elementwise cast.
Wraps XLA's `ConvertElementType
<https://www.tensorflow.org/xla/operation_semantics#convertelementtype>`_
operator, which performs an elementwise conversion from one type to another.
Similar to a C++ `static_cast`.
Args:
operand: an array or scalar value to be cast
new_dtype: the new type. Should be a NumPy type.
Returns:
An array with the same shape as `operand`, cast elementwise to `new_dtype`.
"""
new_dtype = dtypes.canonicalize_dtype(new_dtype)
# Avoids dropping precision by casting Python scalars to the default Jax
# type. If we passed a Python scalar directly to the bind call below, it is
# cast to the default type as part of the calling convention.
if type(operand) in dtypes.python_scalar_dtypes:
operand = onp.asarray(operand, new_dtype)
old_dtype = dtypes.canonicalize_dtype(_dtype(operand))
if old_dtype == new_dtype:
return operand
if (dtypes.issubdtype(old_dtype, onp.complexfloating) and
not dtypes.issubdtype(new_dtype, onp.complexfloating)):
msg = "Casting complex values to real discards the imaginary part"
warnings.warn(msg, onp.ComplexWarning, stacklevel=2)
return convert_element_type_p.bind(
operand, new_dtype=new_dtype, old_dtype=old_dtype)
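# Illustrative sketch (not part of the original source), showing behaviors
# visible in convert_element_type above; the arrays x_f32 and x_c64 are
# hypothetical.
#
#   convert_element_type(onp.arange(3), onp.float32)  # -> float32 array [0., 1., 2.]
#   convert_element_type(x_f32, onp.float32)          # same dtype, returned unchanged
#   convert_element_type(x_c64, onp.float32)          # keeps the real part, emits ComplexWarning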
def bitcast_convert_type(operand: Array, new_dtype: DType) -> Array:
"""Elementwise bitcast.
Wraps XLA's `BitcastConvertType
<https://www.tensorflow.org/xla/operation_semantics#bitcastconverttype>`_
operator, which performs a bit cast from one type to another. The bitwidth
of the source and destination types must match.
Args:
operand: an array or scalar value to be cast
new_dtype: the new type. Should be a NumPy type.
Returns:
An array with the same shape as `operand`, bitcast elementwise to
`new_dtype`.
"""
new_dtype = dtypes.canonicalize_dtype(new_dtype)
old_dtype = _dtype(operand)
if old_dtype != new_dtype:
return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
else:
return operand
def clamp(min: Array, x: Array, max: Array) -> Array:
r"""Elementwise clamp.
Returns :math:`\mathrm{clamp}(x) = \begin{cases}
\mathit{min} & \text{if } x < \mathit{min},\\
\mathit{max} & \text{if } x > \mathit{max},\\
x & \text{otherwise}
\end{cases}`.
"""
return clamp_p.bind(min, x, max)
def concatenate(operands: Sequence[Array], dimension: int) -> Array:
"""Concatenates a sequence of arrays along `dimension`.
Wraps XLA's `Concatenate
<https://www.tensorflow.org/xla/operation_semantics#concatenate>`_
operator.
Args:
operands: a sequence of arrays to concatenate. The arrays must have equal
shapes, except in the `dimension` axis.
dimension: the dimension along which to concatenate the arrays.
Returns:
An array containing the concatenation.
"""
return concatenate_p.bind(*operands, dimension=dimension)
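# Illustrative sketch (not part of the original source): operands must agree on
# every axis except `dimension`. Shapes below are hypothetical.
#
#   a.shape == (2, 3); b.shape == (2, 5)
#   concatenate([a, b], dimension=1).shape     # -> (2, 8)
#   concatenate([a, a, a], dimension=0).shape  # -> (6, 3)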
Precision = xla_client.PrecisionConfig.Precision
Precision.__str__ = lambda precision: precision.name
PrecisionType = Any
class ConvDimensionNumbers(NamedTuple):
"""Describes batch, spatial, and feature dimensions of a convolution.
Args:
lhs_spec: a tuple of nonnegative integer dimension numbers containing
`(batch dimension, feature dimension, spatial dimensions...)`.
rhs_spec: a tuple of nonnegative integer dimension numbers containing
`(out feature dimension, in feature dimension, spatial dimensions...)`.
out_spec: a tuple of nonnegative integer dimension numbers containing
`(batch dimension, feature dimension, spatial dimensions...)`.
"""
lhs_spec: Sequence[int]
rhs_spec: Sequence[int]
out_spec: Sequence[int]
ConvGeneralDilatedDimensionNumbers = Union[
None, ConvDimensionNumbers, Tuple[str, str, str]]
def conv_general_dilated(
lhs: Array, rhs: Array, window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]] = None,
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
feature_group_count: int = 1, batch_group_count: int = 1,
precision: Optional[PrecisionType] = None) -> Array:
"""General n-dimensional convolution operator, with optional dilation.
Wraps XLA's `Conv
<https://www.tensorflow.org/xla/operation_semantics#conv_convolution>`_
operator.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
is also known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
dimension_numbers: either `None`, a `ConvDimensionNumbers` object, or
a 3-tuple `(lhs_spec, rhs_spec, out_spec)`, where each element is a string
of length `n+2`.
feature_group_count: integer, default 1. See XLA HLO docs.
batch_group_count: integer, default 1. See XLA HLO docs.
precision: Optional. Either `None`, which means the default precision for
the backend, or a `Precision` enum value.
Returns:
An array containing the convolution result.
In the string case of `dimension_numbers`, each character identifies by
position:
- the batch dimensions in `lhs`, `rhs`, and the output with the character
'N',
- the feature dimensions in `lhs` and the output with the character 'C',
- the input and output feature dimensions in rhs with the characters 'I'
and 'O' respectively, and
- spatial dimension correspondences between lhs, rhs, and the output using
any distinct characters.
For example, to indicate dimension numbers consistent with the `conv` function
with two spatial dimensions, one could use `('NCHW', 'OIHW', 'NCHW')`. As
another example, to indicate dimension numbers consistent with the TensorFlow
Conv2D operation, one could use `('NHWC', 'HWIO', 'NHWC')`. When using the
latter form of convolution dimension specification, window strides are
associated with spatial dimension character labels according to the order in
which the labels appear in the `rhs_spec` string, so that `window_strides[0]`
is matched with the dimension corresponding to the first character
appearing in rhs_spec that is not `'I'` or `'O'`.
If `dimension_numbers` is `None`, the default is `('NCHW', 'OIHW', 'NCHW')`
(for a 2D convolution).
"""
dnums: ConvDimensionNumbers
dnums = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
if lhs_dilation is None:
lhs_dilation = (1,) * (lhs.ndim - 2)
  elif isinstance(padding, str) and any(d != 1 for d in lhs_dilation):
raise ValueError(
"String padding is not implemented for transposed convolution "
"using this op. Please either exactly specify the required padding or "
"use conv_transpose.")
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
if isinstance(padding, str):
lhs_perm, rhs_perm, _ = dnums
rhs_shape = onp.take(rhs.shape, rhs_perm)[2:]
effective_rhs_shape = [(k-1) * r + 1 for k, r in zip(rhs_shape, rhs_dilation)]
padding = padtype_to_pads(
onp.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape,
window_strides, padding)
return conv_general_dilated_p.bind(
lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),
lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),
dimension_numbers=dnums,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
lhs_shape=lhs.shape, rhs_shape=rhs.shape,
precision=_canonicalize_precision(precision))
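# Illustrative sketch (not part of the original source): a 2D convolution in
# TensorFlow-style layout. Shapes and names below are hypothetical.
#
#   lhs.shape == (1, 10, 10, 3)   # NHWC
#   rhs.shape == (3, 3, 3, 8)     # HWIO: 3x3 kernel, 3 input / 8 output features
#   dn = ('NHWC', 'HWIO', 'NHWC')
#   conv_general_dilated(lhs, rhs, (1, 1), 'SAME', dimension_numbers=dn).shape
#   # -> (1, 10, 10, 8)
#   conv_general_dilated(lhs, rhs, (1, 1), 'VALID', dimension_numbers=dn).shape
#   # -> (1, 8, 8, 8)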
def dot(lhs: Array, rhs: Array, precision: Optional[PrecisionType] = None) -> Array:
"""Vector/vector, matrix/vector, and matrix/matrix multiplication.
Wraps XLA's `Dot
<https://www.tensorflow.org/xla/operation_semantics#dot>`_
operator.
For more general contraction, see the `dot_general` operator.
Args:
lhs: an array of rank 1 or 2.
rhs: an array of rank 1 or 2.
precision: Optional. Either `None`, which means the default precision for
the backend, or a `Precision` enum value.
Returns:
An array containing the product.
"""
if 1 <= lhs.ndim <= 2 and 1 <= rhs.ndim <= 2 and lhs.shape[-1] == rhs.shape[0]:
return dot_general(lhs, rhs, (((lhs.ndim - 1,), (0,)), ((), ())),
precision=precision)
else:
raise TypeError("Incompatible shapes for dot: got {} and {}.".format(
lhs.shape, rhs.shape))
DotDimensionNumbers = Tuple[Tuple[Sequence[int], Sequence[int]],
Tuple[Sequence[int], Sequence[int]]]
def dot_general(lhs: Array, rhs: Array, dimension_numbers: DotDimensionNumbers,
precision: Optional[PrecisionType] = None) -> Array:
"""More general contraction operator.
Wraps XLA's `DotGeneral
<https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_
operator.
Args:
lhs: an array
rhs: an array
dimension_numbers: a tuple of tuples of the form
`((lhs_contracting_dims, rhs_contracting_dims),
(lhs_batch_dims, rhs_batch_dims))`
precision: Optional. Either `None`, which means the default precision for
the backend, or a `Precision` enum value.
Returns:
An array containing the result.
"""
contract_dims_seq, batch_dims_seq = dimension_numbers
contract_dims = tuple(map(lambda x: tuple(x), contract_dims_seq))
batch_dims = tuple(map(lambda x: tuple(x), batch_dims_seq))
if not dtypes.issubdtype(lhs.dtype, onp.inexact):
# TODO(b/134526360): XLA doesn't support bool or integer dots, so we emit a
# sum of products instead.
lhs_contract_dims, rhs_contract_dims = contract_dims
lhs_batch_dims, rhs_batch_dims = batch_dims
lhs_noncontract_dims = tuple(sorted(
set(range(onp.ndim(lhs))) - set(lhs_batch_dims) - set(lhs_contract_dims)))
rhs_noncontract_dims = tuple(sorted(
set(range(onp.ndim(rhs))) - set(rhs_batch_dims) - set(rhs_contract_dims)))
lhs = transpose(lhs,
lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims)
rhs = transpose(rhs,
rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims)
new_lhs_shape = onp.insert(onp.array(onp.shape(lhs), dtype=onp.int64),
len(lhs_batch_dims) + len(lhs_noncontract_dims),
(1,) * len(rhs_noncontract_dims))
new_rhs_shape = onp.insert(onp.array(onp.shape(rhs), dtype=onp.int64),
len(lhs_batch_dims),
(1,) * len(lhs_noncontract_dims))
lhs = reshape(lhs, new_lhs_shape)
rhs = reshape(rhs, new_rhs_shape)
out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) +
len(rhs_noncontract_dims))
op_product = bitwise_and if lhs.dtype == onp.bool_ else mul
op_sum = bitwise_or if lhs.dtype == onp.bool_ else add
return reduce(op_product(lhs, rhs), _zero(lhs), op_sum,
tuple(range(out_ndim, out_ndim + len(lhs_contract_dims))))
return dot_general_p.bind(lhs, rhs,
dimension_numbers=(contract_dims, batch_dims),
precision=_canonicalize_precision(precision))
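# Illustrative sketch (not part of the original source): common
# dimension_numbers choices. Output dimensions are ordered as batch dims, then
# lhs non-contracting dims, then rhs non-contracting dims; shapes are hypothetical.
#
#   # matrix-matrix product, lhs (m, k) and rhs (k, n):
#   dot_general(lhs, rhs, (((1,), (0,)), ((), ())))      # -> shape (m, n)
#   # batched matmul, lhs (b, m, k) and rhs (b, k, n):
#   dot_general(lhs, rhs, (((2,), (1,)), ((0,), (0,))))  # -> shape (b, m, n)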
def broadcast(operand: Array, sizes: Sequence[int]) -> Array:
"""Broadcasts an array, adding new major dimensions.
Wraps XLA's `Broadcast
<https://www.tensorflow.org/xla/operation_semantics#broadcast>`_
operator.
Args:
operand: an array
sizes: a sequence of integers, giving the sizes of new major dimensions
to add.
Returns:
An array containing the result.
"""
dims = tuple(range(len(sizes), len(sizes) + onp.ndim(operand)))
return broadcast_in_dim(operand, tuple(sizes) + onp.shape(operand), dims)
def broadcast_in_dim(operand: Array, shape: Shape,
broadcast_dimensions: Sequence[int]) -> Array:
"""Wraps XLA's `BroadcastInDim
<https://www.tensorflow.org/xla/operation_semantics#broadcastindim>`_
operator.
"""
shape = _broadcast_in_dim_shape_rule(
operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
if onp.ndim(operand) == len(shape) and not len(broadcast_dimensions):
return operand
return broadcast_in_dim_p.bind(
operand, shape=tuple(shape),
broadcast_dimensions=tuple(broadcast_dimensions))
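# Illustrative sketch (not part of the original source): `broadcast` adds new
# leading dimensions, while `broadcast_in_dim` specifies which output dimension
# each operand dimension maps to. The operand `x` below is hypothetical, shape (3,).
#
#   broadcast(x, (2,)).shape                                       # -> (2, 3)
#   broadcast_in_dim(x, (2, 3), broadcast_dimensions=(1,)).shape   # -> (2, 3)
#   broadcast_in_dim(x, (3, 4), broadcast_dimensions=(0,)).shape   # -> (3, 4), x along axis 0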
def broadcast_to_rank(x: Array, rank: int) -> Array:
"""Adds leading dimensions of ``1`` to give ``x`` rank ``rank``."""
return broadcast(x, (1,) * (rank - x.ndim))
def reshape(operand: Array, new_sizes: Shape,
dimensions: Optional[Sequence[int]] = None) -> Array:
"""Wraps XLA's `Reshape
<https://www.tensorflow.org/xla/operation_semantics#reshape>`_
operator.
"""
new_sizes = canonicalize_shape(new_sizes) # TODO
new_sizes = tuple(new_sizes)
same_shape = onp.shape(operand) == new_sizes
same_dims = dimensions is None or tuple(dimensions) == tuple(range(onp.ndim(operand)))
if onp.shape(operand) and same_shape and same_dims:
return operand
else:
return reshape_p.bind(
operand, new_sizes=new_sizes,
dimensions=None if dimensions is None or same_dims else tuple(dimensions))
def pad(operand: Array, padding_value: Array,
padding_config: Sequence[Tuple[int, int, int]]) -> Array:
"""Wraps XLA's `Pad
<https://www.tensorflow.org/xla/operation_semantics#pad>`_
operator.
"""
return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))
def rev(operand: Array, dimensions: Sequence[int]) -> Array:
"""Wraps XLA's `Rev
<https://www.tensorflow.org/xla/operation_semantics#rev_reverse>`_
operator.
"""
return rev_p.bind(operand, dimensions=tuple(dimensions))
def select(pred: Array, on_true: Array, on_false: Array) -> Array:
"""Wraps XLA's `Select
<https://www.tensorflow.org/xla/operation_semantics#select>`_
operator.
"""
return select_p.bind(pred, on_true, on_false)
def slice(operand: Array, start_indices: Sequence[int],
limit_indices: Sequence[int],
strides: Optional[Sequence[int]] = None) -> Array:
"""Wraps XLA's `Slice
<https://www.tensorflow.org/xla/operation_semantics#slice>`_
operator.
"""
if (onp.all(onp.equal(start_indices, 0))
and onp.all(onp.equal(limit_indices, operand.shape))
and strides is None):
return operand
else:
return slice_p.bind(operand, start_indices=tuple(start_indices),
limit_indices=tuple(limit_indices),
strides=None if strides is None else tuple(strides))
def dynamic_slice(operand: Array, start_indices: Sequence[Array],
slice_sizes: Shape) -> Array:
"""Wraps XLA's `DynamicSlice
<https://www.tensorflow.org/xla/operation_semantics#dynamicslice>`_
operator.
Args:
operand: an array to slice.
start_indices: a list of scalar indices, one per dimension.
slice_sizes: the size of the slice. Must be a sequence of non-negative
integers with length equal to `ndim(operand)`.
Returns:
An array containing the slice.
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_slice_p.bind(operand, *start_indices,
slice_sizes=tuple(slice_sizes))
def dynamic_update_slice(operand: Array, update: Array,
start_indices: Array) -> Array:
"""Wraps XLA's `DynamicUpdateSlice
<https://www.tensorflow.org/xla/operation_semantics#dynamicupdateslice>`_
operator.
Args:
operand: an array to slice.
update: an array containing the new values to write onto `operand`.
start_indices: a list of scalar indices, one per dimension.
Returns:
An array containing the slice.
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_update_slice_p.bind(operand, update, *start_indices)
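# Illustrative sketch (not part of the original source): `x` below is a
# hypothetical array of shape (5,). Start indices are dynamic (possibly traced)
# values; XLA clamps them so that the requested slice stays in bounds.
#
#   dynamic_slice(x, (2,), slice_sizes=(3,))   # -> x[2:5]
#   dynamic_update_slice(x, update, (3,))      # writes a (2,)-shaped `update` into x[3:5]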
class GatherDimensionNumbers(NamedTuple):
"""
Describes the dimension number arguments to an `XLA's Gather operator
<https://www.tensorflow.org/xla/operation_semantics#gather>`_. See the XLA
documentation for more details of what the dimension numbers mean.
Args:
offset_dims: the set of dimensions in the `gather` output that offset into
an array sliced from `operand`. Must be a tuple of integers in ascending
order, each representing a dimension number of the output.
collapsed_slice_dims: the set of dimensions `i` in `operand` that have
`slice_sizes[i] == 1` and that should not have a corresponding dimension
in the output of the gather. Must be a tuple of integers in ascending
order.
start_index_map: for each dimension in `start_indices`, gives the
corresponding dimension in `operand` that is to be sliced. Must be a
tuple of integers with size equal to `start_indices.shape[-1]`.
Unlike XLA's `GatherDimensionNumbers` structure, `index_vector_dim` is
implicit; there is always an index vector dimension and it must always be the
last dimension. To gather scalar indices, add a trailing dimension of size 1.
"""
offset_dims: Sequence[int]
collapsed_slice_dims: Sequence[int]
start_index_map: Sequence[int]
def gather(operand: Array, start_indices: Array,
dimension_numbers: GatherDimensionNumbers,
slice_sizes: Shape) -> Array:
"""Gather operator.
Wraps `XLA's Gather operator
<https://www.tensorflow.org/xla/operation_semantics#gather>`_.
The semantics of gather are complicated, and its API might change in the
future. For most use cases, you should prefer `Numpy-style indexing
<https://docs.scipy.org/doc/numpy-1.16.0/reference/arrays.indexing.html>`_
(e.g., `x[:, (1,4,7), ...]`), rather than using `gather` directly.
Args:
operand: an array from which slices should be taken
start_indices: the indices at which slices should be taken
dimension_numbers: a `lax.GatherDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices` and the output relate.
slice_sizes: the size of each slice. Must be a sequence of non-negative
integers with length equal to `ndim(operand)`.
Returns:
An array containing the gather output.
"""
return gather_p.bind(
operand, start_indices, dimension_numbers=dimension_numbers,
slice_sizes=canonicalize_shape(slice_sizes))
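# Illustrative sketch (not part of the original source): gathering whole rows
# of a hypothetical (5, 3) operand. The indices carry a trailing index-vector
# dimension of size 1, as required by GatherDimensionNumbers above.
#
#   indices.shape == (2, 1)   # e.g. [[4], [0]]
#   dnums = GatherDimensionNumbers(offset_dims=(1,), collapsed_slice_dims=(0,),
#                                  start_index_map=(0,))
#   gather(operand, indices, dnums, slice_sizes=(1, 3)).shape   # -> (2, 3)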
class ScatterDimensionNumbers(NamedTuple):
"""
Describes the dimension number arguments to an `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_. See the XLA
documentation for more details of what the dimension numbers mean.
Args:
update_window_dims: the set of dimensions in the `updates` that are window
dimensions. Must be a tuple of integers in ascending
order, each representing a dimension number.
inserted_window_dims: the set of size 1 window dimensions that must be inserted
into the shape of `updates`. Must be a tuple of integers in ascending
order, each representing a dimension number of the output. These are the
mirror image of `collapsed_slice_dims` in the case of `gather`.
scatter_dims_to_operand_dims: for each dimension in `scatter_indices`, gives
the corresponding dimension in `operand`. Must be a sequence of integers
with size equal to indices.shape[-1].
Unlike XLA's `ScatterDimensionNumbers` structure, `index_vector_dim` is
implicit; there is always an index vector dimension and it must always be the
last dimension. To scatter scalar indices, add a trailing dimension of size 1.
"""
update_window_dims: Sequence[int]
inserted_window_dims: Sequence[int]
scatter_dims_to_operand_dims: Sequence[int]
def scatter_add(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
"""Scatter-add operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
addition is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
Returns:
An array containing the sum of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(add, _abstractify(_const(operand, 0)))
return scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
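# Illustrative sketch (not part of the original source): the mirror image of
# the gather example, adding (2, 3)-shaped updates into rows of a hypothetical
# (5, 3) operand.
#
#   indices.shape == (2, 1)   # e.g. [[4], [0]]
#   dnums = ScatterDimensionNumbers(update_window_dims=(1,),
#                                   inserted_window_dims=(0,),
#                                   scatter_dims_to_operand_dims=(0,))
#   scatter_add(operand, indices, updates, dnums)   # adds updates[i] into operand[indices[i, 0], :]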
def scatter_mul(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
"""Scatter-multiply operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
multiplication is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
Returns:
    An array containing `operand` with the scattered updates multiplied in.
"""
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(_const(operand, 1)))
return scatter_mul_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
def scatter_min(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
"""Scatter-min operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
the `min` function is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
Returns:
    An array containing `operand` with the scattered updates combined
    elementwise using `min`.
"""
jaxpr, consts = _reduction_jaxpr(min, _abstractify(_const(operand, 0)))
return scatter_min_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
def scatter_max(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
"""Scatter-max operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
the `max` function is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
Returns:
    An array containing `operand` with the scattered updates combined
    elementwise using `max`.
"""
jaxpr, consts = _reduction_jaxpr(max, _abstractify(_const(operand, 0)))
return scatter_max_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
# Define this outside of scatter to ensure cache hits.
_scatter_reduction_computation = lambda x, y: y
def scatter(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
"""Scatter-update operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where updates
replace values from `operand`.
If multiple updates are performed to the same index of operand, they may be
applied in any order.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
Returns:
    An array containing `operand` with the scattered updates written over the
    previous values.
"""
jaxpr, consts = _reduction_jaxpr(_scatter_reduction_computation,
_abstractify(_const(operand, 0)))
return scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
def index_take(src: Array, idxs: Array, axes: Sequence[int]) -> Array:
indices = concatenate([reshape(i, [i.shape[0], 1]) for i in idxs], 1)
indices = indices % onp.array([src.shape[ax] for ax in axes])
slice_sizes = list(src.shape)
for ax in axes:
slice_sizes[ax] = 1
offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=axes,
start_index_map=axes)
return gather(src, indices, dimension_numbers=dnums,
slice_sizes=tuple(slice_sizes))
def transpose(operand: Array, permutation: Sequence[int]) -> Array:
"""Wraps XLA's `Transpose
<https://www.tensorflow.org/xla/operation_semantics#transpose>`_
operator.
"""
permutation = tuple(permutation)
if permutation == tuple(range(len(permutation))):
return operand
else:
return transpose_p.bind(operand, permutation=permutation)
def reduce(operand: Array, init_value: Array, computation: Callable,
dimensions: Sequence[int]) -> Array:
"""Wraps XLA's `Reduce
<https://www.tensorflow.org/xla/operation_semantics#reduce>`_
operator.
"""
monoid_reducer = _get_monoid_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, dimensions)
else:
jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))
return reduce_p.bind(operand, init_value, computation=computation,
jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
@cache()
def _reduction_jaxpr(computation, aval):
pval = pe.PartialVal.unknown(aval)
comp = lu.wrap_init(lambda x, y: (computation(x, y),))
jaxpr, _, consts = pe.trace_to_jaxpr(comp, (pval, pval), instantiate=False)
return jaxpr, consts
def _get_monoid_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:
aval = core.get_aval(x)
dtype = _dtype(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return aval.val == 0 and _reduce_sum
    elif monoid_op is mul:
return aval.val == 1 and _reduce_prod
elif monoid_op is bitwise_or and dtype == onp.bool_:
return aval.val == _get_max_identity(dtype) and _reduce_or
elif monoid_op is bitwise_and and dtype == onp.bool_:
return aval.val == _get_min_identity(dtype) and _reduce_and
elif monoid_op is max:
return aval.val == _get_max_identity(dtype) and _reduce_max
elif monoid_op is min:
return aval.val == _get_min_identity(dtype) and _reduce_min
return None
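# Illustrative sketch (not part of the original source): the monoid fast path
# fires only when the init value is a concrete scalar equal to the reducer's
# identity; `x` below is hypothetical.
#
#   reduce(x, 0., add, dimensions=(0,))   # dispatches to reduce_sum_p
#   reduce(x, 1., add, dimensions=(0,))   # init is not 0, uses the generic reduce_p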
def _get_max_identity(dtype: DType) -> Array:
if dtypes.issubdtype(dtype, onp.inexact):
return onp.array(-onp.inf, dtype)
elif dtypes.issubdtype(dtype, onp.integer):
return onp.array(dtypes.iinfo(dtype).min, dtype)
elif dtypes.issubdtype(dtype, onp.bool_):
return onp.array(False, onp.bool_)
def _get_min_identity(dtype: DType) -> Array:
if dtypes.issubdtype(dtype, onp.inexact):
return onp.array(onp.inf, dtype)
elif dtypes.issubdtype(dtype, onp.integer):
return onp.array(dtypes.iinfo(dtype).max, dtype)
elif dtypes.issubdtype(dtype, onp.bool_):
return onp.array(True, onp.bool_)
def _reduce_sum(operand: Array, axes: Sequence[int]) -> Array:
return reduce_sum_p.bind(operand, axes=tuple(axes))
def _reduce_prod(operand: Array, axes: Sequence[int]) -> Array:
return reduce_prod_p.bind(operand, axes=tuple(axes))
def _reduce_max(operand: Array, axes: Sequence[int]) -> Array:
return reduce_max_p.bind(operand, axes=tuple(axes))
def _reduce_min(operand: Array, axes: Sequence[int]) -> Array:
return reduce_min_p.bind(operand, axes=tuple(axes))
def _reduce_or(operand: Array, axes: Sequence[int]) -> Array:
return reduce_or_p.bind(operand, axes=tuple(axes))
def _reduce_and(operand: Array, axes: Sequence[int]) -> Array:
return reduce_and_p.bind(operand, axes=tuple(axes))
def reduce_window(operand: Array, init_value: Array, computation: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: str) -> Array:
"""Wraps XLA's `ReduceWindow
<https://www.tensorflow.org/xla/operation_semantics#reducewindow>`_
operator.
"""
monoid_reducer = _get_monoid_window_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, window_dimensions, window_strides, padding)
else:
jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _get_monoid_window_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:
aval = core.get_aval(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return aval.val == 0 and _reduce_window_sum
elif monoid_op is max:
return aval.val == _get_max_identity(aval.dtype) and _reduce_window_max
elif monoid_op is min:
return aval.val == _get_min_identity(aval.dtype) and _reduce_window_min
return None
def _reduce_window_sum(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int], padding: str) -> Array:
return reduce_window_sum_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _reduce_window_prod(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int], padding: str) -> Array:
init_value = _const(operand, 1)
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(init_value))
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _reduce_window_max(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int], padding: str) -> Array:
return reduce_window_max_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _reduce_window_min(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int], padding: str) -> Array:
return reduce_window_min_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _select_and_scatter(operand: Array, select: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: str, source: Array, init_value: Array,
scatter: Callable) -> Array:
select_jaxpr, select_consts = _reduction_jaxpr(select, _abstractify(init_value))
scatter_jaxpr, scatter_consts = _reduction_jaxpr(scatter, _abstractify(init_value))
return select_and_scatter_p.bind(
operand, source, init_value, select_jaxpr=select_jaxpr,
select_consts=select_consts, scatter_jaxpr=scatter_jaxpr,
scatter_consts=scatter_consts, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _select_and_scatter_add(source: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: str) -> Array:
return select_and_scatter_add_p.bind(
source, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _select_and_gather_add(tangents: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: str) -> Array:
return select_and_gather_add_p.bind(
tangents, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def cumsum(operand: Array, axis: int) -> Array:
"""Computes a cumulative sum along `axis`."""
return cumsum_p.bind(operand, axis=int(axis))
def cumprod(operand: Array, axis: int) -> Array:
"""Computes a cumulative product along `axis`."""
return cumprod_p.bind(operand, axis=int(axis))
def sort(operand: Union[Array, Tuple[Array, ...]], dimension: int = -1
) -> Union[Array, Tuple[Array, ...]]:
"""Wraps XLA's `Sort
<https://www.tensorflow.org/xla/operation_semantics#sort>`_
operator.
"""
if isinstance(operand, tuple):
if len(operand) == 0:
raise TypeError("Sort requires at least one operand")
dimension = _canonicalize_axis(dimension, len(operand[0].shape))
return tuple(sort_p.bind(*operand, dimension=dimension))
else:
dimension = _canonicalize_axis(dimension, len(operand.shape))
return sort_p.bind(operand, dimension=dimension)[0]
def sort_key_val(keys: Array, values: Array,
dimension: int = -1) -> Tuple[Array, Array]:
"""Sorts ``keys`` along ``dimension`` and applies same permutation to ``values``."""
dimension = _canonicalize_axis(dimension, len(keys.shape))
k, v = sort_p.bind(keys, values, dimension=dimension)
return k, v
def top_k(operand: Array, k: int) -> Tuple[Array, Array]:
"""Returns top ``k`` values and their indices along the last axis of ``operand``."""
k = int(k)
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
return top_k_p.bind(operand, k=k)
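# Illustrative sketch (not part of the original source); inputs are hypothetical.
#
#   sort_key_val(onp.array([3, 1, 2]), onp.array([10, 20, 30]))
#   # -> keys [1, 2, 3], values [20, 30, 10]
#   top_k(onp.array([1., 5., 3.]), 2)
#   # -> values [5., 3.], indices [1, 2] (descending along the last axis)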
def tie_in(x: Array, y: Array) -> Array:
"""Gives ``y`` a fake data dependence on ``x``.
When staging to XLA (e.g. running under jit or pmap), values that don't depend
on computation inputs are computed op-by-op, and folded into the XLA
computation as constants.
``tie_in`` provides a way to explicitly stage values into the computation.
When staging to XLA and ``x`` is already staged, then the result of ``tie_in``
is ``y``, but staged to XLA. Downstream use of the result will also be staged
to XLA.
"""
return tie_in_p.bind(x, y)
def full(shape: Shape, fill_value: Array, dtype: Optional[DType] = None) -> Array:
"""Returns an array of `shape` filled with `fill_value`.
  Args:
shape: sequence of integers, describing the shape of the output array.
fill_value: the value to fill the new array with.
dtype: the type of the output array, or `None`. If not `None`, `fill_value`
will be cast to `dtype`.
"""
shape = canonicalize_shape(shape)
if onp.shape(fill_value):
msg = "full must be called with scalar fill_value, got fill_value.shape {}."
raise TypeError(msg.format(onp.shape(fill_value)))
dtype = dtypes.canonicalize_dtype(dtype or _dtype(fill_value))
# TODO(mattjj): remove device_put when dtype conversion produces DeviceArray
fill_value = xla.device_put_p.bind(convert_element_type(fill_value, dtype))
return broadcast(fill_value, shape)
def iota(dtype: DType, size: int) -> Array:
"""Wraps XLA's `Iota
<https://www.tensorflow.org/xla/operation_semantics#iota>`_
operator.
"""
size = size if type(size) is masking.Poly else int(size)
shape = canonicalize_shape((size,))
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.iota(dtype, shape[0])
aval = ShapedArray(shape, dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def broadcasted_iota(dtype: DType, shape: Shape, dimension: int) -> Array:
"""Convenience wrapper around ``iota``."""
dtype = dtypes.canonicalize_dtype(dtype)
shape = canonicalize_shape(shape)
dimension = int(dimension)
return broadcast_in_dim(iota(dtype, shape[dimension]), shape, [dimension])
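# Illustrative sketch (not part of the original source):
#
#   iota(onp.int32, 4)                       # -> [0, 1, 2, 3]
#   broadcasted_iota(onp.int32, (2, 3), 1)   # -> [[0, 1, 2], [0, 1, 2]]
#   broadcasted_iota(onp.int32, (2, 3), 0)   # -> [[0, 0, 0], [1, 1, 1]]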
def _eye(dtype: DType, shape: Shape, offset: int) -> Array:
"""Like numpy.eye, create a 2D array with ones on a diagonal.
This function exists for creating lazy identity matrices; that is,
materialization of the array is delayed and it may be fused into consumers to
avoid materialization at all."""
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.eye(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def _delta(dtype: DType, shape: Shape, axes: Sequence[int]) -> Array:
"""This function exists for creating lazy Kronecker delta arrays, particularly
for use in jax.numpy.einsum to express traces. It differs from ``eye`` in that
it can create arrays of any rank, but doesn't allow offsets."""
shape = tuple(map(int, shape))
axes = tuple(map(int, axes))
dtype = dtypes.canonicalize_dtype(dtype)
base_shape = tuple(onp.take(shape, axes))
lazy_expr = lazy.broadcast(lazy.delta(dtype, base_shape), shape, axes)
aval = ShapedArray(shape, dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def _tri(dtype: DType, shape: Shape, offset: int) -> Array:
"""Like numpy.tri, create a 2D array with ones below a diagonal.
This function exists for creating lazy triangular matrices, particularly for
use in jax.numpy.tri."""
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.tri(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def stop_gradient(x):
"""Stops gradient computation.
Operationally `stop_gradient` is the identity function, that is, it returns
argument `x` unchanged. However, `stop_gradient` prevents the flow of
gradients during forward or reverse-mode automatic differentiation. If there
are multiple nested gradient computations, `stop_gradient` stops gradients
for all of them.
For example:
>>> jax.grad(lambda x: x**2)(3.)
array(6., dtype=float32)
>>> jax.grad(lambda x: jax.lax.stop_gradient(x)**2)(3.)
array(0., dtype=float32)
>>> jax.grad(jax.grad(lambda x: x**2))(3.)
array(2., dtype=float32)
>>> jax.grad(jax.grad(lambda x: jax.lax.stop_gradient(x)**2))(3.)
array(0., dtype=float32)
"""
return tree_map(ad_util.stop_gradient_p.bind, x)
### convenience wrappers around traceables
def conv(lhs: Array, rhs: Array, window_strides: Sequence[int],
padding: str, precision: Optional[PrecisionType] = None) -> Array:
"""Convenience wrapper around `conv_general_dilated`.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`.
precision: Optional. Either `None`, which means the default precision for
the backend, or a `Precision` enum value.
Returns:
An array containing the convolution result.
"""
  return conv_general_dilated(lhs, rhs, window_strides, padding,
                              precision=precision)
def conv_with_general_padding(lhs: Array, rhs: Array,
window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]],
rhs_dilation: Optional[Sequence[int]],
precision: Optional[PrecisionType] = None) -> Array:
"""Convenience wrapper around `conv_general_dilated`.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
is also known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
precision: Optional. Either `None`, which means the default precision for
the backend, or a `Precision` enum value.
Returns:
An array containing the convolution result.
"""
return conv_general_dilated(
lhs, rhs, window_strides, padding, lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation, precision=precision)
def _conv_transpose_padding(k, s, padding):
"""Calculate before and after padding for a dim of transposed convolution.
Args:
k: int: kernel dimension.
s: int: dimension stride value.
padding: 'same' or 'valid' padding mode for original forward conv.
Returns:
2-tuple: ints: before and after padding for transposed convolution.
"""
if padding == 'SAME':
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(onp.ceil(pad_len / 2))
elif padding == 'VALID':
pad_len = k + s - 2 + _max(k - s, 0)
pad_a = k - 1
else:
raise ValueError('Padding mode must be `SAME` or `VALID`.')
pad_b = pad_len - pad_a
return pad_a, pad_b
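# Illustrative worked example (not part of the original source), for a kernel
# of size k=3 and stride s=2:
#   'SAME':  pad_len = 3 + 2 - 2 = 3; s > k - 1 is False, so pad_a = ceil(3/2) = 2
#            and pad_b = 3 - 2 = 1, giving (2, 1)
#   'VALID': pad_len = 3 + 2 - 2 + max(3 - 2, 0) = 4; pad_a = k - 1 = 2
#            and pad_b = 4 - 2 = 2, giving (2, 2)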
def _flip_axes(x, axes):
"""Flip ndarray 'x' along each axis specified in axes tuple."""
for axis in axes:
x = onp.flip(x, axis)
return x
def conv_transpose(lhs: Array, rhs: Array, strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
transpose_kernel: bool = False,
precision: Optional[PrecisionType] = None) -> Array:
"""Convenience wrapper for calculating the N-d convolution "transpose".
This function directly calculates a fractionally strided conv rather than
indirectly calculating the gradient (transpose) of a forward convolution.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
strides: sequence of `n` integers, sets fractional stride.
padding: 'SAME', 'VALID' will set as transpose of corresponding forward
conv, or a sequence of `n` integer 2-tuples describing before-and-after
padding for each `n` spatial dimension.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
dimension_numbers: tuple of dimension descriptors as in
lax.conv_general_dilated. Defaults to tensorflow convention.
transpose_kernel: if True flips spatial axes and swaps the input/output
channel axes of the kernel. This makes the output of this function identical
to the gradient-derived functions like keras.layers.Conv2DTranspose
applied to the same kernel. For typical use in neural nets this is completely
pointless and just makes input/output channel specification confusing.
precision: Optional. Either `None`, which means the default precision for
the backend, or a `Precision` enum value.
Returns:
Transposed N-d convolution, with output padding following the conventions of
keras.layers.Conv2DTranspose.
"""
assert len(lhs.shape) == len(rhs.shape) and len(lhs.shape) > 2
ndims = len(lhs.shape)
one = (1,) * (ndims - 2)
# Set dimensional layout defaults if not specified.
if dimension_numbers is None:
if ndims == 3:
dimension_numbers = ('NHC', 'HIO', 'NHC')
elif ndims == 4:
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
elif ndims == 5:
dimension_numbers = ('NHWDC', 'HWDIO', 'NHWDC')
else:
      raise ValueError(
          'No default dimension_numbers for rank-{} inputs; please specify '
          'dimension_numbers explicitly.'.format(ndims))
dn = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
k_shape = onp.take(rhs.shape, dn.rhs_spec)
k_sdims = k_shape[2:]
# Calculate correct output shape given padding and strides.
pads: Union[str, Sequence[Tuple[int, int]]]
if padding in {'SAME', 'VALID'}:
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
effective_k_size = map(lambda k, r: (k-1) * r + 1, k_sdims, rhs_dilation)
pads = [_conv_transpose_padding(k, s, padding)
for k,s in zip(effective_k_size, strides)]
else:
pads = padding
if transpose_kernel:
# flip spatial dims and swap input / output channel axes
rhs = _flip_axes(rhs, onp.array(dn.rhs_spec)[2:])
rhs = onp.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])
return conv_general_dilated(lhs, rhs, one, pads, strides, rhs_dilation, dn,
precision=precision)
def full_like(x: Array, fill_value: Array, dtype: Optional[DType] = None,
shape: Optional[Shape] = None) -> Array:
"""Create a full array like np.full based on the example array `x`.
Args:
x: example array-like, used for shape and dtype information.
fill_value: a scalar value to fill the entries of the output array.
dtype: optional, a dtype parameter for the output ndarray.
shape: optional, a shape parameter for the output ndarray.
Returns:
An ndarray with the same shape as `x` with its entries set equal to
`fill_value`, similar to the output of np.full.
"""
fill_shape = onp.shape(x) if shape is None else canonicalize_shape(shape)
fill_value = tie_in(x, fill_value)
return full(fill_shape, fill_value, dtype or _dtype(x))
def collapse(operand: Array, start_dimension: int, stop_dimension: int) -> Array:
lo, hi = start_dimension, stop_dimension
size = prod(operand.shape[lo:hi])
new_shape = operand.shape[:lo] + (size,) + operand.shape[hi:]
return reshape(operand, new_shape)
def slice_in_dim(operand: Array, start_index: Optional[int],
limit_index: Optional[int],
                 stride: int = 1, axis: int = 0) -> Array:
"""Convenience wrapper around slice applying to only one dimension."""
start_indices = [0] * operand.ndim
limit_indices = list(operand.shape)
strides = [1] * operand.ndim
# translate `None`
len_axis = operand.shape[axis]
start_index_int = int(start_index) if start_index is not None else 0
limit_index_int = int(limit_index) if limit_index is not None else len_axis
# translate negative indices
if start_index_int < 0:
start_index_int = start_index_int + len_axis
if limit_index_int < 0:
limit_index_int = limit_index_int + len_axis
axis = int(axis)
start_indices[axis] = start_index_int
limit_indices[axis] = limit_index_int
strides[axis] = int(stride)
return slice(operand, start_indices, limit_indices, strides)
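# Illustrative sketch (not part of the original source): `x` below is a
# hypothetical (5, 4) array.
#
#   slice_in_dim(x, 1, 3, axis=0)      # -> x[1:3, :], shape (2, 4)
#   slice_in_dim(x, -2, None, axis=1)  # -> x[:, 2:4]; negative start wraps, None means "to the end"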
def index_in_dim(operand: Array, index: int, axis: int = 0,
keepdims: bool = True) -> Array:
"""Convenience wrapper around slice to perform int indexing."""
index, axis = int(index), int(axis)
axis_size = operand.shape[axis]
wrapped_index = index + axis_size if index < 0 else index
if not 0 <= wrapped_index < axis_size:
msg = 'index {} is out of bounds for axis {} with size {}'
raise IndexError(msg.format(index, axis, axis_size))
result = slice_in_dim(operand, wrapped_index, wrapped_index + 1, 1, axis)
if keepdims:
return result
else:
return reshape(result, onp.delete(operand.shape, axis))
def dynamic_slice_in_dim(operand: Array, start_index: Array,
slice_size: int, axis: int = 0) -> Array:
"""Convenience wrapper around dynamic_slice applying to one dimension."""
start_indices = [_zero(start_index)] * operand.ndim
slice_sizes = list(operand.shape)
axis = int(axis)
start_indices[axis] = start_index
slice_sizes[axis] = int(slice_size)
return dynamic_slice(operand, start_indices, slice_sizes)
def dynamic_index_in_dim(operand: Array, index: Array, axis: int = 0,
keepdims: bool = True) -> Array:
"""Convenience wrapper around dynamic_slice to perform int indexing."""
result = dynamic_slice_in_dim(operand, index, 1, axis)
if keepdims:
return result
else:
return reshape(result, onp.delete(operand.shape, axis))
def dynamic_update_slice_in_dim(operand: Array, update: Array,
start_index: Array, axis: int) -> Array:
axis = int(axis)
start_indices = [_zero(start_index)] * _ndim(operand)
start_indices[axis] = start_index
return dynamic_update_slice(operand, update, start_indices)
def dynamic_update_index_in_dim(operand: Array, update: Array, index: Array,
axis: int) -> Array:
axis = int(axis)
if _ndim(update) != _ndim(operand):
assert _ndim(update) + 1 == _ndim(operand)
ax = axis % _ndim(operand)
update = reshape(update, operand.shape[:ax] + (1,) + operand.shape[ax+1:])
return dynamic_update_slice_in_dim(operand, update, index, axis)
def batch_matmul(lhs: Array, rhs: Array,
precision: Optional[PrecisionType] = None) -> Array:
"""Batch matrix multiplication."""
if _min(lhs.ndim, rhs.ndim) < 2:
raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'
.format(lhs.ndim, rhs.ndim))
if lhs.ndim != rhs.ndim:
raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'
.format(lhs.ndim, rhs.ndim))
lhs_contract = (lhs.ndim - 1,)
rhs_contract = (rhs.ndim - 2,)
batch = tuple(range(lhs.ndim - 2))
return dot_general(lhs, rhs, ((lhs_contract, rhs_contract), (batch, batch)),
precision=precision)
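# Illustrative sketch (not part of the original source): for hypothetical
# operands lhs of shape (8, 4, 5) and rhs of shape (8, 5, 6), batch_matmul
# builds the dot_general dimension numbers (((2,), (1,)), ((0,), (0,)))
# and returns an array of shape (8, 4, 6).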
# These functions also exist in the XLA client library, but we treat them
# as non-primitive to maintain a smaller set of autodiff primitives.
def square(x: Array) -> Array:
r"""Elementwise square: :math:`x^2`."""
return integer_pow(x, 2)
def reciprocal(x: Array) -> Array:
r"""Elementwise reciprocal: :math:`1 \over x`."""
return integer_pow(x, -1)
def _upcast_fp16_for_computation(f):
@functools.wraps(f)
def f_wrapped(x):
dtype = _dtype(x)
if dtype == onp.float16 or dtype == dtypes.bfloat16:
return convert_element_type(
f(convert_element_type(x, onp.float32)), dtype)
return f(x)
return f_wrapped
@api.jit
@_upcast_fp16_for_computation
def tan(x: Array) -> Array:
r"""Elementwise tangent: :math:`\mathrm{tan}(x)`."""
return div(sin(x), cos(x))
@api.jit
def asin(x: Array) -> Array:
r"""Elementwise arc sine: :math:`\mathrm{asin}(x)`."""
return mul(_const(x, 2),
atan2(x, add(_const(x, 1), sqrt(sub(_const(x, 1), square(x))))))
@api.jit
def acos(x: Array) -> Array:
r"""Elementwise arc cosine: :math:`\mathrm{acos}(x)`."""
return select(
ne(x, _const(x, -1.0)),
mul(_const(x, 2),
atan2(sqrt(sub(_const(x, 1), square(x))), add(_const(x, 1), x))),
full_like(x, onp.pi))
def atan(x: Array) -> Array:
r"""Elementwise arc tangent: :math:`\mathrm{atan}(x)`."""
return atan2(x, _const(x, 1))
def sinh(x: Array) -> Array:
r"""Elementwise hyperbolic sine: :math:`\mathrm{sinh}(x)`."""
return sinh_p.bind(x)
def cosh(x: Array) -> Array:
r"""Elementwise hyperbolic cosine: :math:`\mathrm{cosh}(x)`."""
return cosh_p.bind(x)
def asinh(x: Array) -> Array:
r"""Elementwise inverse hyperbolic sine: :math:`\mathrm{asinh}(x)`."""
return asinh_p.bind(x)
def acosh(x: Array) -> Array:
r"""Elementwise inverse hyperbolic cosine: :math:`\mathrm{acosh}(x)`."""
return acosh_p.bind(x)
def atanh(x: Array) -> Array:
r"""Elementwise inverse hyperbolic tangent: :math:`\mathrm{atanh}(x)`."""
return atanh_p.bind(x)
# Add some methods to ShapedArray that rely on lax primitives
ShapedArray.broadcast = core.aval_method(broadcast)
ShapedArray.transpose = core.aval_method(transpose) # clobbered by lax_numpy
ShapedArray.reshape = core.aval_method(reshape) # clobbered by lax_numpy
def _iter(tracer):
if tracer.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
n = tracer.shape[0]
# return (index_in_dim(tracer, i, keepdims=False) for i in range(n))
return iter([index_in_dim(tracer, i, keepdims=False) for i in range(n)])
ShapedArray._iter = staticmethod(_iter)
# Add some ad handlers that use (or could use) lax primitives
def zeros_like_array(x):
return full_like(x, 0)
for t in itertools.chain(dtypes.python_scalar_dtypes.keys(), array_types,
[xla.DeviceArray, pxla.ShardedDeviceArray]):
ad_util.jaxval_adders[t] = add
ad_util.jaxval_zeros_likers[xla.DeviceArray] = zeros_like_array
ad_util.jaxval_zeros_likers[pxla.ShardedDeviceArray] = zeros_like_array
### primitives
_input_dtype = lambda *args, **_: dtypes.canonicalize_dtype(args[0].dtype)
_fixed_dtype = lambda dtype: lambda *args, **kwargs: dtypes.canonicalize_dtype(dtype)
_complex_basetype = lambda dtype: onp.abs(onp.zeros((), dtype)).dtype
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None):
prim = Primitive(name)
prim.def_impl(partial(xla.apply_primitive, prim))
prim.def_abstract_eval(partial(standard_abstract_eval, prim, shape_rule, dtype_rule))
xla.translations[prim] = translation_rule or partial(standard_translate, name)
return prim
def standard_abstract_eval(prim, shape_rule, dtype_rule, *args, **kwargs):
assert all(isinstance(arg, UnshapedArray) for arg in args), args
least_specialized = _max(
map(type, args), key=operator.attrgetter('array_abstraction_level'))
if least_specialized is ConcreteArray:
return ConcreteArray(prim.impl(*[x.val for x in args], **kwargs))
elif least_specialized is ShapedArray:
return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
elif least_specialized is UnshapedArray:
return UnshapedArray(dtype_rule(*args, **kwargs))
else:
raise TypeError(args, least_specialized)
def standard_translate(name, c, *args, **kwargs):
xla_opname = ''.join(term.capitalize() for term in name.split('_'))
return getattr(xops, xla_opname)(*args, **kwargs)
def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):
if not any(dtypes.issubdtype(aval.dtype, t) for t in accepted_dtypes):
msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'
typename = str(onp.dtype(aval.dtype).name)
accepted_typenames = (t.__name__ for t in accepted_dtypes)
raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))
return result_dtype(aval.dtype)
def unop(result_dtype, accepted_dtypes, name, translation_rule=None):
dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
prim = standard_primitive(_attrgetter('shape'), dtype_rule, name,
translation_rule=translation_rule)
batching.defvectorized(prim)
masking.defvectorized(prim)
return prim
standard_unop = partial(unop, _identity)
_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)
def naryop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):
aval_dtypes = [aval.dtype for aval in avals]
for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):
if not any(dtypes.issubdtype(aval_dtype, t) for t in types):
msg = ('{} does not accept dtype {} at position {}. '
'Accepted dtypes at position {} are subtypes of {}.')
typename = str(onp.dtype(aval_dtype).name)
typenames = ', '.join(t.__name__ for t in types)
raise TypeError(msg.format(name, typename, i, i, typenames))
_check_same_dtypes(name, False, *aval_dtypes)
return result_dtype(*avals)
def _broadcasting_shape_rule(name, *avals):
shapes = onp.array([aval.shape for aval in avals if aval.shape])
if not shapes.size:
return ()
if len({len(shape) for shape in shapes}) != 1:
msg = '{} got arrays of different rank: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
is_zero = onp.any(shapes == 0, axis=0)
max_shape = onp.max(shapes, axis=0)
result_shape = onp.where(is_zero, 0, max_shape)
if not onp.all((shapes == result_shape) | (shapes == 1)):
msg = '{} got incompatible shapes for broadcasting: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
return tuple(result_shape)
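# Illustrative behavior of the rule above (an example, not from the source):
# shapes (3, 1, 5) and (3, 4, 5) broadcast to (3, 4, 5), while (3, 1) against
# (4, 1) raises a TypeError because 3 and 4 are incompatible. Scalar avals
# (shape ()) are filtered out first and never constrain the result.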
def naryop(result_dtype, accepted_dtypes, name, translation_rule=None):
dtype_rule = partial(naryop_dtype_rule, result_dtype, accepted_dtypes, name)
shape_rule = partial(_broadcasting_shape_rule, name)
prim = standard_primitive(shape_rule, dtype_rule, name,
translation_rule=translation_rule)
batching.defbroadcasting(prim)
masking.defnaryop(prim)
return prim
standard_naryop = partial(naryop, _input_dtype)
def _broadcast_translate(translate: Callable):
# Decorator for translation rules which adds explicit broadcasting of
# positional arguments. This is necessary only for a handful of primitives
# whose XLA implementations do not support broadcasting.
def _broadcast_array(array, array_shape, result_shape):
if array_shape == result_shape:
return array
bcast_dims = tuple(range(len(result_shape) - len(array_shape),
len(result_shape)))
result = xops.BroadcastInDim(array, result_shape, bcast_dims)
return result
def _broadcasted_translation_rule(c, *args, **kwargs):
shapes = [c.get_shape(arg).dimensions() for arg in args]
result_shape = broadcast_shapes(*shapes)
args = [_broadcast_array(arg, arg_shape, result_shape)
for arg, arg_shape in zip(args, shapes)]
return translate(c, *args, **kwargs)
return _broadcasted_translation_rule
# NOTE(mattjj): this isn't great for orchestrate fwd mode because it means JVPs
# get two extra ops in them: a reshape and a broadcast_in_dim (or sometimes just
# a broadcast). but saving the shape info with the primitives isn't great either
# because then we can't trace these ops without shape data.
def _brcast(x, *others):
# Used in jvprules to make naryop broadcasting explicit for transposability.
# Requires shape info during jvp tracing, which isn't strictly necessary.
# We don't need full numpy broadcasting, but otherwise the logic is the same
# so we reuse the broadcast_shapes function after filtering out scalars.
shapes = tuple(filter(None, map(onp.shape, (x,) + others)))
shape = shapes and broadcast_shapes(*shapes)
if onp.shape(x) != shape:
return _brcast_to(x, shape)
else:
return x
def _brcast_to(x, shape):
x_shape = onp.shape(x)
assert x_shape != shape
if x_shape:
assert len(x_shape) == len(shape)
broadcast_dimensions, = onp.where(onp.equal(x_shape, shape))
squeezed_dimensions, = onp.where(onp.not_equal(x_shape, shape))
inshape = onp.delete(x_shape, squeezed_dimensions)
return broadcast_in_dim(reshape(x, inshape), shape, broadcast_dimensions)
else:
return broadcast(x, shape)
_float = {onp.floating}
_complex = {onp.complexfloating}
_complex_elem_types = {onp.float32, onp.float64}
_int = {onp.integer}
_bool = {onp.bool_}
_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
_bool_or_int = _int | _bool
neg_p = standard_unop(_num, 'neg')
ad.deflinear(neg_p, lambda t: [neg(t)])
def _sign_translation_rule(c, x):
shape = c.get_shape(x)
dtype = shape.numpy_dtype()
if dtypes.issubdtype(dtype, onp.unsignedinteger):
zero = xb.constant(c, onp.array(0, dtype=dtype))
dims = c.get_shape(x).dimensions()
return xops.Select(xops.Eq(x, zero), xops.Broadcast(zero, dims),
xops.Broadcast(xb.constant(c, onp.array(1, dtype=dtype)),
dims))
return xops.Sign(x)
sign_p = standard_unop(_num, 'sign', translation_rule=_sign_translation_rule)
ad.defjvp_zero(sign_p)
nextafter_p = standard_naryop(
[_float, _float], 'nextafter',
translation_rule=lambda c, x1, x2: xops.NextAfter(x1, x2))
floor_p = standard_unop(_float, 'floor')
ad.defjvp_zero(floor_p)
ceil_p = standard_unop(_float, 'ceil')
ad.defjvp_zero(ceil_p)
round_p = standard_unop(_float, 'round')
ad.defjvp_zero(round_p)
is_finite_p = unop(_fixed_dtype(onp.bool_), _float, 'is_finite')
ad.defjvp_zero(is_finite_p)
exp_p = standard_unop(_float | _complex, 'exp')
ad.defjvp2(exp_p, lambda g, ans, x: mul(g, ans))
log_p = standard_unop(_float | _complex, 'log')
ad.defjvp(log_p, lambda g, x: div(g, x))
expm1_p = standard_unop(_float | _complex, 'expm1')
ad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, _one(ans))))
log1p_p = standard_unop(_float | _complex, 'log1p')
ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))
tanh_p = standard_unop(_float | _complex, 'tanh')
ad.defjvp2(tanh_p, lambda g, ans, x: mul(g, sub(_one(x), mul(ans, ans))))
sin_p = standard_unop(_float | _complex, 'sin')
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
cos_p = standard_unop(_float | _complex, 'cos')
ad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))
atan2_p = standard_naryop([_float, _float], 'atan2')
ad.defjvp(atan2_p,
lambda g, x, y: _brcast(g, y) * (y / (square(x) + square(y))),
lambda g, x, y: _brcast(g, x) * -x / (square(x) + square(y)))
sinh_p = standard_unop(_float | _complex, 'sinh')
ad.defjvp(sinh_p, lambda g, x: mul(g, cosh(x)))
cosh_p = standard_unop(_float | _complex, 'cosh')
ad.defjvp(cosh_p, lambda g, x: mul(g, sinh(x)))
asinh_p = standard_unop(_float | _complex, 'asinh')
ad.defjvp(asinh_p, lambda g, x: mul(g, rsqrt(square(x) + _one(x))))
acosh_p = standard_unop(_float | _complex, 'acosh')
ad.defjvp(acosh_p,
lambda g, x: mul(g, rsqrt((x - _one(x)) * (x + _one(x)))))
atanh_p = standard_unop(_float | _complex, 'atanh')
ad.defjvp(atanh_p,
lambda g, x: mul(g, reciprocal((_one(x) - x) * (_one(x) + x))))
regularized_incomplete_beta_p = standard_naryop(
[_float, _float, _float], 'regularized_incomplete_beta',
translation_rule=_broadcast_translate(
partial(standard_translate, 'regularized_incomplete_beta')))
def betainc_gradx(g, a, b, x):
lbeta = lgamma(a) + lgamma(b) - lgamma(a + b)
partial_x = exp((b - 1) * log1p(-x) +
(a - 1) * log(x) - lbeta)
return partial_x * g
def betainc_grad_not_implemented(g, a, b, x):
raise ValueError("Betainc gradient with respect to a and b not supported.")
ad.defjvp(regularized_incomplete_beta_p,
betainc_grad_not_implemented,
betainc_grad_not_implemented,
betainc_gradx)
lgamma_p = standard_unop(_float, 'lgamma')
ad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))
digamma_p = standard_unop(_float, 'digamma')
igamma_p = standard_naryop(
[_float, _float], 'igamma',
translation_rule=_broadcast_translate(partial(standard_translate, 'igamma')))
igamma_grad_a_p = standard_naryop([_float, _float], 'igamma_grad_a',
translation_rule=_broadcast_translate(partial(standard_translate,
'igamma_grad_a')))
def igamma_gradx(g, a, x):
return _brcast(g, a, x) * exp(-x + (a - _ones(a)) * log(x) - lgamma(a))
def igamma_grada(g, a, x):
return _brcast(g, a, x) * igamma_grad_a(a, x)
ad.defjvp(igamma_p, igamma_grada, igamma_gradx)
igammac_p = standard_naryop(
[_float, _float], 'igammac',
translation_rule=_broadcast_translate(partial(standard_translate, 'igammac')))
def igammac_gradx(g, a, x):
return -igamma_gradx(g, a, x)
def igammac_grada(g, a, x):
return -igamma_grada(g, a, x)
ad.defjvp(igammac_p, igammac_grada, igammac_gradx)
bessel_i0e_p = standard_unop(_float, 'bessel_i0e')
ad.defjvp2(bessel_i0e_p, lambda g, y, x: g * (bessel_i1e(x) - sign(x) * y))
bessel_i1e_p = standard_unop(_float, 'bessel_i1e')
def _bessel_i1e_jvp(g, y, x):
eps = dtypes.finfo(_dtype(x)).eps
x_is_not_tiny = abs(x) > eps
safe_x = select(x_is_not_tiny, x, full_like(x, eps))
dy_dx = bessel_i0e(safe_x) - y * (sign(safe_x) + reciprocal(safe_x))
dy_dx = select(x_is_not_tiny, dy_dx, full_like(x, 0.5))
return g * dy_dx
ad.defjvp2(bessel_i1e_p, _bessel_i1e_jvp)
erf_p = standard_unop(_float, 'erf')
ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
mul(g, exp(neg(square(x))))))
erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
mul(neg(g), exp(neg(square(x))))))
erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, onp.sqrt(onp.pi) / 2.),
mul(g, exp(square(ans)))))
real_p = unop(_complex_basetype, _complex, 'real')
ad.deflinear(real_p, lambda t: [complex(t, onp.zeros((), _dtype(t)))])
imag_p = unop(_complex_basetype, _complex, 'imag')
ad.defjvp(imag_p, lambda g, _: real(mul(_const(g, -1j), g)))
_complex_dtype = lambda dtype, *args: (onp.zeros((), dtype) + onp.zeros((), onp.complex64)).dtype
complex_p = naryop(_complex_dtype, [_complex_elem_types, _complex_elem_types],
'complex')
ad.deflinear(complex_p, lambda t: [real(t), imag(neg(t))])
conj_p = unop(_complex_dtype, _complex_elem_types | _complex, 'conj')
def _conj_transpose_rule(t, x, *, input_dtype):
assert ad.is_undefined_primal(x)
if dtypes.issubdtype(input_dtype, onp.complexfloating):
return [conj(t)]
else:
return [real(t)]
xla.translations[conj_p] = lambda c, x, **kwargs: xops.Conj(x)
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule
abs_p = unop(_complex_basetype, _num, 'abs')
def _abs_jvp_rule(g, ans, x):
if _iscomplex(x):
return _maybe_real(mul(g, div(_maybe_conj(x),
_replace_zero(convert_element_type(ans, _dtype(x))))))
else:
return select(ge(x, _zero(x)), g, neg(g))
ad.defjvp2(abs_p, _abs_jvp_rule)
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x
sqrt_p = standard_unop(_float | _complex, 'sqrt')
ad.defjvp2(sqrt_p, lambda g, ans, x: mul(g, div(_const(x, 0.5), ans)))
rsqrt_p = standard_unop(_float | _complex, 'rsqrt')
ad.defjvp2(rsqrt_p,
lambda g, ans, x:
mul(g, mul(_const(x, -0.5), pow(x, _const(x, -1.5)))))
pow_p = standard_naryop([_float | _complex, _float | _complex], 'pow')
def _pow_jvp_lhs(g, ans, x, y):
jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))
return mul(_brcast(g, y), jac)
def _pow_jvp_rhs(g, ans, x, y):
return mul(_brcast(g, x), mul(log(_replace_zero(x)), ans))
ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs)
def _integer_pow_dtype_rule(x, *, y):
dtype = unop_dtype_rule(_identity, _int | _float | _complex, 'integer_pow', x)
if y < 0 and dtypes.issubdtype(dtype, onp.integer):
raise TypeError("Integers cannot be raised to negative powers, got "
f"integer_pow({x}, {y})")
return dtype
def _integer_pow_translation_rule(c, x, *, y):
if y == 0:
shape = c.get_shape(x)
return xb.constant(c, onp.array(1, dtype=shape.numpy_dtype()))
is_reciprocal = y < 0
if is_reciprocal:
y = -y
acc = None
while y > 0:
if y & 1:
acc = x if acc is None else xops.Mul(acc, x)
y >>= 1
if y > 0:
x = xops.Mul(x, x)
return xops.Reciprocal(acc) if is_reciprocal else acc
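# The loop above is exponentiation by squaring. Illustrative trace for y = 5
# (binary 101): acc = x on bit 0, x becomes x**2, bit 1 is unset, x becomes
# x**4, then acc = x * x**4 = x**5 on bit 2. For negative exponents the loop
# runs on |y| and the result is inverted with xops.Reciprocal at the end.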
def _integer_pow_jvp(g, x, *, y):
return g if y == 0 else mul(g, mul(_const(x, y), integer_pow(x, y - 1)))
integer_pow_p = standard_primitive(
_attrgetter('shape'), _integer_pow_dtype_rule, 'integer_pow',
translation_rule=_integer_pow_translation_rule)
batching.defvectorized(integer_pow_p)
masking.defvectorized(integer_pow_p)
ad.defjvp(integer_pow_p, _integer_pow_jvp)
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)
not_p = standard_unop(_bool_or_int, 'not')
and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and')
ad.defjvp_zero(and_p)
or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or')
ad.defjvp_zero(or_p)
xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor')
ad.defjvp_zero(xor_p)
population_count_p = standard_unop(_bool_or_int, 'population_count')
def _add_transpose(t, x, y):
# The following linearity assertion is morally true, but because in some cases we
# instantiate zeros for convenience, it doesn't always hold.
# assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
return [t, t]
add_p = standard_naryop([_num, _num], 'add')
ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))
ad.primitive_transposes[add_p] = _add_transpose
def _sub_transpose(t, x, y):
# The following linearity assertion is morally true, but because in some cases
# we instantiate zeros for convenience, it doesn't always hold.
# assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
return [t, neg(t) if t is not ad_util.zero else ad_util.zero]
sub_p = standard_naryop([_num, _num], 'sub')
ad.defjvp(sub_p,
lambda g, x, y: _brcast(g, y),
lambda g, x, y: _brcast(neg(g), x))
ad.primitive_transposes[sub_p] = _sub_transpose
mul_p = standard_naryop([_num, _num], 'mul')
ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)
def _div_transpose_rule(cotangent, x, y):
assert ad.is_undefined_primal(x) and not ad.is_undefined_primal(y)
res = ad_util.zero if cotangent is ad_util.zero else div(cotangent, y)
return res, None
div_p = standard_naryop([_num, _num], 'div')
ad.defjvp(div_p,
lambda g, x, y: div(_brcast(g, y), y),
lambda g, x, y: mul(mul(neg(_brcast(g, x)), x), integer_pow(y, -2)))
ad.primitive_transposes[div_p] = _div_transpose_rule
rem_p = standard_naryop([_num, _num], 'rem')
ad.defjvp(rem_p,
lambda g, x, y: _brcast(g, y),
lambda g, x, y: mul(_brcast(neg(g), x), floor(div(x, y))))
def _broadcasting_select(c, which, x, y):
"""Wrapper around XLA `Select` that broadcasts its arguments."""
which_shape, x_shape, y_shape = (
c.get_shape(t).dimensions() for t in (which, x, y))
out_shape = broadcast_shapes(which_shape, x_shape, y_shape)
bcast_dims = lambda shape: tuple(range(len(out_shape) - len(shape),
len(out_shape)))
which = xops.BroadcastInDim(which, out_shape, bcast_dims(which_shape))
x = xops.BroadcastInDim(x, out_shape, bcast_dims(x_shape))
y = xops.BroadcastInDim(y, out_shape, bcast_dims(y_shape))
return xops.Select(which, x, y)
def _minmax_translation_rule(c, x, y, *, minmax=None, cmp=None):
dtype = c.get_shape(x).numpy_dtype()
if dtypes.issubdtype(dtype, onp.complexfloating):
rx = xops.Real(x)
ry = xops.Real(y)
return _broadcasting_select(
c, xops.Select(xops.Eq(rx, ry), cmp(xops.Imag(x), xops.Imag(y)),
cmp(rx, ry)),
x, y)
return minmax(x, y)
max_p = standard_naryop([_any, _any], 'max', translation_rule=partial(
_minmax_translation_rule, minmax=xops.Max, cmp=xops.Gt))
ad.defjvp2(max_p,
lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
min_p = standard_naryop([_any, _any], 'min', translation_rule=partial(
_minmax_translation_rule, minmax=xops.Min, cmp=xops.Lt))
ad.defjvp2(min_p,
lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
shift_left_p = standard_naryop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)
shift_right_arithmetic_p = standard_naryop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)
shift_right_logical_p = standard_naryop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)
eq_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)
ne_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)
ge_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)
gt_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)
le_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)
lt_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)
def _convert_element_type_shape_rule(operand, *, new_dtype, old_dtype):
return operand.shape
def _convert_element_type_dtype_rule(operand, *, new_dtype, old_dtype):
return new_dtype
def _convert_element_type_translation_rule(c, operand, *, new_dtype, old_dtype):
if (dtypes.issubdtype(old_dtype, onp.complexfloating) and
not dtypes.issubdtype(new_dtype, onp.complexfloating)):
operand = xops.Real(operand)
new_etype = xla_client.dtype_to_etype(new_dtype)
return xops.ConvertElementType(operand, new_element_type=new_etype)
def _convert_element_type_transpose_rule(t, *, new_dtype, old_dtype):
assert t.dtype == new_dtype, (t.dtype, new_dtype)
return [convert_element_type_p.bind(t, new_dtype=old_dtype,
old_dtype=new_dtype)]
convert_element_type_p = standard_primitive(
_convert_element_type_shape_rule, _convert_element_type_dtype_rule,
'convert_element_type', _convert_element_type_translation_rule)
ad.deflinear(convert_element_type_p, _convert_element_type_transpose_rule)
batching.defvectorized(convert_element_type_p)
masking.defvectorized(convert_element_type_p)
def _bitcast_convert_type_shape_rule(operand, *, new_dtype):
return operand.shape
def _bitcast_convert_type_dtype_rule(operand, *, new_dtype):
return new_dtype
def _bitcast_convert_type_translation_rule(c, operand, *, new_dtype):
new_etype = xla_bridge.dtype_to_etype(new_dtype)
return xops.BitcastConvertType(operand, new_element_type=new_etype)
bitcast_convert_type_p = standard_primitive(
_bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule,
'bitcast_convert_type', _bitcast_convert_type_translation_rule)
ad.defjvp_zero(bitcast_convert_type_p)
batching.defvectorized(bitcast_convert_type_p)
masking.defvectorized(bitcast_convert_type_p)
def _conv_general_dilated_shape_rule(
lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
**unused_kwargs):
assert type(dimension_numbers) is ConvDimensionNumbers
if not feature_group_count > 0:
msg = ("conv_general_dilated feature_group_count "
"must be a positive integer, got {}.")
raise ValueError(msg.format(feature_group_count))
lhs_feature_count = lhs.shape[dimension_numbers.lhs_spec[1]]
quot, rem = divmod(lhs_feature_count, feature_group_count)
if rem:
msg = ("conv_general_dilated feature_group_count must divide lhs feature "
"dimension size, but {} does not divide {}.")
raise ValueError(msg.format(feature_group_count, lhs_feature_count))
if quot != rhs.shape[dimension_numbers.rhs_spec[1]]:
msg = ("conv_general_dilated lhs feature dimension size divided by "
"feature_group_count must equal the rhs input feature dimension "
"size, but {} // {} != {}.")
raise ValueError(msg.format(lhs_feature_count, feature_group_count,
rhs.shape[dimension_numbers.rhs_spec[1]]))
if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:
msg = ("conv_general_dilated rhs output feature dimension size must be a "
"multiple of feature_group_count, but {} is not a multiple of {}.")
raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
feature_group_count))
if not batch_group_count > 0:
msg = ("conv_general_dilated batch_group_count "
"must be a positive integer, got {}.")
raise ValueError(msg.format(batch_group_count))
lhs_batch_count = lhs.shape[dimension_numbers.lhs_spec[0]]
if lhs_batch_count % batch_group_count != 0:
msg = ("conv_general_dilated batch_group_count must divide lhs batch "
"dimension size, but {} does not divide {}.")
raise ValueError(msg.format(batch_group_count, lhs_batch_count))
  if rhs.shape[dimension_numbers.rhs_spec[0]] % batch_group_count:
    msg = ("conv_general_dilated rhs output feature dimension size must be a "
           "multiple of batch_group_count, but {} is not a multiple of {}.")
    raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
                                batch_group_count))
  if batch_group_count > 1 and feature_group_count > 1:
msg = ("At most one of batch_group_count and feature_group_count may be > "
"1, got batch_group_count={} and feature_group_count={}")
raise ValueError(msg.format(batch_group_count, feature_group_count))
lhs_perm, rhs_perm, out_perm = dimension_numbers
lhs_trans = _dilate_shape(onp.take(lhs.shape, lhs_perm), lhs_dilation)
rhs_trans = _dilate_shape(onp.take(rhs.shape, rhs_perm), rhs_dilation)
out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding,
batch_group_count)
return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def _conv_general_dilated_dtype_rule(
lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, **unused_kwargs):
return naryop_dtype_rule(_input_dtype, [_float, _float],
'conv_general_dilated', lhs, rhs)
_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
_conv_sdims = lambda spec: spec[2:]
# Understanding the convolution transpose rules:
# Ignoring the spatial dimensions, let m = batch, j = input feature,
# k = output feature.
#
# Convolution computes the following contraction:
# Forward: [m, j] [j, k] -> [m, k]
#
# The transposes are similar to the rules for transposing a matmul:
# LHS transpose: [m, k] [k, j] -> [m, j]
# RHS transpose: [j, m] [m, k] -> [j, k]
#
# With feature grouping, we have the following signatures:
# Forward: [m, gj] [j, gk] -> [m, gk]
# LHS transpose: [m, gk] [k, gj] -> [m, gj]
# --> implemented as feature grouping after transposing the group from the
# kernel input features to the kernel output features.
# RHS transpose: [gj, m] [m, gk] -> [j, gk]
# --> which is batch grouping.
#
# With batch grouping, we have the following signatures:
# Forward: [gm, j] [j, gk] -> [m, gk]
# LHS transpose: [m, gk] [gk, j] -> [gm, j]
# --> implemented as feature grouping with transposing the group on the kernel
# and the output.
# RHS transpose: [j, gm][m, gk] -> [j, gk]
# --> which is feature grouping.
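# Concrete illustration (hypothetical shapes): with feature_group_count g = 2,
# an NCHW lhs of shape [N, 2*j, H, W] and an OIHW rhs of shape [2*k, j, Kh, Kw]
# yield an output with 2*k features; the transpose rules below move the group
# axis between the rhs input- and output-feature dims so the backward passes
# can be expressed as calls to conv_general_dilated with the swapped specs
# sketched above.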
def _conv_general_dilated_transpose_lhs(
g, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
lhs_shape, rhs_shape, precision):
assert type(dimension_numbers) is ConvDimensionNumbers
assert batch_group_count == 1 or feature_group_count == 1
lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
lhs_spec, rhs_spec, out_spec = dimension_numbers
t_rhs_spec = _conv_spec_transpose(rhs_spec)
if feature_group_count > 1:
# in addition to switching the dims in the spec, need to move the feature
# group axis into the transposed rhs's output feature dim
rhs = _reshape_axis_out_of(rhs_spec[0], feature_group_count, rhs)
rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
elif batch_group_count > 1:
rhs = _reshape_axis_out_of(rhs_spec[0], batch_group_count, rhs)
rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
feature_group_count = batch_group_count
trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec)
padding = _conv_general_vjp_lhs_padding(
onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
rhs_dilation)
revd_weights = rev(rhs, rhs_sdims)
out = conv_general_dilated(
g, revd_weights, window_strides=lhs_dilation, padding=padding,
lhs_dilation=window_strides, rhs_dilation=rhs_dilation,
dimension_numbers=trans_dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=1, precision=precision)
if batch_group_count > 1:
out = _reshape_axis_out_of(lhs_spec[1], batch_group_count, out)
out = _reshape_axis_into(lhs_spec[1], lhs_spec[0], out)
return out
def _conv_general_dilated_transpose_rhs(
g, lhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers: ConvDimensionNumbers, feature_group_count: int,
batch_group_count: int, lhs_shape, rhs_shape, precision):
assert type(dimension_numbers) is ConvDimensionNumbers
if onp.size(g) == 0:
# Avoids forming degenerate convolutions where the RHS has spatial size 0.
return ad_util.zero
lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
lhs_trans, rhs_trans, out_trans = map(_conv_spec_transpose, dimension_numbers)
assert batch_group_count == 1 or feature_group_count == 1
if batch_group_count > 1:
feature_group_count = batch_group_count
batch_group_count = 1
elif feature_group_count > 1:
batch_group_count = feature_group_count
feature_group_count = 1
trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)
padding = _conv_general_vjp_rhs_padding(
onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
rhs_dilation)
return conv_general_dilated(
lhs, g, window_strides=rhs_dilation, padding=padding,
lhs_dilation=lhs_dilation, rhs_dilation=window_strides,
dimension_numbers=trans_dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count, precision=precision)
def _conv_general_dilated_translation_rule(
c, lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count, precision,
**unused_kwargs):
assert type(dimension_numbers) is ConvDimensionNumbers
dimension_numbers = _conv_general_proto(dimension_numbers)
return xops.ConvGeneralDilated(lhs, rhs, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision_config=_precision_config(precision))
def _conv_general_dilated_batch_rule(
batched_args, batch_dims, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count, precision, **unused_kwargs):
assert batch_group_count == 1 or feature_group_count == 1
lhs, rhs = batched_args
lhs_bdim, rhs_bdim = batch_dims
lhs_spec, rhs_spec, out_spec = dimension_numbers
if lhs_bdim is not None and rhs_bdim is not None:
assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim]
if batch_group_count > 1:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
batch_group_count *= lhs.shape[lhs_bdim]
else:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[1], lhs)
feature_group_count *= lhs.shape[lhs_bdim]
new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
out = conv_general_dilated(
new_lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], lhs.shape[lhs_bdim], out)
return out, out_spec[1]
elif lhs_bdim is not None:
if batch_group_count == 1:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, precision=precision)
out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
return out, out_spec[0]
else:
new_lhs = _reshape_axis_out_of(lhs_spec[0] + int(lhs_bdim <= lhs_spec[0]),
batch_group_count, lhs)
new_lhs = _reshape_axis_into(lhs_bdim + int(lhs_spec[0] < lhs_bdim),
lhs_spec[0] + 1,
new_lhs)
new_lhs = _reshape_axis_into(lhs_spec[0], lhs_spec[0], new_lhs)
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
return out, out_spec[0]
elif rhs_bdim is not None:
if feature_group_count == 1 and batch_group_count == 1:
new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], rhs.shape[rhs_bdim], out)
return out, out_spec[1]
else:
# groups need to be outermost, so we need to factor them out of the
# rhs output feature dim, then factor the batch dim into the remaining rhs
# output feature dim, then put groups back in. We do something
# similar on the output. An alternative which would require more FLOPs but
# fewer reshapes would be to broadcast lhs.
group_count = (feature_group_count if feature_group_count > 1
else batch_group_count)
new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]),
group_count, rhs)
new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim),
rhs_spec[0] + 1,
new_rhs)
new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs)
out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], group_count, out)
out = _reshape_axis_out_of(out_spec[1] + 1, rhs.shape[rhs_bdim], out)
out = _reshape_axis_into(out_spec[1], out_spec[1] + 1, out)
return out, out_spec[1]
conv_general_dilated_p = standard_primitive(
_conv_general_dilated_shape_rule, _conv_general_dilated_dtype_rule,
'conv_general_dilated', _conv_general_dilated_translation_rule)
ad.defbilinear(conv_general_dilated_p,
_conv_general_dilated_transpose_lhs,
_conv_general_dilated_transpose_rhs)
batching.primitive_batchers[conv_general_dilated_p] = \
_conv_general_dilated_batch_rule
def _reshape_axis_into(src, dst, x):
perm = [i for i in range(x.ndim) if i != src]
perm.insert(dst, src)
new_shape = list(onp.delete(x.shape, src))
new_shape[dst] *= x.shape[src]
return reshape(x, new_shape, perm)
def _reshape_axis_out_of(src, size1, x):
shape = list(x.shape)
size2, ragged = divmod(shape[src], size1)
assert not ragged
shape[src:src+1] = [size1, size2]
return reshape(x, shape)
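# Illustrative shapes for the two helpers above (a sketch):
#   _reshape_axis_out_of(1, 3, y)  # y: (2, 6)    -> (2, 3, 2), axis 1 split into 3 x 2
#   _reshape_axis_into(0, 0, x)    # x: (2, 3, 5) -> (6, 5),    axis 0 merged into its neighbor
# Note that `dst` indexes into the shape with `src` already deleted, which is
# why the merge example above uses dst=0 rather than dst=1.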
def _precision_config(precision):
if precision is not None:
config = xla_client.PrecisionConfig()
config.operand_precision.extend((precision, precision))
return config
return None
def _dot_general_shape_rule(lhs, rhs, *, dimension_numbers, precision):
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
if len(lhs_batch) != len(rhs_batch):
msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch "
"dimensions, got lhs_batch {} and rhs_batch {}.")
raise TypeError(msg.format(lhs_batch, rhs_batch))
if not onp.all(onp.equal(lhs_batch, rhs_batch)):
msg = ("dot_general requires same lhs and rhs batch dimension numbers, "
"got {} and {}.")
raise TypeError(msg.format(lhs_batch, rhs_batch))
lhs_batch_shape = onp.take(lhs.shape, lhs_batch)
rhs_batch_shape = onp.take(rhs.shape, rhs_batch)
if not onp.all(onp.equal(lhs_batch_shape, rhs_batch_shape)):
msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions "
"to have the same shape, got {} and {}.")
raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
if tuple(sorted(lhs_batch)) != tuple(range(len(lhs_batch))):
msg = ("dot_general requires lhs batch dimensions to precede contracting "
"and non-contracting dimensions, got lhs_batch {}.")
raise TypeError(msg.format(lhs_batch))
if tuple(sorted(rhs_batch)) != tuple(range(len(rhs_batch))):
msg = ("dot_general requires rhs batch dimensions to precede contracting "
"and non-contracting dimensions, got rhs_batch {}.")
raise TypeError(msg.format(rhs_batch))
lhs_contracting_shape = onp.take(lhs.shape, lhs_contracting)
rhs_contracting_shape = onp.take(rhs.shape, rhs_contracting)
if not onp.all(onp.equal(lhs_contracting_shape, rhs_contracting_shape)):
msg = ("dot_general requires contracting dimensions to have the same "
"shape, got {} and {}.")
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
batch_shape = tuple(onp.take(lhs.shape, lhs_batch))
lhs_contract_or_batch = tuple(lhs_contracting) + tuple(lhs_batch)
lhs_tensored_shape = tuple(onp.delete(lhs.shape, lhs_contract_or_batch))
rhs_contract_or_batch = tuple(rhs_contracting) + tuple(rhs_batch)
rhs_tensored_shape = tuple(onp.delete(rhs.shape, rhs_contract_or_batch))
return batch_shape + lhs_tensored_shape + rhs_tensored_shape
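# Illustrative example of the shape rule above: lhs (2, 3, 4) and rhs (2, 4, 5)
# with dimension_numbers (((2,), (1,)), ((0,), (0,))) give output shape
# (2, 3, 5): batch dims first, then the surviving lhs dims, then the surviving
# rhs dims.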
def _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision):
return naryop_dtype_rule(_input_dtype, [_num, _num], 'dot_general', lhs, rhs)
def _dot_general_transpose_lhs(g, y, *, dimension_numbers, precision,
swap_ans=False):
(x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)
x_kept = remaining(range(x_ndim), x_contract, x_batch)
y_kept = remaining(range(y.ndim), y_contract, y_batch)
if swap_ans:
ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)
else:
ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)
dims = ((ans_y, y_kept), (ans_batch, y_batch))
x_contract_sorted_by_y = list(onp.take(x_contract, onp.argsort(y_contract)))
out_axes = onp.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
return transpose(dot_general(g, y, dims, precision=precision),
tuple(out_axes))
def _dot_general_transpose_rhs(g, x, *, dimension_numbers, precision):
(x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
swapped_dimension_numbers = ((y_contract, x_contract), (y_batch, x_batch))
return _dot_general_transpose_lhs(
g, x, dimension_numbers=swapped_dimension_numbers, precision=precision,
swap_ans=True)
def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
precision):
# there are three kinds of dimensions in a dot_general:
# - contraction dimensions appear in lhs and rhs but not the result
# - batch dimensions appear in lhs, rhs, and result
# - tensor product dimensions appear in the result and one of lhs or rhs
(lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
lhs, rhs = batched_args
lbd, rbd = batch_dims
assert lbd is not None or rbd is not None
if lbd is not None and rbd is not None:
# adding a batch dimension
if lbd != 0:
lhs = batching.moveaxis(lhs, lbd, 0)
if rbd != 0:
rhs = batching.moveaxis(rhs, rbd, 0)
lhs_batch = (0,) + tuple(onp.add(1, lhs_batch))
rhs_batch = (0,) + tuple(onp.add(1, rhs_batch))
lhs_contract = tuple(onp.add(1, lhs_contract))
rhs_contract = tuple(onp.add(1, rhs_contract))
result_batch_dim = 0
else:
# adding a tensor product dimension
if lbd is not None:
if lhs_batch == () or lbd > onp.max(lhs_batch):
# can avoid transposes
bump_lhs_contract = onp.greater_equal(lhs_contract, lbd)
lhs_contract = tuple(onp.add(lhs_contract, bump_lhs_contract))
result_batch_dim = lbd - len(lhs_contract) + sum(bump_lhs_contract)
else:
# move the new dimension to the end of lhs to avoid changing batch dims
lhs = batching.moveaxis(lhs, lbd, lhs.ndim - 1)
# lhs tensor product dims in result come after batch dims
result_batch_dim = lhs.ndim - len(lhs_contract) - 1
else:
if rhs_batch == () or rbd > onp.max(rhs_batch):
# can avoid transposes
bump_rhs_contract = onp.greater_equal(rhs_contract, rbd)
rhs_contract = tuple(onp.add(rhs_contract, bump_rhs_contract))
result_batch_dim = (rbd + (lhs.ndim - len(lhs_contract) - len(lhs_batch))
- (len(rhs_contract) - sum(bump_rhs_contract)))
else:
# move the new dimension to the end of rhs to avoid changing batch dims
rhs = batching.moveaxis(rhs, rbd, rhs.ndim - 1)
# rhs tensor product dims in result come after batch dims + lhs tensor
# product dims
result_batch_dim = (lhs.ndim - len(lhs_contract) - len(lhs_batch) +
rhs.ndim - len(rhs_contract) - 1)
new_dimension_numbers = [(lhs_contract, rhs_contract), (lhs_batch, rhs_batch)]
batched_out = dot_general(lhs, rhs, new_dimension_numbers,
precision=precision)
return batched_out, int(result_batch_dim)
def _dot_general_translation_rule(c, lhs, rhs, *, dimension_numbers, precision):
return xops.DotGeneral(lhs, rhs,
xc.make_dot_dimension_numbers(dimension_numbers),
precision_config=_precision_config(precision))
def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
precision):
lhs, rhs = padded_vals
lhs_shape, rhs_shape = logical_shapes
lhs_ndim, rhs_ndim = len(lhs_shape), len(rhs_shape)
(lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
# we need only mask the lhs contraction dimensions
if len(lhs_contract) == 0:
return dot_general(lhs, rhs, dimension_numbers, precision=precision)
else:
masks = [broadcasted_iota(onp.int32, lhs.shape, d) < lhs_shape[d]
for d in lhs_contract]
mask_intersection = masks[0]
for mask in masks[1:]:
mask_intersection &= mask
masked_lhs = select(mask_intersection, lhs, zeros_like_array(lhs))
return dot_general(masked_lhs, rhs, dimension_numbers, precision=precision)
dot_general_p = standard_primitive(_dot_general_shape_rule,
_dot_general_dtype_rule, 'dot_general',
_dot_general_translation_rule)
ad.defbilinear(dot_general_p,
_dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
masking.masking_rules[dot_general_p] = _dot_general_masking_rule
def _broadcast_shape_rule(operand, sizes):
_check_shapelike('broadcast', 'sizes', sizes)
return tuple(sizes) + operand.shape
def _broadcast_batch_rule(batched_args, batch_dims, *, sizes):
operand, = batched_args
bdim, = batch_dims
new_bdim = None if bdim is None else bdim + len(sizes)
return broadcast(operand, sizes), new_bdim
broadcast_p = standard_primitive(
_broadcast_shape_rule, _input_dtype, 'broadcast')
ad.deflinear(broadcast_p, lambda t, sizes: [_reduce_sum(t, range(len(sizes)))])
batching.primitive_batchers[broadcast_p] = _broadcast_batch_rule
def _broadcast_in_dim_impl(operand, *, shape, broadcast_dimensions):
if type(operand) is xla.DeviceArray:
shape = _broadcast_in_dim_shape_rule(
operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
aval = ShapedArray(shape, _dtype(operand))
lazy_expr = lazy.broadcast(operand._lazy_expr, shape, broadcast_dimensions)
return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
else:
return xla.apply_primitive(broadcast_in_dim_p, operand, shape=shape,
broadcast_dimensions=broadcast_dimensions)
def _broadcast_in_dim_shape_rule(operand, *, shape, broadcast_dimensions):
_check_shapelike('broadcast_in_dim', 'shape', shape)
_check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
broadcast_dimensions)
operand_ndim = onp.ndim(operand)
if operand_ndim != len(broadcast_dimensions):
msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
'operand ndim; got broadcast_dimensions {} for operand ndim {}.')
raise TypeError(msg.format(broadcast_dimensions, operand_ndim))
if len(shape) < operand_ndim:
    msg = ('broadcast_in_dim target broadcast shape must have equal or higher rank '
           'than the operand shape; got operand ndim {} and target broadcast ndim {}.')
raise TypeError(msg.format(operand_ndim, len(shape)))
if not set(broadcast_dimensions).issubset(set(range(len(shape)))):
msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '
'dimensions, got {} for operand ndim {} and shape {}.')
raise TypeError(msg.format(broadcast_dimensions, operand_ndim, shape))
if any(operand.shape[i] != 1 and operand.shape[i] != shape[broadcast_dimensions[i]]
for i in range(operand_ndim)):
msg = ('broadcast_in_dim operand dimension sizes must either be 1, or be '
'equal to their corresponding dimensions in the target broadcast shape; '
'got operand of shape {}, target broadcast shape {}, '
           'broadcast_dimensions {}.')
raise TypeError(msg.format(operand.shape, shape, broadcast_dimensions))
if (len(broadcast_dimensions) != len(set(broadcast_dimensions)) or
tuple(broadcast_dimensions) != tuple(sorted(broadcast_dimensions))):
msg = ('broadcast_in_dim broadcast_dimensions must be strictly increasing; '
'got broadcast_dimensions {}')
raise TypeError(msg.format(broadcast_dimensions))
return shape
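# Illustrative check: an operand of shape (3, 1) with shape=(2, 3, 4) and
# broadcast_dimensions=(1, 2) passes (operand dim 0 maps to output dim 1, and
# the size-1 operand dim 1 is broadcast along output dim 2), whereas
# broadcast_dimensions=(2, 1) raises because the dimensions must be strictly
# increasing.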
def _broadcast_in_dim_transpose_rule(t, *, shape, broadcast_dimensions):
axes = tuple(onp.delete(range(len(shape)), broadcast_dimensions))
return [_reduce_sum(t, axes)]
def _broadcast_in_dim_batch_rule(batched_args, batch_dims, *, shape,
broadcast_dimensions):
operand, = batched_args
bdim, = batch_dims
new_operand = batching.moveaxis(operand, bdim, 0)
new_shape = (operand.shape[bdim],) + shape
new_broadcast_dimensions = (0,) + tuple(onp.add(1, broadcast_dimensions))
return broadcast_in_dim(new_operand, new_shape, new_broadcast_dimensions), 0
broadcast_in_dim_p = standard_primitive(
_broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')
broadcast_in_dim_p.def_impl(_broadcast_in_dim_impl)
ad.deflinear(broadcast_in_dim_p, _broadcast_in_dim_transpose_rule)
batching.primitive_batchers[broadcast_in_dim_p] = _broadcast_in_dim_batch_rule
def _clamp_shape_rule(min, operand, max):
if min.shape and min.shape != operand.shape:
m = "clamp requires min.shape == operand.shape or min.shape == (), got {}."
raise TypeError(m.format(min.shape))
if max.shape and max.shape != operand.shape:
m = "clamp requires max.shape == operand.shape or max.shape == (), got {}."
raise TypeError(m.format(max.shape))
return operand.shape
_clamp_dtype_rule = partial(naryop_dtype_rule, _input_dtype, [_any, _any, _any],
'clamp')
clamp_p = standard_primitive(_clamp_shape_rule, _clamp_dtype_rule, 'clamp')
ad.defjvp(clamp_p,
lambda g, min, operand, max:
select(bitwise_and(gt(min, operand), lt(min, max)),
_brcast(g, operand), _zeros(operand)),
lambda g, min, operand, max:
select(bitwise_and(gt(operand, min), lt(operand, max)),
g, _zeros(operand)),
lambda g, min, operand, max:
select(lt(max, operand), _brcast(g, operand), _zeros(operand)))
def _concatenate_shape_rule(*operands, **kwargs):
dimension = kwargs.pop('dimension')
if not operands:
msg = "concatenate expects at least one operand, got 0."
raise TypeError(msg)
if not all(isinstance(operand, UnshapedArray) for operand in operands):
msg = "All objects to concatenate must be arrays, got {}."
op = next(op for op in operands if not isinstance(op, UnshapedArray))
raise TypeError(msg.format(type(op)))
if len(set(operand.ndim for operand in operands)) != 1:
msg = "Cannot concatenate arrays with different ranks, got {}."
raise TypeError(msg.format(", ".join(str(o.ndim) for o in operands)))
shapes = onp.array([operand.shape for operand in operands])
if not 0 <= dimension < shapes.shape[1]:
msg = "concatenate dimension out of bounds: dimension {} for shapes {}."
raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
if not onp.all(onp.delete(shapes[0] == shapes, dimension, axis=1)):
msg = ("Cannot concatenate arrays with shapes that differ in dimensions "
"other than the one being concatenated: dimension {} for shapes {}.")
raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
concat_size = sum(o.shape[dimension] for o in operands)
ex_shape = operands[0].shape
return ex_shape[:dimension] + (concat_size,) + ex_shape[dimension+1:]
def _concatenate_dtype_rule(*operands, **kwargs):
_check_same_dtypes('concatenate', False, *(o.dtype for o in operands))
return operands[0].dtype
def _concatenate_translation_rule(c, *operands, **kwargs):
dimension = kwargs.pop('dimension')
return xops.ConcatInDim(c, operands, dimension)
def _concatenate_transpose_rule(t, *operands, dimension):
operand_shapes = [o.aval.shape if ad.is_undefined_primal(o) else o.shape
for o in operands]
if t is ad_util.zero:
return [ad_util.zero if ad.is_undefined_primal(o) else None for o in operands]
else:
limit_points = onp.cumsum([shape[dimension] for shape in operand_shapes])
starts = onp.zeros((len(operands), t.ndim), dtype=int)
starts[1:, dimension] = limit_points[:-1]
limits = onp.tile(t.shape, (len(operands), 1))
limits[:, dimension] = limit_points
return [slice(t, start, limit) if ad.is_undefined_primal(o) else None
for o, start, limit in zip(operands, starts, limits)]
def _concatenate_batch_rule(batched_args, batch_dims, *, dimension):
size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims)
if bdim is not None)
operands = [batching.moveaxis(op, bdim, 0) if bdim is not None
else broadcast(op, (size,))
for op, bdim in zip(batched_args, batch_dims)]
return concatenate(operands, dimension + 1), 0
# The concatenate_p masking rule requires use of a while-loop construct and so
# is defined in lax_control_flow.py
concatenate_p = standard_primitive(
_concatenate_shape_rule, _concatenate_dtype_rule, 'concatenate',
_concatenate_translation_rule)
ad.deflinear(concatenate_p, _concatenate_transpose_rule)
ad.primitive_transposes[concatenate_p] = _concatenate_transpose_rule
batching.primitive_batchers[concatenate_p] = _concatenate_batch_rule
def _pad_dtype_rule(operand, padding_value, *, padding_config):
if operand.dtype != padding_value.dtype:
msg = "pad operand and padding_value must be same dtype: got {} and {}."
raise TypeError(msg.format(operand.dtype, padding_value.dtype))
return _input_dtype(operand, padding_value)
def _pad_shape_rule(operand, padding_value, *, padding_config):
lo, hi, interior = zip(*padding_config)
out_shape = onp.add(onp.add(onp.add(lo, hi), operand.shape),
onp.multiply(interior, onp.subtract(operand.shape, 1)))
return tuple(out_shape)
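# Worked example of the formula above (illustrative): an operand of shape (3,)
# with padding_config [(1, 2, 1)] (lo=1, hi=2, interior=1) produces
# 1 + 2 + 3 + 1 * (3 - 1) = 8 elements: one leading pad, an interior pad
# between each pair of input elements, and two trailing pads.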
def _pad_transpose(t, operand, padding_value, *, padding_config):
if t is ad_util.zero:
return [ad_util.zero if ad.is_undefined_primal(operand) else None,
ad_util.zero if ad.is_undefined_primal(padding_value) else None]
lo, hi, interior = zip(*padding_config)
total = lambda x: _reduce_sum(x, list(range(t.ndim)))
def t_op():
unpad_config = zip(onp.negative(lo), onp.negative(hi), onp.zeros_like(interior))
unpadded = pad(t, onp.array(0., t.dtype), unpad_config)
return slice(unpadded, onp.zeros_like(lo), unpadded.shape, onp.add(interior, 1))
t_operand = t_op() if ad.is_undefined_primal(operand) else None
t_padv = sub(total(t), total(t_operand)) if ad.is_undefined_primal(padding_value) else None
return [t_operand, t_padv]
def _pad_batch_rule(batched_args, batch_dims, *, padding_config):
operand, padding_value = batched_args
operand_bdim, padding_value_bdim = batch_dims
if padding_value_bdim is None:
assert operand_bdim is not None
padding_config = list(padding_config)
padding_config.insert(operand_bdim, (0, 0, 0))
return pad(operand, padding_value, padding_config), operand_bdim
else:
raise NotImplementedError # loop and stack
def _pad_translation_rule(c, operand, padding_value, *, padding_config):
return xops.Pad(operand, padding_value,
xc.make_padding_config(padding_config))
pad_p = standard_primitive(_pad_shape_rule, _pad_dtype_rule, 'pad',
translation_rule=_pad_translation_rule)
ad.deflinear(pad_p, _pad_transpose)
ad.primitive_transposes[pad_p] = _pad_transpose
batching.primitive_batchers[pad_p] = _pad_batch_rule
# We have a nonstandard reshape impl so that we can be lazy about data movement.
def _reshape_impl(operand, *, new_sizes, dimensions):
old_sizes = onp.shape(operand)
if type(operand) is xla.DeviceArray and dimensions is None:
bcast_dims = _is_singleton_reshape(old_sizes, new_sizes)
if bcast_dims is not None:
aval = ShapedArray(new_sizes, operand.dtype)
lazy_expr = lazy.broadcast(operand._lazy_expr, new_sizes, bcast_dims)
return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
if type(operand) is pxla.ShardedDeviceArray and dimensions is None:
array = _reshape_sharded_device_array(operand, new_sizes, old_sizes)
if array is not None:
return array
return xla.apply_primitive(reshape_p, operand, new_sizes=new_sizes,
dimensions=dimensions)
def _is_singleton_reshape(old, new):
# A singleton reshape is one where only singleton dimensions are added. We
# want to detect them because they can be expressed as (lazy) broadcasts.
old, new = iter(old), iter(new)
d1, d2 = next(old, None), next(new, None)
bcast_dims = []
i = 0
while True:
if d1 is d2 is None:
return bcast_dims
elif d1 == d2:
bcast_dims.append(i)
i += 1
d1, d2 = next(old, None), next(new, None)
elif d2 == 1:
i += 1
d2 = next(new, None)
else:
return None
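# Illustrative behavior: _is_singleton_reshape((3, 4), (3, 1, 4, 1)) returns
# [0, 2], the output positions of the original dims, so the reshape can be
# expressed as a lazy broadcast; _is_singleton_reshape((3, 4), (12,)) returns
# None and the caller falls through to a real reshape.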
def _reshape_sharded_device_array(array, new_sizes, old_sizes):
"""Returns None if `array` could not be efficiently reshaped.
This function is primarily to support soft_pmap, although these optimizations
could be useful when directly calling reshape as well.
"""
# TODO(jekbradbury): the axis split/merge logic below assumes that
# ShardedDevicesArrays are always sharded across their leading axes. Remove
# this constraint, especially if/when we add APIs that produce sharding across
# interior axes.
if any(num_shards != 1 for num_shards
in array.sharding_spec.shards_per_axis[1:]):
return None
# TODO(skye): handle replicated buffers
if array.sharding_spec.replication_factor != 1:
return None
# ShardedDevicesArrays require all buffers to have the same shape
chunk_shape = array.device_buffers[0].shape().dimensions()
chunk_size = chunk_shape[0] if len(chunk_shape) > 0 else 1
if _is_axis_merge(old_sizes, new_sizes):
num_chunks, ragged = divmod(new_sizes[0], chunk_size)
if ragged: return None
aval = ShapedArray(new_sizes, array.dtype)
sharding_spec = pxla.ShardingSpec(
shards_per_axis=(num_chunks,) + (1,) * (len(new_sizes) - 1),
is_axis_materialized=(True,) * len(new_sizes),
replication_factor=1)
return pxla.ShardedDeviceArray(aval, sharding_spec, array.device_buffers)
if _is_axis_split(old_sizes, new_sizes):
split_axis_size, ragged = divmod(old_sizes[0], chunk_size)
if ragged: return None
if new_sizes[0] != split_axis_size: return None
aval = ShapedArray(new_sizes, array.dtype)
sharding_spec = pxla._pmap_sharding_spec(
new_sizes[0], new_sizes[0], ShapedArray(new_sizes[1:], array.dtype), True)
return pxla.ShardedDeviceArray(aval, sharding_spec, array.device_buffers)
return None
def _is_axis_merge(s1, s2):
# TODO(skye): we might still be able to handle these cases as merges, I
# haven't thought about it much.
if len(s1) < 2 or len(s2) < 1: return False
return s1[2:] == s2[1:] and s1[0] * s1[1] == s2[0]
def _is_axis_split(s1, s2):
return _is_axis_merge(s2, s1)
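# Illustrative checks: _is_axis_merge((4, 2, 7), (8, 7)) is True because the
# two leading axes 4 x 2 merge into 8 with the trailing shape unchanged, and
# _is_axis_split((8, 7), (4, 2, 7)) is therefore True as well; reshapes that
# would have to touch non-leading axes fall through (return False).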
def _reshape_shape_rule(operand, *, new_sizes, dimensions):
if not onp.all(onp.greater_equal(new_sizes, 0)):
msg = 'reshape new_sizes must all be positive, got {}.'
raise TypeError(msg.format(new_sizes))
if prod(onp.shape(operand)) != prod(new_sizes):
msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
raise TypeError(msg.format(new_sizes, onp.shape(operand)))
if dimensions is not None:
if set(dimensions) != set(range(onp.ndim(operand))):
msg = ('reshape dimensions must be a permutation of operand dimensions, '
'got dimensions {} for shape {}.')
raise TypeError(msg.format(dimensions, onp.shape(operand)))
return tuple(new_sizes)
def _reshape_dtype_rule(operand, *, new_sizes, dimensions):
return operand.dtype
def _reshape_translation_rule(c, operand, *, new_sizes, dimensions):
if dimensions is None:
return xops.Reshape(operand, new_sizes)
else:
return xops.Reshape(operand, dimensions, new_sizes)
def _reshape_transpose_rule(t, operand, *, new_sizes, dimensions):
assert ad.is_undefined_primal(operand)
if dimensions is None:
return [reshape(t, operand.aval.shape)]
else:
return [transpose(reshape(t, onp.take(operand.aval.shape, dimensions)),
onp.argsort(dimensions))]
def _reshape_batch_rule(batched_args, batch_dims, *, new_sizes, dimensions):
operand, = batched_args
bdim, = batch_dims
operand = batching.moveaxis(operand, bdim, 0)
if dimensions is not None:
dimensions = (0,) + tuple(onp.add(1, dimensions))
return reshape(operand, operand.shape[:1] + new_sizes, dimensions), 0
reshape_p = standard_primitive(_reshape_shape_rule, _reshape_dtype_rule,
'reshape', _reshape_translation_rule)
reshape_p.def_impl(_reshape_impl)
ad.deflinear2(reshape_p, _reshape_transpose_rule)
batching.primitive_batchers[reshape_p] = _reshape_batch_rule
def _rev_shape_rule(operand, *, dimensions):
_check_shapelike('rev', 'dimensions', dimensions)
if len(set(dimensions)) != len(dimensions):
msg = 'rev dimensions must be unique, got {}.'
raise TypeError(msg.format(dimensions))
if dimensions and not _max(dimensions) < operand.ndim:
msg = ('rev dimensions must all be less than operand ndim, got dimensions '
'{} for operand ndim {}.')
raise TypeError(msg.format(dimensions, operand.ndim))
return operand.shape
def _rev_batch_rule(batched_args, batch_dims, *, dimensions):
operand, = batched_args
bdim, = batch_dims
new_dimensions = [i + 1 if i >= bdim else i for i in dimensions]
return rev(operand, new_dimensions), bdim
rev_p = standard_primitive(_rev_shape_rule, _input_dtype, 'rev')
ad.deflinear(rev_p, lambda t, dimensions: [rev(t, dimensions)])
batching.primitive_batchers[rev_p] = _rev_batch_rule
def _transpose_impl(operand, *, permutation):
if type(operand) is xla.DeviceArray:
lazy_expr = lazy.transpose(operand._lazy_expr, permutation)
aval = ShapedArray(lazy_expr.shape, operand.dtype)
return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
else:
return xla.apply_primitive(transpose_p, operand, permutation=permutation)
def _transpose_shape_rule(operand, *, permutation):
if not isinstance(permutation, (tuple, list, onp.ndarray)):
msg = "transpose permutation must be a tuple/list/ndarray, got {}."
raise TypeError(msg.format(type(permutation)))
if tuple(sorted(permutation)) != tuple(range(operand.ndim)):
msg = ("transpose permutation isn't a permutation of operand dimensions, "
"got permutation {} for operand shape {}.")
raise TypeError(msg.format(permutation, operand.shape))
return tuple(onp.take(operand.shape, permutation))
def _transpose_batch_rule(batched_args, batch_dims, *, permutation):
operand, = batched_args
bdim, = batch_dims
perm = (bdim,) + tuple(i if i < bdim else i+1 for i in permutation)
return transpose(operand, perm), 0
transpose_p = standard_primitive(_transpose_shape_rule, _input_dtype,
'transpose')
transpose_p.def_impl(_transpose_impl)
ad.deflinear(transpose_p,
lambda t, permutation: [transpose(t, onp.argsort(permutation))])
batching.primitive_batchers[transpose_p] = _transpose_batch_rule
def _select_shape_rule(pred, on_true, on_false):
if on_true.shape != on_false.shape:
msg = "select on_true and on_false must have the same shape, got {} and {}."
raise TypeError(msg.format(on_true.shape, on_false.shape))
if pred.shape and pred.shape != on_true.shape:
msg = ("select pred must be scalar or have the same shape as on_true and "
"on_false, got pred shape {} for on_true and on_false of shape {}.")
raise TypeError(msg.format(pred.shape, on_true.shape))
return on_true.shape
def _select_dtype_rule(pred, on_true, on_false):
_check_same_dtypes("select", False, on_true.dtype, on_false.dtype)
if not dtypes.issubdtype(pred.dtype, onp.bool_):
msg = "select pred must be boolean type, got {}."
raise TypeError(msg.format(pred.dtype))
return on_true.dtype
def _select_transpose_rule(t, pred, on_true, on_false):
assert not ad.is_undefined_primal(pred)
if t is ad_util.zero:
return [None,
ad_util.zero if ad.is_undefined_primal(on_true) else None,
ad_util.zero if ad.is_undefined_primal(on_false) else None]
else:
zeros = full_like(t, 0)
return [None,
select(pred, t, zeros) if ad.is_undefined_primal(on_true) else None,
select(pred, zeros, t) if ad.is_undefined_primal(on_false) else None]
def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):
pred, on_true, on_false, = batched_args
pred_bdim, ot_bdim, of_bdim = batch_dims
size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)
if i is not None)
# avoid transposes and some broadcasts in special cases
if pred_bdim == ot_bdim == of_bdim:
if onp.shape(pred) == onp.shape(on_true):
return select(pred, on_true, on_false), pred_bdim
else:
# vmapped function had a scalar pred with nonscalar args
assert onp.ndim(pred) == 1
pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])
return select(pred, on_true, on_false), pred_bdim
elif onp.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None:
if ot_bdim == of_bdim:
return select(pred, on_true, on_false), ot_bdim
elif onp.shape(on_true) == onp.shape(on_false):
on_false = batching.moveaxis(on_false, of_bdim, ot_bdim)
return select(pred, on_true, on_false), ot_bdim
pred = batching.bdim_at_front(pred, pred_bdim, size) if onp.shape(pred) else pred
if not onp.shape(on_true) == onp.shape(on_false) == ():
on_true = batching.bdim_at_front(on_true, ot_bdim, size)
on_false = batching.bdim_at_front(on_false, of_bdim, size)
assert onp.shape(on_true) == onp.shape(on_false)
if 0 < onp.ndim(pred) < onp.ndim(on_true):
# vmapped function had a scalar pred with nonscalar args
assert onp.ndim(pred) == 1
pred = broadcast_in_dim(pred, on_true.shape, [0])
if onp.ndim(pred) > onp.ndim(on_true):
assert onp.ndim(on_true) == 0
on_true = broadcast(on_true, pred.shape)
on_false = broadcast(on_false, pred.shape)
return select(pred, on_true, on_false), 0
select_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select')
ad.defjvp(select_p,
None,
lambda g, b, x, y: select(b, g, _zeros(g)),
lambda g, b, x, y: select(b, _zeros(g), g))
ad.primitive_transposes[select_p] = _select_transpose_rule
batching.primitive_batchers[select_p] = _select_batch_rule
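# Illustrative usage of the batching rule above (not part of the library;
# assumes `jax` and `jax.numpy as jnp` are available):
#   preds = jnp.array([True, False])
#   xs, ys = jnp.ones((2, 3)), jnp.zeros((2, 3))
#   jax.vmap(lambda p, x, y: jax.lax.select(p, x, y))(preds, xs, ys)
#   # -> [[1., 1., 1.], [0., 0., 0.]]; each per-example scalar pred is
#   # broadcast to the operand shape by the rule above before selecting.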
def _slice_shape_rule(operand, *, start_indices, limit_indices, strides):
_check_shapelike("slice", "start_indices", start_indices)
_check_shapelike("slice", "limit_indices", limit_indices)
if operand.ndim != len(start_indices):
msg = ("slice start_indices must have length equal to the number of "
"dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(limit_indices):
msg = ("slice limit_indices must have the same length as start_indices, "
"got start_inidices {} and limit_indices {}.")
raise TypeError(msg.format(start_indices, limit_indices))
if not onp.all(onp.less_equal(limit_indices, operand.shape)):
msg = ("slice limit_indices must be less than or equal to operand shape, "
"got limit_indices {} for operand shape {}.")
raise TypeError(msg.format(limit_indices, operand.shape))
if not onp.all(onp.greater_equal(start_indices, 0)):
msg = ("slice start_indices must be greater than or equal to zero, "
"got start_indices of {}.")
raise TypeError(msg.format(start_indices))
if not onp.all(onp.greater_equal(limit_indices, start_indices)):
msg = ("slice limit_indices must be greater than or equal to start_indices,"
" got start_indices {} and limit_indices {}.")
raise TypeError(msg.format(start_indices, limit_indices))
if strides is None:
strides = onp.ones(operand.ndim, onp.int32)
else:
_check_shapelike("slice", "strides", strides)
if len(strides) != operand.ndim:
msg = ("slice strides must have length equal to the number of dimensions "
"of the operand, got strides {} for operand shape {}.")
raise TypeError(msg.format(strides, operand.shape))
if not onp.all(onp.greater(strides, 0)):
msg = "slice strides must be positive, got {}"
raise TypeError(msg.format(strides))
result_shape = onp.floor_divide(
onp.add(onp.subtract(limit_indices, start_indices), strides) - 1, strides)
return tuple(result_shape)
def _slice_translation_rule(c, operand, *, start_indices, limit_indices,
strides):
return xops.Slice(operand, start_indices, limit_indices,
strides or [1] * len(start_indices))
def _slice_transpose_rule(t, operand, *, start_indices, limit_indices, strides):
assert ad.is_undefined_primal(operand)
operand_shape = operand.aval.shape
if strides is None or onp.all(onp.equal(strides, 1)):
pads = zip(start_indices, onp.subtract(operand_shape, limit_indices),
(0,) * len(start_indices))
else:
real_limits = onp.add(onp.add(start_indices, 1),
onp.multiply(onp.subtract(t.shape, 1), strides))
pads = zip(start_indices, onp.subtract(operand_shape, real_limits),
onp.subtract(strides, 1))
result = pad(t, _const(t, 0), pads)
assert result.shape == operand_shape
return [result]
def _slice_batching_rule(batched_args, batch_dims, *, start_indices,
limit_indices, strides):
operand, = batched_args
bdim, = batch_dims
new_start_indices = list(start_indices)
new_start_indices.insert(bdim, 0)
new_limit_indices = list(limit_indices)
new_limit_indices.insert(bdim, operand.shape[bdim])
if strides is None:
new_strides = None
else:
new_strides = list(strides)
new_strides.insert(bdim, 1)
out = slice(operand, new_start_indices, new_limit_indices, new_strides)
return out, bdim
slice_p = standard_primitive(_slice_shape_rule, _input_dtype, 'slice',
_slice_translation_rule)
ad.deflinear2(slice_p, _slice_transpose_rule)
batching.primitive_batchers[slice_p] = _slice_batching_rule
def _dynamic_slice_shape_rule(operand, *start_indices, slice_sizes):
if operand.ndim != len(start_indices):
msg = ("dynamic_slice start_indices must have length equal to the number "
"of dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(slice_sizes):
msg = ("dynamic_slice slice_sizes must have the same length as "
"start_indices, got start_inidices length {} and slice_sizes {}.")
raise TypeError(msg.format(len(start_indices), slice_sizes))
if not onp.all(onp.less_equal(slice_sizes, operand.shape)):
msg = ("slice slice_sizes must be less than or equal to operand shape, "
"got slice_sizes {} for operand shape {}.")
raise TypeError(msg.format(slice_sizes, operand.shape))
if not onp.all(onp.greater_equal(slice_sizes, 0)):
msg = ("slice slice_sizes must be greater than or equal to zero, "
"got slice_sizes of {}.")
raise TypeError(msg.format(slice_sizes))
return tuple(slice_sizes)
def _dynamic_slice_dtype_rule(operand, *start_indices, slice_sizes):
if any(i.dtype != start_indices[0].dtype or
not dtypes.issubdtype(i.dtype, onp.integer) for i in start_indices):
msg = ("index arguments to dynamic_slice must be integers of the same "
"type, got: {}")
raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
return operand.dtype
def _dynamic_slice_translation_rule(c, operand, *start_indices, slice_sizes):
return xops.DynamicSlice(operand, start_indices, slice_sizes)
def _dynamic_slice_jvp(primals, tangents, *, slice_sizes):
tangent_out = ad_util.zero
if tangents[0] is not ad_util.zero:
tangent_out = dynamic_slice(tangents[0], primals[1:], slice_sizes)
return dynamic_slice(primals[0], primals[1:], slice_sizes), tangent_out
def _dynamic_slice_transpose_rule(t, operand, *start_indices, slice_sizes):
assert ad.is_undefined_primal(operand)
assert all(not ad.is_undefined_primal(s) for s in start_indices)
operand_shape = operand.aval.shape
zeros = full(operand_shape, tie_in(t, _zero(t)))
return ([dynamic_update_slice(zeros, t, start_indices)] +
[None] * len(start_indices))
def _batch_dynamic_slice_indices(indices, bdims):
size = next((x.shape[i] for x, i in zip(indices, bdims) if i is not None), -1)
if size < 0:
return concatenate([reshape(i, [1]) for i in indices], 0), None
indices = concatenate(
[broadcast_in_dim(x, (size, 1),
broadcast_dimensions=((0,) if i is not None else ()))
for x, i in zip(indices, bdims)],
dimension=1)
return indices, 0
def _dynamic_slice_batching_rule(batched_args, batch_dims, *, slice_sizes):
# A dynamic slice is a special case of gather; we can delegate to the gather
# batching rule.
# TODO(phawkins): consider removing dynamic_slice entirely and using gather
# always.
operand, *start_indices = batched_args
operand_bd, *start_idx_bds = batch_dims
operand_shape = (operand.shape if operand_bd is batching.not_mapped
else tuple(onp.delete(operand.shape, operand_bd)))
dims = tuple(range(len(operand_shape)))
dnums = GatherDimensionNumbers(offset_dims=dims, collapsed_slice_dims=(),
start_index_map=dims)
index, index_bdim = _batch_dynamic_slice_indices(start_indices, start_idx_bds)
return _gather_batching_rule(
[operand, index], [operand_bd, index_bdim], dimension_numbers=dnums,
slice_sizes=slice_sizes)
dynamic_slice_p = standard_primitive(
_dynamic_slice_shape_rule, _dynamic_slice_dtype_rule, 'dynamic_slice',
_dynamic_slice_translation_rule)
ad.primitive_jvps[dynamic_slice_p] = _dynamic_slice_jvp # TODO
ad.primitive_transposes[dynamic_slice_p] = _dynamic_slice_transpose_rule
batching.primitive_batchers[dynamic_slice_p] = _dynamic_slice_batching_rule
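# Illustrative usage (not part of the library; assumes `jax` and
# `jax.numpy as jnp` are available): under vmap, a batched dynamic_slice is
# lowered through the gather batching rule as described above.
#   x = jnp.arange(12).reshape(3, 4)
#   starts = jnp.array([0, 1, 2])
#   f = jax.vmap(lambda row, s: jax.lax.dynamic_slice(row, (s,), (2,)))
#   f(x, starts)  # -> [[0, 1], [5, 6], [10, 11]]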
def _dynamic_update_slice_shape_rule(operand, update, *start_indices):
if operand.ndim != update.ndim:
msg = ("dynamic_update_slice update must have the same rank as operand, "
"got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
if operand.ndim != len(start_indices):
msg = ("dynamic_update_slice start_indices must have length equal to the "
"rank of operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if not onp.all(onp.less_equal(update.shape, operand.shape)):
msg = ("dynamic_update_slice update shape must be smaller than operand "
"shape, got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
return operand.shape
def _dynamic_update_slice_dtype_rule(operand, update, *start_indices):
_check_same_dtypes("dynamic_update_slice", False, operand.dtype, update.dtype)
if any(i.dtype != start_indices[0].dtype or
not dtypes.issubdtype(i.dtype, onp.integer) for i in start_indices):
msg = ("index arguments to dynamic_update_slice must be integers of the "
"same type, got {}")
raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
return operand.dtype
def _dynamic_update_slice_jvp(primals, tangents):
operand, update = primals[:2]
start_indices = primals[2:]
g_operand, g_update = tangents[:2]
val_out = dynamic_update_slice(operand, update, start_indices)
if g_operand is ad_util.zero and g_update is ad_util.zero:
tangent_out = ad_util.zero
else:
g_operand = ad.instantiate_zeros(operand, g_operand)
g_update = ad.instantiate_zeros(update, g_update)
tangent_out = dynamic_update_slice(g_operand, g_update, start_indices)
return val_out, tangent_out
def _dynamic_update_slice_transpose_rule(t, operand, update, *start_indices):
assert all(not ad.is_undefined_primal(x) for x in start_indices)
if ad.is_undefined_primal(update):
update_shape = update.aval.shape
else:
update_shape = update.shape
dus = dynamic_update_slice
ds = dynamic_slice
zeros = _zeros(t, shape=update_shape)
operand_t = dus(t, zeros, start_indices) if ad.is_undefined_primal(operand) else None
update_t = ds(t, start_indices, update_shape) if ad.is_undefined_primal(update) else None
return [operand_t, update_t] + [None] * len(start_indices)
def _dynamic_update_slice_translation_rule(c, operand, update, *start_indices):
return xops.DynamicUpdateSlice(operand, update, start_indices)
def _dynamic_update_slice_batching_rule(batched_args, batch_dims):
# A dynamic update slice is a special case of scatter; we can delegate to the
# scatter batching rule.
# TODO(phawkins): consider removing dynamic_update_slice entirely and using
# scatter always.
operand, update, *start_idx = batched_args
operand_bd, update_bd, *start_idx_bd = batch_dims
update_shape = (update.shape if update_bd is batching.not_mapped
else tuple(onp.delete(update.shape, update_bd)))
dims = tuple(range(len(update_shape)))
dnums = ScatterDimensionNumbers(update_window_dims=dims,
inserted_window_dims=(),
scatter_dims_to_operand_dims=dims)
index, index_bdim = _batch_dynamic_slice_indices(start_idx, start_idx_bd)
return _scatter_batching_rule(
scatter, (operand, index, update), (operand_bd, index_bdim, update_bd),
update_jaxpr=None, update_consts=None, dimension_numbers=dnums)
dynamic_update_slice_p = standard_primitive(
_dynamic_update_slice_shape_rule, _dynamic_update_slice_dtype_rule,
'dynamic_update_slice', _dynamic_update_slice_translation_rule)
ad.primitive_jvps[dynamic_update_slice_p] = _dynamic_update_slice_jvp
ad.primitive_transposes[dynamic_update_slice_p] = \
_dynamic_update_slice_transpose_rule
batching.primitive_batchers[dynamic_update_slice_p] = \
_dynamic_update_slice_batching_rule
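# Illustrative usage (not part of the library; assumes `jax` and
# `jax.numpy as jnp` are available): under vmap, a batched
# dynamic_update_slice is lowered through the scatter batching rule above.
#   rows = jnp.zeros((3, 4))
#   ups = jnp.ones((3, 2))
#   starts = jnp.array([0, 1, 2])
#   f = jax.vmap(lambda row, up, s: jax.lax.dynamic_update_slice(row, up, (s,)))
#   f(rows, ups, starts)  # each row receives its ones at its own offset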
def _gather_dimensions_proto(indices_shape, dimension_numbers):
assert type(dimension_numbers) is GatherDimensionNumbers
proto = xla_client.GatherDimensionNumbers()
proto.offset_dims.extend(dimension_numbers.offset_dims)
proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
proto.start_index_map.extend(dimension_numbers.start_index_map)
assert indices_shape.rank() > 0
proto.index_vector_dim = indices_shape.rank() - 1
return proto
def _gather_dtype_rule(operand, start_indices, **kwargs):
if not dtypes.issubdtype(start_indices.dtype, onp.integer):
raise ValueError("start_indices must have an integer type")
return dtypes.canonicalize_dtype(operand.dtype)
def _gather_shape_rule(operand, start_indices, *, dimension_numbers,
slice_sizes):
if len(operand.shape) != len(slice_sizes):
msg = ("slice_sizes must have rank equal to the gather operand; "
"operand.shape={}, slice_sizes={}".format(operand.shape, slice_sizes))
raise ValueError(msg)
result_rank = len(dimension_numbers.offset_dims) + start_indices.ndim - 1
start_indices_shape = iter(start_indices.shape[:-1])
slice_sizes = iter(onp.delete(slice_sizes, dimension_numbers.collapsed_slice_dims))
return tuple(next(slice_sizes) if i in dimension_numbers.offset_dims
else next(start_indices_shape) for i in range(result_rank))
def _gather_translation_rule(c, operand, start_indices, *, dimension_numbers,
slice_sizes):
indices_shape = c.get_shape(start_indices)
return xops.Gather(
operand, start_indices,
_gather_dimensions_proto(indices_shape, dimension_numbers), slice_sizes,
indices_are_sorted=False)
def _gather_jvp_rule(g, operand, start_indices, *, dimension_numbers,
slice_sizes):
return gather(g, start_indices, dimension_numbers, slice_sizes)
def _gather_transpose_rule(t, operand, start_indices, *, dimension_numbers,
slice_sizes):
assert ad.is_undefined_primal(operand)
operand_shape = operand.aval.shape
if t is ad_util.zero:
return [ad_util.zero, ad_util.zero]
zeros = full(operand_shape, tie_in(t, _zero(t)))
scatter_dnums = ScatterDimensionNumbers(
update_window_dims=dimension_numbers.offset_dims,
inserted_window_dims=dimension_numbers.collapsed_slice_dims,
scatter_dims_to_operand_dims=dimension_numbers.start_index_map)
return [scatter_add(zeros, start_indices, t, scatter_dnums), ad_util.zero]
def _gather_batching_rule(batched_args, batch_dims, *, dimension_numbers,
slice_sizes):
operand, start_indices = batched_args
operand_bdim, start_indices_bdim = batch_dims
if operand_bdim is not None and start_indices_bdim is None:
operand = batching.moveaxis(operand, operand_bdim, 0)
slice_sizes = (operand.shape[0],) + slice_sizes
offset_dims = (0,) + tuple(onp.add(1, dimension_numbers.offset_dims))
collapsed_slice_dims = tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))
start_index_map = tuple(onp.add(1, dimension_numbers.start_index_map))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=collapsed_slice_dims,
start_index_map=start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
elif operand_bdim is None and start_indices_bdim is not None:
start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=dimension_numbers.collapsed_slice_dims,
start_index_map=dimension_numbers.start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
else:
# move our batch dimensions to the front to preserve sanity
operand = batching.moveaxis(operand, operand_bdim, 0)
start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
# Example: user code had start_indices shape (3, 4, 5), and we have to deal
# with start_indices shape (7, 3, 4, 5). We transform that to a
# start_indices of shape (7, 3, 4, 6) where we concatenated an iota that
# counts along our batch dimension to the front of the ndindex.
count_shape = list(start_indices.shape)
count_shape[-1] = 1
counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0)
start_indices = concatenate([counts, start_indices], len(count_shape) - 1)
slice_sizes = (1,) + slice_sizes
collapsed_slice_dims = (0,) + tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))
offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))
start_index_map = (0,) + tuple(onp.add(1, dimension_numbers.start_index_map))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=collapsed_slice_dims,
start_index_map=start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
gather_p = standard_primitive(
_gather_shape_rule, _gather_dtype_rule, 'gather',
_gather_translation_rule)
ad.defjvp(gather_p, _gather_jvp_rule, None)
ad.primitive_transposes[gather_p] = _gather_transpose_rule
batching.primitive_batchers[gather_p] = _gather_batching_rule
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
assert type(dimension_numbers) is ScatterDimensionNumbers
proto = xla_client.ScatterDimensionNumbers()
proto.update_window_dims.extend(dimension_numbers.update_window_dims)
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
proto.scatter_dims_to_operand_dims.extend(
dimension_numbers.scatter_dims_to_operand_dims)
assert indices_shape.rank() > 0
proto.index_vector_dim = indices_shape.rank() - 1
return proto
def _scatter_dtype_rule(operand, scatter_indices, updates, **kwargs):
if not dtypes.issubdtype(scatter_indices.dtype, onp.integer):
raise ValueError("scatter_indices must have an integer type")
_check_same_dtypes("scatter", False, operand.dtype, updates.dtype)
return dtypes.canonicalize_dtype(operand.dtype)
def _scatter_shape_rule(operand, scatter_indices, updates, **kwargs):
return operand.shape
def _scatter_translation_rule(c, operand, scatter_indices, updates,
update_jaxpr, update_consts, dimension_numbers):
dtype = c.get_shape(operand).numpy_dtype()
init_value = xb.constant(c, onp.array(0, dtype))
update_computation = _reduction_computation(
c, update_jaxpr, update_consts, init_value)
indices_shape = c.get_shape(scatter_indices)
return xops.Scatter(operand, scatter_indices, updates, update_computation,
_scatter_dimensions_proto(indices_shape, dimension_numbers),
False, False)
def _scatter_add_jvp(primals, tangents, *, update_jaxpr, update_consts,
dimension_numbers):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
val_out = scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dimension_numbers)
if g_operand is ad_util.zero and g_updates is ad_util.zero:
tangent_out = ad_util.zero
else:
g_operand = ad.instantiate_zeros(operand, g_operand)
g_updates = ad.instantiate_zeros(updates, g_updates)
tangent_out = scatter_add_p.bind(
g_operand, scatter_indices, g_updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dimension_numbers)
return val_out, tangent_out
def _scatter_add_transpose_rule(t, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers):
assert not ad.is_undefined_primal(scatter_indices)
if ad.is_undefined_primal(updates):
updates_shape = updates.aval.shape
else:
updates_shape = updates.shape
if t is ad_util.zero:
return [ad_util.zero, None, ad_util.zero]
operand_t = update_t = None
if ad.is_undefined_primal(operand):
operand_t = t
if ad.is_undefined_primal(updates):
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
pos += 1
update_t = gather(t, scatter_indices, dimension_numbers=gather_dnums,
slice_sizes=slice_sizes)
return [operand_t, None, update_t]
def _scatter_mul_transpose_rule(t, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers):
assert not ad.is_undefined_primal(scatter_indices)
if ad.is_undefined_primal(updates):
updates_shape = updates.aval.shape
else:
updates_shape = updates.shape
if t is ad_util.zero:
return [ad_util.zero, None, ad_util.zero]
operand_t = update_t = None
if ad.is_undefined_primal(operand):
operand_t = scatter_mul(t, scatter_indices, updates,
dimension_numbers=dimension_numbers)
if ad.is_undefined_primal(updates):
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
pos += 1
update_t = gather(mul(t, operand), scatter_indices,
dimension_numbers=gather_dnums, slice_sizes=slice_sizes)
return [operand_t, None, update_t]
def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
update_jaxpr, update_consts, dimension_numbers):
operand, scatter_indices, updates = batched_args
operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims
del update_jaxpr, update_consts # Unused.
# move the operand batch dim to the front if it is not None, otherwise create
# it at the front (so that we can scatter into it)
size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)
if ax is not None)
operand = batching.bdim_at_front(operand, operand_bdim, size)
operand_bdim = 0
updates = batching.bdim_at_front(updates, updates_bdim, size)
if scatter_indices_bdim is None:
inserted_window_dims = tuple(onp.add(1, dimension_numbers.inserted_window_dims))
update_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.update_window_dims))
scatter_dims_to_operand_dims = tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))
dnums = ScatterDimensionNumbers(
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
return scatter_op(operand, scatter_indices, updates, dnums), 0
# see the third case in _gather_batching_rule for comparison and comments
scatter_indices = batching.bdim_at_front(
scatter_indices, scatter_indices_bdim, size)
count_shape = list(scatter_indices.shape)
count_shape[-1] = 1
counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0)
scatter_indices = concatenate([counts, scatter_indices],
len(count_shape) - 1)
update_window_dims = tuple(onp.add(1, dimension_numbers.update_window_dims))
inserted_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.inserted_window_dims))
scatter_dims_to_operand_dims = (0,) + tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))
dnums = ScatterDimensionNumbers(
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
return scatter_op(operand, scatter_indices, updates, dnums), 0
scatter_add_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',
_scatter_translation_rule)
ad.primitive_jvps[scatter_add_p] = _scatter_add_jvp
ad.primitive_transposes[scatter_add_p] = _scatter_add_transpose_rule
batching.primitive_batchers[scatter_add_p] = (
partial(_scatter_batching_rule, scatter_add))
scatter_mul_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-mul',
_scatter_translation_rule)
def _scatter_mul_jvp_rhs(g, x, i, y, *, dimension_numbers, **kw):
return mul(x, scatter_add(zeros_like_array(x), i, g,
dimension_numbers=dimension_numbers))
ad.defjvp(scatter_mul_p,
lambda g, x, i, y, **kw: scatter_mul_p.bind(g, i, y, **kw),
None,
_scatter_mul_jvp_rhs)
ad.primitive_transposes[scatter_mul_p] = _scatter_mul_transpose_rule
batching.primitive_batchers[scatter_mul_p] = (
partial(_scatter_batching_rule, scatter_mul))
# TODO(jlebar): Add derivatives.
scatter_min_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',
_scatter_translation_rule)
batching.primitive_batchers[scatter_min_p] = (
partial(_scatter_batching_rule, scatter_min))
# TODO(jlebar): Add derivatives.
scatter_max_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',
_scatter_translation_rule)
batching.primitive_batchers[scatter_max_p] = (
partial(_scatter_batching_rule, scatter_max))
def _scatter_jvp(primals, tangents, *, update_jaxpr, update_consts,
dimension_numbers):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
dnums = dimension_numbers
if g_operand is ad_util.zero and g_updates is ad_util.zero:
val_out = scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dnums)
tangent_out = ad_util.zero
return val_out, tangent_out
g_operand = ad.instantiate_zeros(operand, g_operand)
g_updates = ad.instantiate_zeros(updates, g_updates)
# If there are overlapping indices in the scatter, it is unspecified which
# update "wins". So we use the following perhaps surprising scheme:
# a) attach a positive ID to each update in updates, forming (value, id) pairs
# (using a new array dimension because scatter doesn't actually support
# pairs).
# b) perform the scatter, yielding (value, id) updates, which we split apart.
# c) perform the inverse gather on the ids (similar to
# _scatter_add_transpose), and use it to build a mask for the tangent of
# `updates`.
# d) perform a scatter-add on the masked JVP values. A benefit of using
# scatter-add here is that we don't need a `scatter` transpose rule.
# a) add unique positive IDs (iotas) to the updates, and zeros to the operand.
operand_shape = operand.shape
updates_shape = updates.shape
updates_dtype = _dtype(updates)
new_operand = reshape(operand, (1,) + operand_shape)
new_operand = pad(new_operand, _zero(operand),
((0, 1, 0),) + tuple((0, 0, 0) for _ in operand_shape))
# We specify the dtype here in case `updates_shape` is an empty tuple, in
# which case numpy defaults to float64.
ids_shape = onp.array(updates_shape, dtype=onp.int32)
ids_shape[dnums.update_window_dims,] = 1
num_ids = onp.prod(ids_shape)
update_ids = add(reshape(iota(updates_dtype, num_ids), ids_shape),
_ones(updates))
# TODO(phawkins): there is a potential bug here if the number of updates
# is large enough to overflow the number of mantissa bits in a float so IDs
# end up colliding. We could also utilize the exponent and sign bits, with a
# little more work.
assert num_ids < (2 ** dtypes.finfo(updates_dtype).nmant)
updates = reshape(updates, (1,) + updates_shape)
reshaped_update_ids = reshape(update_ids, (1,) + updates_shape)
updates_and_ids = concatenate((updates, reshaped_update_ids), 0)
new_dnums = ScatterDimensionNumbers(
update_window_dims=(0,) + tuple(d + 1 for d in dnums.update_window_dims),
inserted_window_dims=tuple(d + 1 for d in dnums.inserted_window_dims),
scatter_dims_to_operand_dims=tuple(d + 1 for d in dnums.scatter_dims_to_operand_dims))
outputs = scatter_p.bind(
new_operand, scatter_indices, updates_and_ids, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=new_dnums)
val_out = index_in_dim(outputs, 0, keepdims=False)
scattered_ids = index_in_dim(outputs, 1, keepdims=False)
# b) compute the inverse gather that "undoes" the scatter on the id values.
gather_dnums = GatherDimensionNumbers(
offset_dims=dnums.update_window_dims,
collapsed_slice_dims=dnums.inserted_window_dims,
start_index_map=dnums.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(scattered_ids.shape)):
if i in dnums.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[dnums.update_window_dims[pos]])
pos += 1
gathered_update_ids = gather(scattered_ids, scatter_indices,
dimension_numbers=gather_dnums,
slice_sizes=slice_sizes)
# c) mask off input JVP elements that do not correspond to a primal output.
masked_g_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
g_operand, _zeros(g_operand))
masked_g_updates = select(eq(update_ids, gathered_update_ids),
g_updates, _zeros(g_updates))
# d) perform a scatter-add to compute the tangent output.
tangent_out = scatter_add(masked_g_operand, scatter_indices, masked_g_updates,
dimension_numbers=dnums)
return val_out, tangent_out
scatter_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter',
_scatter_translation_rule)
ad.primitive_jvps[scatter_p] = _scatter_jvp
batching.primitive_batchers[scatter_p] = (
partial(_scatter_batching_rule, scatter))
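# Worked example for the ID scheme used in _scatter_jvp above (illustrative
# values, not part of the library): suppose two updates u0, u1 both target
# operand index 0, and we attach update_ids = [1, 2]. After the scatter,
# scattered_ids holds whichever id "won" at index 0; gathering the ids back
# gives gathered_update_ids equal to that winning id at both update positions,
# so the mask eq(update_ids, gathered_update_ids) keeps only the winning
# update's tangent, and the final scatter_add reproduces the primal's
# (unspecified) choice in the tangent output.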
def _reduce_shape_rule(operand, init_value, *, computation, jaxpr, consts,
dimensions):
return tuple(onp.delete(operand.shape, dimensions))
def _reduce_translation_rule(c, operand, init_value, *, computation, jaxpr,
consts, dimensions):
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
return xops.Reduce(c, [operand], [init_value], xla_computation, dimensions)
def _reduce_batch_rule(batched_args, batch_dims, *, computation, jaxpr, consts,
dimensions):
operand, init_value = batched_args
operand_bdim, init_value_bdim = batch_dims
if init_value_bdim is None:
assert operand_bdim is not None
new_dimensions = [d + bool(d >= operand_bdim) for d in dimensions]
new_operand_bdim = operand_bdim - int(onp.sum(onp.less(dimensions, operand_bdim)))
return reduce(operand, init_value, computation, new_dimensions), new_operand_bdim
else:
raise NotImplementedError # loop and stack
def _reduction_computation(c, jaxpr, consts, init_value):
shape = c.get_shape(init_value)
axis_env = xla.AxisEnv(1) # no parallel primitives inside reductions
subc = xla_bridge.make_computation_builder("reduction_computation")
assert len(consts) == 0, "Reduction computations cannot have constants"
args = [xb.parameter(subc, 0, shape), xb.parameter(subc, 1, shape)]
out, = xla.jaxpr_subcomp(subc, jaxpr, None, axis_env, consts, '', *args)
return subc.build(out)
def _masking_defreducer(prim, identity):
masking.masking_rules[prim] = partial(_reducer_masking_rule, prim, identity)
def _reducer_masking_rule(prim, identity, padded_vals, logical_shapes,
axes):
(padded_val,), (logical_shape,) = padded_vals, logical_shapes
padded_shape = masking.padded_shape_as_value(padded_val.shape)
masks = [broadcasted_iota(onp.int32, padded_shape, i) < d
for i, d in enumerate(logical_shape) if i in axes]
mask = _reduce(operator.and_, masks)
masked_val = select(mask, padded_val, identity(padded_shape, padded_val.dtype))
return prim.bind(masked_val, axes=axes)
reduce_p = standard_primitive(_reduce_shape_rule, _input_dtype, 'reduce',
_reduce_translation_rule)
batching.primitive_batchers[reduce_p] = _reduce_batch_rule
def _reduce_number_dtype_rule(name, operand, *args, **kw):
if not dtypes.issubdtype(operand.dtype, onp.number):
raise TypeError("{} does not accept dtype {}. Accepted dtypes are subtypes "
"of number.".format(name, onp.dtype(operand.dtype).name))
return dtypes.canonicalize_dtype(operand.dtype)
def _reduce_sum_shape_rule(operand, *, axes):
return _reduce_op_shape_rule(operand, axes=axes)
def _reduce_sum_translation_rule(c, operand, *, axes):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, onp.array(0, dtype))],
xla.primitive_subcomputation(add_p, scalar, scalar),
axes)
def _reduce_sum_transpose_rule(cotangent, operand, *, axes):
assert ad.is_undefined_primal(operand)
input_shape = operand.aval.shape
broadcast_dimensions = tuple(onp.delete(onp.arange(len(input_shape)), axes))
result = broadcast_in_dim(cotangent, input_shape, broadcast_dimensions)
assert result.shape == input_shape
return [result]
reduce_sum_p = standard_primitive(
_reduce_sum_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_sum'),
'reduce_sum', _reduce_sum_translation_rule)
ad.deflinear2(reduce_sum_p, _reduce_sum_transpose_rule)
batching.defreducer(reduce_sum_p)
_masking_defreducer(reduce_sum_p,
lambda shape, dtype: onp.broadcast_to(onp.array(0, dtype), shape))
def _reduce_op_shape_rule(operand, *, axes):
return tuple(onp.delete(operand.shape, axes))
def _reduce_prod_translation_rule(c, operand, *, axes):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, onp.array(1, dtype))],
xla.primitive_subcomputation(mul_p, scalar, scalar), axes)
def _reduce_prod_jvp_rule(primals, tangents, *, axes):
operand, = primals
tangent, = tangents
input_shape = onp.array(operand.shape)
n = onp.prod(input_shape[list(axes)])
non_axes = onp.delete(onp.arange(len(input_shape)), axes)
# Move the reduced axes to the front, and flatten them to 1D.
permutation = axes + tuple(non_axes)
new_shape = (n,) + tuple(input_shape[non_axes])
operand = reshape(operand, new_shape, permutation)
tangent = reshape(tangent, new_shape, permutation)
def _reduce_prod_tree(x, axis=0):
"""Reduce by repeatedly splitting the array and multiplying."""
while x.shape[axis] > 1:
n = x.shape[axis]
n1 = (n + 1) // 2
n2 = n - n1
x1 = slice_in_dim(x, 0, n1)
x2 = slice_in_dim(x, n1, None)
if n2 != n1:
paddings = [(0, 0, 0)] * len(x.shape)
paddings[axis] = (0, 1, 0)
x2 = pad(x2, _const(x, 1), paddings)
x = x1 * x2
shape = list(x.shape)
del shape[axis]
return reshape(x, shape)
return api.jvp(_reduce_prod_tree, (operand,), (tangent,))
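# Worked example for the tree reduction above (illustrative): reducing
# [2., 3., 4., 5.] splits into [2., 3.] * [4., 5.] = [8., 15.], then
# [8.] * [15.] = [120.]; an odd length such as [2., 3., 5.] pads the short
# half with 1: [2., 3.] * [5., 1.] -> [10., 3.] -> [30.]. Differentiating
# this tree with api.jvp keeps the tangent well-defined even when some
# entries are zero, since it never divides by the inputs.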
reduce_prod_p = standard_primitive(
_reduce_op_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_prod'),
'reduce_prod', _reduce_prod_translation_rule)
ad.primitive_jvps[reduce_prod_p] = _reduce_prod_jvp_rule
batching.defreducer(reduce_prod_p)
def _reduce_chooser_shape_rule(operand, *, axes):
return tuple(onp.delete(operand.shape, axes))
def _reduce_chooser_translation_rule(prim, identity, c, operand, *, axes):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, identity(dtype))],
xla.primitive_subcomputation(prim, scalar, scalar), axes)
def _reduce_chooser_jvp_rule(g, ans, operand, *, axes):
# TODO(mattjj): an alternative is to use variadic reduce to compute the chosen
# locations in a single pass (rather than comparing equality) and use a
# gather, and/or even push along the chosen elements of g (b/112040122)
shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]
location_indicators = convert_element_type(
_eq_meet(operand, reshape(ans, shape)), g.dtype)
counts = _reduce_sum(location_indicators, axes)
return div(_reduce_sum(mul(g, location_indicators), axes), counts)
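# Worked example for the chooser JVP above (illustrative): for a max over
# x = [1., 3., 3.] with tangent g = [g0, g1, g2], location_indicators is
# [0, 1, 1], counts is 2, and the JVP is (g1 + g2) / 2, i.e. tied maxima
# share the derivative equally.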
_reduce_max_translation_rule = partial(_reduce_chooser_translation_rule, max_p,
_get_max_identity)
reduce_max_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
'reduce_max', _reduce_max_translation_rule)
ad.defjvp2(reduce_max_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_max_p)
_reduce_min_translation_rule = partial(
_reduce_chooser_translation_rule, min_p, _get_min_identity)
reduce_min_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
'reduce_min', _reduce_min_translation_rule)
ad.defjvp2(reduce_min_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_min_p)
def _reduce_logical_shape_rule(operand, *, axes):
if operand.dtype != onp.bool_:
msg = "logical reduction requires operand dtype bool, got {}."
raise TypeError(msg.format(operand.dtype))
return tuple(onp.delete(operand.shape, axes))
def _reduce_logical_translation_rule(prim, identity, c, operand, *, axes):
scalar = ShapedArray((), onp.bool_)
return xops.Reduce(c, [operand], [xb.constant(c, identity(onp.bool_))],
xla.primitive_subcomputation(prim, scalar, scalar), axes)
_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,
or_p, _get_max_identity)
reduce_or_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
'reduce_or', _reduce_or_translation_rule)
batching.defreducer(reduce_or_p)
_reduce_and_translation_rule = partial(_reduce_logical_translation_rule,
and_p, _get_min_identity)
reduce_and_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
'reduce_and', _reduce_and_translation_rule)
batching.defreducer(reduce_and_p)
def _reduce_window_shape_rule(operand, init_value, *, jaxpr, consts,
window_dimensions, window_strides, padding):
if operand.dtype != init_value.dtype:
msg = ("reduce_window got inconsistent dtypes for operand and init_value: "
" got operand dtype {} and init_value dtype {}.")
raise TypeError(msg.format(operand.dtype, init_value.dtype))
return _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding)
def _reduce_window_translation_rule(c, operand, init_value, *, jaxpr, consts,
window_dimensions, window_strides, padding):
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
pads = xc.window_padding_type_to_pad_values(
padding, c.get_shape(operand).dimensions(), window_dimensions,
window_strides)
return xops.ReduceWindowWithGeneralPadding(
operand, init_value, xla_computation, window_dimensions,
window_strides, (), (), pads)
def _generic_reduce_window_batch_rule(
batched_args, batch_dims, *, jaxpr, consts, window_dimensions,
window_strides, padding):
operand, init = batched_args
bdim, init_bdim = batch_dims
if init_bdim is not None:
raise NotImplementedError("reduce_window batching is not implemented for "
"initial values")
def reduce_window(x, window_dimensions, window_strides, padding):
return reduce_window_p.bind(
x, init, jaxpr=jaxpr, consts=consts, window_dimensions=window_dimensions,
window_strides=window_strides, padding=padding)
return _reduce_window_batch_rule(reduce_window, (operand,), (bdim,),
window_dimensions, window_strides, padding)
reduce_window_p = standard_primitive(
_reduce_window_shape_rule, _input_dtype, 'reduce_window',
_reduce_window_translation_rule)
batching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule
def _reduce_window_sum_shape_rule(operand, *, window_dimensions, window_strides,
padding):
if not dtypes.issubdtype(operand.dtype, onp.number):
msg = "operand to reduce_window_sum must have a number dtype, got {}"
raise TypeError(msg.format(onp.dtype(operand.dtype).name))
return _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding)
def _reduce_window_sum_translation_rule(c, operand, *, window_dimensions,
window_strides, padding):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
pads = xc.window_padding_type_to_pad_values(
padding, c.get_shape(operand).dimensions(), window_dimensions,
window_strides)
return xops.ReduceWindowWithGeneralPadding(
operand, xb.constant(c, onp.array(0, dtype)),
xla.primitive_subcomputation(add_p, scalar, scalar), window_dimensions,
window_strides, (), (), pads)
def _reduce_window_sum_transpose_rule(cotangent, operand, *, window_dimensions,
window_strides, padding):
assert ad.is_undefined_primal(operand)
input_shape = operand.aval.shape
in_pads = padtype_to_pads(input_shape, window_dimensions, window_strides,
padding)
ones = [1] * len(input_shape)
pads = _conv_general_vjp_lhs_padding(
input_shape, window_dimensions, window_strides, cotangent.shape, in_pads,
ones, ones)
padding_config = [(lo, hi, stride - 1)
for (lo, hi), stride in zip(pads, window_strides)]
pad_cotangent = pad(cotangent, _zero(cotangent), padding_config)
result = _reduce_window_sum(pad_cotangent, window_dimensions, ones,
xla_client.PaddingType.VALID)
assert result.shape == input_shape
return [result]
def _reduce_window_batch_rule(reduce_window, batched_args, bdims, *,
window_dimensions, window_strides, padding):
operand, = batched_args
bdim, = bdims
if bdim is not None:
window_dimensions = \
window_dimensions[:bdim] + (1,) + window_dimensions[bdim:]
window_strides = window_strides[:bdim] + (1,) + window_strides[bdim:]
operand = reduce_window(
operand, window_dimensions, window_strides, padding)
return operand, bdim
reduce_window_sum_p = standard_primitive(
_reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',
_reduce_window_sum_translation_rule)
ad.deflinear2(reduce_window_sum_p, _reduce_window_sum_transpose_rule)
batching.primitive_batchers[reduce_window_sum_p] = partial(
_reduce_window_batch_rule, _reduce_window_sum)
def _reduce_window_chooser_translation_rule(
prim, identity, c, operand, *, window_dimensions, window_strides, padding):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
pads = xc.window_padding_type_to_pad_values(
padding, c.get_shape(operand).dimensions(), window_dimensions,
window_strides)
return xops.ReduceWindowWithGeneralPadding(
operand, xb.constant(c, identity(dtype)),
xla.primitive_subcomputation(prim, scalar, scalar), window_dimensions,
window_strides, (), (), pads)
def _reduce_window_chooser_jvp_rule(prim, g, operand, *, window_dimensions,
window_strides, padding):
assert prim is max_p or prim is min_p
select_prim = ge_p if prim is max_p else le_p
return _select_and_gather_add(g, operand, select_prim, window_dimensions,
window_strides, padding)
def _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding):
_check_shapelike("reduce_window", "window_dimensions", window_dimensions)
_check_shapelike("reduce_window", "window_strides", window_strides)
if operand.ndim != len(window_dimensions):
msg = ("reduce_window got the wrong number of window_dimensions for "
"operand: got operand shape {} with window_dimensions {}.")
raise TypeError(msg.format(operand.shape, window_dimensions))
if len(window_strides) != len(window_dimensions):
msg = ("reduce_window got inconsistent window_strides and "
"window_dimensions: got window_strides {} and window_dimensions {}.")
raise TypeError(msg.format(window_strides, window_dimensions))
return reduce_window_shape_tuple(operand.shape, window_dimensions,
window_strides, padding)
def reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides,
padding):
pads = padtype_to_pads(operand_shape, window_dimensions, window_strides, padding)
operand_padded = onp.add(operand_shape, onp.add(*zip(*pads)))
t = onp.floor_divide(
onp.subtract(operand_padded, window_dimensions), window_strides) + 1
return tuple(t)
_reduce_window_max_translation_rule = partial(
_reduce_window_chooser_translation_rule, max_p, _get_max_identity)
reduce_window_max_p = standard_primitive(
_common_reduce_window_shape_rule, _input_dtype, 'reduce_window_max',
_reduce_window_max_translation_rule)
ad.defjvp(reduce_window_max_p, partial(_reduce_window_chooser_jvp_rule, max_p))
batching.primitive_batchers[reduce_window_max_p] = partial(
_reduce_window_batch_rule, _reduce_window_max)
_reduce_window_min_translation_rule = partial(
_reduce_window_chooser_translation_rule, min_p, _get_min_identity)
reduce_window_min_p = standard_primitive(
_common_reduce_window_shape_rule, _input_dtype, 'reduce_window_min',
_reduce_window_min_translation_rule)
ad.defjvp(reduce_window_min_p, partial(_reduce_window_chooser_jvp_rule, min_p))
_reduce_window_min_batch_rule = partial(_reduce_window_batch_rule,
_reduce_window_min)
batching.primitive_batchers[reduce_window_min_p] = partial(
_reduce_window_batch_rule, _reduce_window_min)
def _select_and_scatter_shape_rule(
operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
_check_shapelike("select_and_scatter", "window_dimensions", window_dimensions)
_check_shapelike("select_and_scatter", "window_strides", window_strides)
if len(window_dimensions) != len(window_strides):
msg = ("select_and_scatter got inconsistent window_strides and "
"window_dimensions: got window_strides {} and window_dimensions {}.")
raise TypeError(msg.format(window_strides, window_dimensions))
return operand.shape
def _select_and_scatter_translation(
c, operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
select = _reduction_computation(c, select_jaxpr, select_consts, init_value)
scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)
pads = xc.window_padding_type_to_pad_values(
padding, c.get_shape(operand).dimensions(), window_dimensions,
window_strides)
return xops.SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, pads, source,
init_value, scatter)
select_and_scatter_p = standard_primitive(
_select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',
_select_and_scatter_translation)
def _select_and_scatter_add_shape_rule(
source, operand, *, select_prim, window_dimensions, window_strides,
padding):
return operand.shape
def _select_and_scatter_add_translation(
c, source, operand, *, select_prim, window_dimensions, window_strides,
padding):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
select = xla.primitive_subcomputation(select_prim, scalar, scalar)
scatter = xla.primitive_subcomputation(add_p, scalar, scalar)
zero = xb.constant(c, onp.array(0, dtype))
pads = xc.window_padding_type_to_pad_values(
padding, c.get_shape(operand).dimensions(), window_dimensions,
window_strides)
return xops.SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, pads, source, zero,
scatter)
def _select_and_scatter_add_jvp(
primals, tangents, *, select_prim, window_dimensions, window_strides,
padding):
source, operand = primals
g_source, g_operand = tangents
val_out = _select_and_scatter_add(
source, operand, select_prim, window_dimensions, window_strides,
padding)
del g_operand
if g_source is ad_util.zero:
tangent_out = ad_util.zero
else:
tangent_out = _select_and_scatter_add(
g_source, operand, select_prim, window_dimensions,
window_strides, padding)
return val_out, tangent_out
def _select_and_scatter_add_transpose(
t, source, operand, *, select_prim, window_dimensions, window_strides,
padding):
assert ad.is_undefined_primal(source) and not ad.is_undefined_primal(operand)
source_t = _select_and_gather_add(t, operand, select_prim, window_dimensions,
window_strides, padding)
return [source_t, None]
def _select_and_scatter_add_batch_rule(batched_args, batch_dims, **kwargs):
source, operand = batched_args
s_bdims, o_bdims = batch_dims
if s_bdims is not None and o_bdims is not None:
#TODO(#212): use a map construct instead of unrolling.
source = batching.moveaxis(source, s_bdims, 0)
operand = batching.moveaxis(operand, o_bdims, 0)
outputs = [
_select_and_scatter_add(s, o, **kwargs) for s, o in zip(source, operand)]
outputs = [reshape(out, (1,) + out.shape) for out in outputs]
outputs = concatenate(outputs, 0)
return outputs, 0
elif s_bdims is not None:
#TODO(#212): use a map construct instead of unrolling.
source = batching.moveaxis(source, s_bdims, 0)
outputs = [
_select_and_scatter_add(s, operand, **kwargs) for s in source]
outputs = [reshape(out, (1,) + out.shape) for out in outputs]
outputs = concatenate(outputs, 0)
return outputs, 0
elif o_bdims is not None:
#TODO(#212): use a map construct instead of unrolling.
operand = batching.moveaxis(operand, o_bdims, 0)
outputs = [
_select_and_scatter_add(source, o, **kwargs) for o in operand]
outputs = [reshape(out, (1,) + out.shape) for out in outputs]
outputs = concatenate(outputs, 0)
return outputs, 0
select_and_scatter_add_p = standard_primitive(
_select_and_scatter_add_shape_rule, _input_dtype, 'select_and_scatter_add',
_select_and_scatter_add_translation)
ad.primitive_transposes[select_and_scatter_add_p] = \
_select_and_scatter_add_transpose
ad.primitive_jvps[select_and_scatter_add_p] = _select_and_scatter_add_jvp
batching.primitive_batchers[select_and_scatter_add_p] = \
_select_and_scatter_add_batch_rule
def _select_and_gather_add_shape_rule(
tangents, operand, *, select_prim, window_dimensions, window_strides,
padding):
if tangents.shape != operand.shape:
msg = ("select_and_gather_add tangents and operand shapes must match, "
"got {} and {}.")
raise TypeError(msg.format(tangents.shape, operand.shape))
return _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding)
_UINT_DTYPES = {
16: onp.uint16,
32: onp.uint32,
64: onp.uint64,
}
_INT_DTYPES = {
16: onp.int16,
32: onp.int32,
64: onp.int64,
}
def _select_and_gather_add_translation(
c, tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, max_bits=64):
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
etype = shape.xla_element_type()
nbits = dtypes.finfo(dtype).bits
assert nbits <= max_bits
double_word_reduction = nbits * 2 <= max_bits
const = lambda c, dtype, x: xb.constant(c, onp.array(x, dtype=dtype),
canonicalize_types=False)
if double_word_reduction:
# TODO(b/73062247): XLA doesn't yet implement ReduceWindow on tuples, so
# we implement a pair-wise ReduceWindow by packing two k-bit values into
    # a 2k-bit unsigned integer using bit tricks.
word_dtype = _UINT_DTYPES[nbits]
double_word_dtype = _UINT_DTYPES[nbits * 2]
word_type = xla_client.dtype_to_etype(word_dtype)
double_word_type = xla_client.dtype_to_etype(double_word_dtype)
# Packs two values into a tuple.
def pack(a, b):
a = xops.BitcastConvertType(a, word_type)
b = xops.BitcastConvertType(b, word_type)
a = xops.ConvertElementType(a, double_word_type)
b = xops.ConvertElementType(b, double_word_type)
a = xops.ShiftLeft(a, const(c, double_word_dtype, nbits))
return xops.Or(a, b)
# Unpacks the first element of a tuple.
def fst(c, t):
st = xops.ShiftRightLogical(t, const(c, double_word_dtype, nbits))
return xops.BitcastConvertType(xops.ConvertElementType(st, word_type), etype)
# Unpacks the second element of a tuple.
def snd(t):
return xops.BitcastConvertType(xops.ConvertElementType(t, word_type), etype)
else:
# The double-word trick above only works if we have a sufficiently large
# type. As an alternative, we can pack two half words into a single word,
# at the cost of precision.
# TODO(b/73062247): add support for tuple reductions and remove this case.
warnings.warn("Using reduced precision for gradient of reduce-window "
"min/max operator to work around missing XLA support for "
"pair-reductions. This is likely from a second or "
"higher derivative of a max-pooling operation.")
r_nbits = nbits // 2
# Drop/round the bottom mantissa bits.
nexp = dtypes.finfo(dtype).nexp
nmant = r_nbits - nexp - 1
double_word_dtype = word_dtype = _UINT_DTYPES[nbits]
word_type = xla_client.dtype_to_etype(word_dtype)
# Packs two values into a tuple.
def pack(a, b):
a = xops.ReducePrecision(a, exponent_bits=nexp, mantissa_bits=nmant)
b = xops.ReducePrecision(b, exponent_bits=nexp, mantissa_bits=nmant)
a = xops.BitcastConvertType(a, word_type)
b = xops.BitcastConvertType(b, word_type)
b = xops.ShiftRightLogical(b, const(c, word_dtype, r_nbits))
return xops.Or(a, b)
# Unpacks the first element of a tuple.
def fst(c, t):
st = xops.And(t, const(c, word_dtype, ((1 << r_nbits) - 1) << r_nbits))
return xops.BitcastConvertType(st, etype)
# Unpacks the second element of a tuple.
def snd(t):
return xops.BitcastConvertType(xops.ShiftLeft(t, const(c, word_dtype, r_nbits)),
etype)
def reducer():
c = xla_bridge.make_computation_builder("select_and_gather_pair_reducer")
x = xb.parameter(c, 0,
xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))
y = xb.parameter(c, 1,
xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))
assert select_prim is ge_p or select_prim is le_p
which = xops.Ge if select_prim is ge_p else xops.Le
xops.Select(which(fst(c, x), fst(c, y)), x, y)
return c.build()
assert select_prim is ge_p or select_prim is le_p, select_prim
init = -onp.inf if select_prim is ge_p else onp.inf
pads = xc.window_padding_type_to_pad_values(
padding, c.get_shape(operand).dimensions(), window_dimensions,
window_strides)
out = xops.ReduceWindowWithGeneralPadding(
pack(operand, tangents), pack(const(c, dtype, init), const(c, dtype, 0)),
reducer(), window_dimensions, window_strides, (), (), pads)
return snd(out)
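# Illustrative sketch of the double-word packing used above (hypothetical
# float32 inputs, not part of the library):
#   bits(3.14159265f) = 0x40490FDB, bits(1.0f) = 0x3F800000
#   pack(a, b) = 0x40490FDB3F800000 as a uint64 (operand in the high word,
#   tangent in the low word)
# The reducer compares the unpacked operand values fst(x) and fst(y), so the
# tangent of the selected operand rides along in the low word and snd(out)
# extracts it after the windowed reduction.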
def _select_and_gather_add_jvp(
primals, tangents, *, select_prim, window_dimensions, window_strides,
padding):
source, operand = primals
g_source, g_operand = tangents
val_out = _select_and_gather_add(
source, operand, select_prim, window_dimensions, window_strides,
padding)
del g_operand
if g_source is ad_util.zero:
tangent_out = ad_util.zero
else:
tangent_out = _select_and_gather_add(
g_source, operand, select_prim, window_dimensions,
window_strides, padding)
return val_out, tangent_out
def _select_and_gather_add_transpose(
t, tangents, operand, *, select_prim, window_dimensions, window_strides,
padding):
assert ad.is_undefined_primal(tangents) and not ad.is_undefined_primal(operand)
result = _select_and_scatter_add(t, operand, select_prim, window_dimensions,
window_strides, padding)
return [result, None]
def _select_and_gather_add_batching_rule(
batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,
padding):
t, x = batched_args
t_bdim, x_bdim = batch_dims
size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims)
if bdim is not None)
t = batching.bdim_at_front(t, t_bdim, size)
x = batching.bdim_at_front(x, x_bdim, size)
window_dimensions = (1,) + window_dimensions
window_strides = (1,) + window_strides
out = _select_and_gather_add(t, x, select_prim, window_dimensions,
window_strides, padding)
return (out, 0)
select_and_gather_add_p = standard_primitive(
_select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add',
_select_and_gather_add_translation)
ad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp
ad.primitive_transposes[select_and_gather_add_p] = \
_select_and_gather_add_transpose
batching.primitive_batchers[select_and_gather_add_p] = \
_select_and_gather_add_batching_rule
xla.backend_specific_translations['tpu'][select_and_gather_add_p] = partial(
_select_and_gather_add_translation,
max_bits=32)
# Parallel prefix-scan. See:
# https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda
# and
# Blelloch, Guy E. 1990. "Prefix Sums and Their Applications.", Technical Report
# CMU-CS-90-190, School of Computer Science, Carnegie Mellon University.
#
# Unlike the Blelloch algorithm, we use an out-of-place algorithm that uses 2n
# space. This is somewhat wasteful if we are interested only in the output of
# the forward pass, but more memory-efficient if we intend to differentiate
# through the implementation of the scan.
def _prescan_power_of_two(x, axis: int, op: Callable, unit):
n = x.shape[axis]
assert n != 0 and n & (n - 1) == 0, "n must be a power of 2"
# Upsweep
xs = []
for d in range(0, n.bit_length() - 1):
x1 = slice_in_dim(x, 0, None, stride=2, axis=axis)
xs.append(x1)
x2 = slice_in_dim(x, 1, None, stride=2, axis=axis)
x = op(x1, x2)
total = x
# Downsweep
x = full_like(total, unit)
pad_left = [(0, 0, 0)] * len(x.shape)
pad_left[axis] = (1, 0, 1)
pad_right = [(0, 0, 0)] * len(x.shape)
pad_right[axis] = (0, 1, 1)
for w in reversed(xs):
x1 = pad(x, _const(x, 0), pad_right)
x2 = pad(x, _const(x, 0), pad_left)
w = pad(w, _const(x, 0), pad_left)
x = x1 + op(x2, w)
return x, total
def _parallel_prefix_scan(x, axis: int, op: Callable, unit):
n = x.shape[axis]
if n == 0:
return x
# Pads to the next largest power of two
nbits = n.bit_length()
if n == (1 << (nbits - 1)):
nbits -= 1
padding = [(0, 0, 0)] * len(x.shape)
padding[axis] = (0, (1 << nbits) - n, 0)
x = pad(x, _const(x, unit), padding)
x, total = _prescan_power_of_two(x, axis, op, unit)
return concatenate((slice_in_dim(x, 1, n, axis=axis), total), dimension=axis)
_cumsum_prefix_scan = partial(_parallel_prefix_scan, op=add, unit=0)
_cumprod_prefix_scan = partial(_parallel_prefix_scan, op=mul, unit=1)
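# Hedged illustration: a plain-NumPy analogue of the upsweep/downsweep scan
# implemented above, restricted to 1-D arrays with power-of-two length.
# `_np_example_prefix_scan` is a hypothetical helper for exposition only, not
# part of this module.
def _np_example_prefix_scan(x, op, unit):
  import numpy as np
  x = np.asarray(x)
  n = x.shape[0]
  assert n and n & (n - 1) == 0, "length must be a power of 2"
  # Upsweep: pairwise reduce, remembering the left element of every pair.
  lefts = []
  while x.shape[0] > 1:
    lefts.append(x[0::2])
    x = op(x[0::2], x[1::2])
  total = x  # length-1 array holding the reduction of the whole input
  # Downsweep: rebuild the exclusive scan by interleaving partial results.
  x = np.full_like(total, unit)
  for w in reversed(lefts):
    out = np.empty(2 * x.shape[0], dtype=x.dtype)
    out[0::2] = x
    out[1::2] = op(x, w)
    x = out
  # Convert the exclusive scan to an inclusive one, as _parallel_prefix_scan does.
  return np.concatenate([x[1:], total])
# e.g. _np_example_prefix_scan([1, 2, 3, 4], np.add, 0) -> array([1, 3, 6, 10])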
def _cumred_shape_rule(x, *, axis: int):
if axis < 0 or axis >= x.ndim:
raise ValueError(
"axis {} is out of bounds for array of shape {}".format(axis, x.shape))
return x.shape
def _cumsum_transpose_rule(t, *, axis: int):
return [rev(cumsum(rev(t, (axis,)), axis=axis), (axis,))]
def _cumprod_jvp_rule(primals, tangents, *, axis: int):
# Irrespective of backend, we always use the parallel prefix scan
# implementation when differentiating because reduce_window is not
# arbitrarily differentiable.
return api.jvp(partial(_cumprod_prefix_scan, axis=axis), primals, tangents)
def _cumred_tpu_translation_rule(window_reduce: Callable, unit, x, *,
axis: int):
# On TPU, an implementation using reduce_window is handled specially by the
# compiler and is efficient. On other backends, it is O(n^2).
n = x.shape[axis]
if n == 0:
return x
padding = [(0, 0, 0)] * x.ndim
padding[axis] = (n - 1, 0, 0)
x = pad(x, _const(x, unit), padding)
strides = [1] * x.ndim
window_dims = [1] * x.ndim
window_dims[axis] = n
return window_reduce(x, window_dims, strides, xla_client.PaddingType.VALID)
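# Hedged illustration of the padding trick above in plain NumPy (1-D only):
# left-padding with n - 1 copies of the unit and reducing every length-n window
# reproduces a cumulative reduction. `_np_example_cumred` is a hypothetical
# helper for exposition only, not part of this module.
def _np_example_cumred(x, op, unit):
  import numpy as np
  x = np.asarray(x)
  n = x.shape[0]
  if n == 0:
    return x.copy()
  padded = np.concatenate([np.full(n - 1, unit, dtype=x.dtype), x])
  # window i spans padded[i:i + n], i.e. the unit padding followed by x[:i + 1]
  return np.array([op.reduce(padded[i:i + n]) for i in range(n)])
# e.g. _np_example_cumred([1, 2, 3, 4], np.add, 0) -> array([1, 3, 6, 10])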
def _cumred_batch_rule(prim, batched_args, batch_dims, *, axis: int):
operand, = batched_args
bdim, = batch_dims
axis = axis if axis < bdim else axis + 1
return prim.bind(operand, axis=axis), bdim
cumsum_p = standard_primitive(
_cumred_shape_rule, partial(_reduce_number_dtype_rule, "cumsum"),
'cumsum', xla.lower_fun(_cumsum_prefix_scan, multiple_results=False))
ad.deflinear(cumsum_p, _cumsum_transpose_rule)
xla.backend_specific_translations['tpu'][cumsum_p] = xla.lower_fun(
partial(_cumred_tpu_translation_rule, _reduce_window_sum, 0),
multiple_results=False)
batching.primitive_batchers[cumsum_p] = partial(_cumred_batch_rule, cumsum_p)
cumprod_p = standard_primitive(
_cumred_shape_rule, partial(_reduce_number_dtype_rule, "cumprod"),
'cumprod', xla.lower_fun(_cumprod_prefix_scan, multiple_results=False))
ad.primitive_jvps[cumprod_p] = _cumprod_jvp_rule
xla.backend_specific_translations['tpu'][cumprod_p] = xla.lower_fun(
partial(_cumred_tpu_translation_rule, _reduce_window_prod, 1),
multiple_results=False)
batching.primitive_batchers[cumprod_p] = partial(_cumred_batch_rule, cumprod_p)
def _sort_abstract_eval(*args, **kwargs):
args = tuple(raise_to_shaped(arg) for arg in args)
if any(arg.shape != args[0].shape for arg in args[1:]):
shapes = " ".join(str(a.shape) for a in args)
raise TypeError(f"Arguments to sort must have equal shapes, got: {shapes}")
return args
def _float_to_int_for_sort(x):
  # Switch from a floating point value to an integer value in such a way that
  # comparing the integer values gives the same result for normal values, while
  # -NaN is treated as the smallest value and NaN as the largest value.
# If f is a float, and
# x = bit_cast<int32>(f);
# y = x < 0 ? int32_max - x : x;
# then y is ordered as an int32 such that finite values have the obvious
# order, -0 is ordered before 0, and -NaN and NaN appear at the beginning
# and end of the ordering.
# Note that in order to avoid -x to overflow, we calculate
# int32_max - x as unsigned, and then convert back to signed.
if x.dtype == dtypes.bfloat16:
x = convert_element_type(x, onp.float32)
nbits = onp.finfo(x).bits
signed_dtype = _INT_DTYPES[nbits]
unsigned_dtype = _UINT_DTYPES[nbits]
signed = bitcast_convert_type(x, signed_dtype)
unsigned = bitcast_convert_type(x, unsigned_dtype)
flipped = bitcast_convert_type(
sub(unsigned_dtype(onp.iinfo(signed_dtype).max), unsigned), signed_dtype)
return select(lt(signed, _zero(signed)), flipped, signed)
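# Hedged sanity check for the bit trick above, written in plain NumPy: after the
# transform, ordinary signed-integer comparison reproduces the total order
# -NaN < -inf < ... < -0 < 0 < ... < inf < NaN. `_np_example_float_key` is a
# hypothetical helper for exposition only, not part of this module.
def _np_example_float_key(f):
  import numpy as np
  signed = np.asarray(f, dtype=np.float32).view(np.int32)
  unsigned = signed.view(np.uint32)
  flipped = (np.uint32(np.iinfo(np.int32).max) - unsigned).view(np.int32)
  return np.where(signed < 0, flipped, signed)
# e.g. keys for [-inf, -1.0, -0.0, 0.0, 1.0, inf] come out strictly increasing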
# Default comparator that sorts the operands only on their first arguments.
# For floating point types, a total order is created where
# -NaN < -infinity < ... < -0 < 0 < ... < infinity < NaN.
# For complex types, the (real, imag) pairs are sorted lexicographically
# (following NumPy's semantics).
# This code adds complex-number support to the algorithm from:
# https://github.com/tensorflow/tensorflow/blob/ba43780830f09da72081fe5061c436f1c6203a92/tensorflow/compiler/xla/client/lib/comparators.h#L33
def _sort_lt_comparator(*operands):
assert len(operands) >= 2 and len(operands) % 2 == 0, operands
x, y = operands[:2]
assert x.dtype == y.dtype, (x.dtype, y.dtype)
if onp.issubdtype(x.dtype, onp.complexfloating):
x_keys = [_float_to_int_for_sort(real(x)), _float_to_int_for_sort(imag(x))]
y_keys = [_float_to_int_for_sort(real(y)), _float_to_int_for_sort(imag(y))]
elif onp.issubdtype(x.dtype, onp.floating):
x_keys = [_float_to_int_for_sort(x)]
y_keys = [_float_to_int_for_sort(y)]
else:
x_keys = [x]
y_keys = [y]
p = None
for xk, yk in zip(x_keys[::-1], y_keys[::-1]):
p = (bitwise_or(lt(xk, yk), bitwise_and(eq(xk, yk), p)) if p is not None
else lt(xk, yk))
return p
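# Hedged note on the comparator above: the keys are folded right to left, so for
# complex inputs the result is true exactly when real(x) < real(y), or
# real(x) == real(y) and imag(x) < imag(y), i.e. a lexicographic order.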
def _sort_translation_rule(c, *operands, dimension):
types = [c.get_shape(x).xla_element_type() for x in operands]
subc = xla_bridge.make_computation_builder("sort_lt_comparator")
params = [xb.parameter(subc, 2 * i + j, xc.Shape.array_shape(typ, ()))
for i, typ in enumerate(types) for j in range(2)]
result = xla.lower_fun(_sort_lt_comparator,
multiple_results=False)(subc, *params)
comparator = subc.build(result)
out = xops.Sort(c, operands, dimension=dimension, is_stable=True,
comparator=comparator)
return out if len(operands) != 1 else xops.Tuple(c, [out])
def _sort_jvp(primals, tangents, *, dimension):
shape = primals[0].shape
iotas = []
for dim, size in enumerate(shape):
dtype = onp.int32 if size < onp.iinfo(onp.int32).max else onp.int64
iotas.append(broadcasted_iota(dtype, shape, dim))
primals = sort_p.bind(*(primals + (iotas[dimension],)), dimension=dimension)
idx = tuple(primals[-1] if i == dimension else iotas[i]
for i in range(len(shape)))
tangents_out = tuple(ad_util.zero if t is ad_util.zero else t[idx]
for t in tangents)
return tuple(primals[:-1]), tangents_out
def _sort_batch_rule(batched_args, batch_dims, *, dimension):
prototype_arg, new_bdim = next(
(a, b) for a, b in zip(batched_args, batch_dims) if b is not None)
new_args = []
for arg, bdim in zip(batched_args, batch_dims):
if bdim is None:
dims = onp.delete(onp.arange(prototype_arg.ndim), new_bdim)
new_args.append(broadcast_in_dim(arg, prototype_arg.shape, dims))
else:
new_args.append(batching.moveaxis(arg, bdim, new_bdim))
new_dimension = dimension + (new_bdim <= dimension)
bdims = (new_bdim,) * len(new_args)
return sort_p.bind(*new_args, dimension=new_dimension), bdims
sort_p = Primitive('sort')
sort_p.multiple_results = True
sort_p.def_impl(partial(xla.apply_primitive, sort_p))
sort_p.def_abstract_eval(_sort_abstract_eval)
xla.translations[sort_p] = _sort_translation_rule
ad.primitive_jvps[sort_p] = _sort_jvp
batching.primitive_batchers[sort_p] = _sort_batch_rule
def _top_k_abstract_eval(operand, *, k):
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
if len(operand.shape) == 0:
raise TypeError("top_k operand must have >= 1 dimension, got {}"
.format(operand.shape))
shape = list(operand.shape)
if shape[-1] < k:
msg = "k argument to top_k must be no larger than minor dimension; {} vs {}"
raise ValueError(msg.format(k, shape))
shape[-1] = k
return (ShapedArray(shape, operand.dtype),
ShapedArray(shape, onp.dtype(onp.int32)))
def _top_k_jvp(primals, tangents, *, k):
operand, = primals
tangent, = tangents
primals_out = top_k(operand, k)
if tangent is ad_util.zero:
tangents_out = (ad_util.zero, ad_util.zero)
else:
_, k_idxs = primals_out
idx_shape = k_idxs.shape
rank = len(idx_shape)
gather_index_shape = idx_shape + (1,)
gather_indices = []
for i in range(rank-1):
_iota = iota(k_idxs.dtype, idx_shape[i])
_iota = tie_in(operand, _iota)
_iota = broadcast_in_dim(_iota, gather_index_shape, (i,))
gather_indices.append(_iota)
gather_indices.append(reshape(k_idxs, gather_index_shape))
gather_indices = concatenate(gather_indices, dimension=rank)
slice_sizes = (1,) * rank
dnums = GatherDimensionNumbers(
offset_dims=(),
collapsed_slice_dims=tuple(range(rank)),
start_index_map=tuple(range(rank)))
tangents_out = (gather(tangent, gather_indices, dnums, slice_sizes),
ad_util.zero)
return primals_out, tangents_out
def _top_k_batch_rule(batched_args, batch_dims, *, k):
operand, = batched_args
bdim, = batch_dims
if bdim == operand.ndim-1:
perm = onp.arange(operand.ndim)
perm[bdim-1], perm[bdim] = perm[bdim], perm[bdim-1]
top_k_v, top_k_i = top_k(transpose(operand, perm), k=k)
return (transpose(top_k_v, perm),
transpose(top_k_i, perm)), (bdim, bdim)
else:
return top_k(operand, k=k), (bdim, bdim)
top_k_p = Primitive('top_k')
top_k_p.multiple_results = True
top_k_p.def_impl(partial(xla.apply_primitive, top_k_p))
top_k_p.def_abstract_eval(_top_k_abstract_eval)
xla.translations[top_k_p] = partial(standard_translate, 'top_k')
ad.primitive_jvps[top_k_p] = _top_k_jvp
batching.primitive_batchers[top_k_p] = _top_k_batch_rule
def _tie_in_transpose_rule(t):
return [ad_util.zero, t]
def _tie_in_batch_rule(batched_args, batch_dims):
y = tie_in(*batched_args)
_, bdim_y = batch_dims
return y, bdim_y
tie_in_p = Primitive('tie_in')
tie_in_p.def_impl(lambda x, y: y)
tie_in_p.def_abstract_eval(lambda x, y: raise_to_shaped(y))
xla.translations[tie_in_p] = lambda c, x, y: y
ad.deflinear(tie_in_p, _tie_in_transpose_rule)
batching.primitive_batchers[tie_in_p] = _tie_in_batch_rule
masking.masking_rules[tie_in_p] = lambda vals, logical_shapes: vals[1]
def _stop_gradient_jvp_rule(primals, tangents):
# if we don't call stop_gradient here, we'd only peel off one autodiff tracer
x, = primals
return stop_gradient(x), ad_util.zero
def _stop_gradient_batch_rule(batched_args, batch_dims):
x, = batched_args
dim, = batch_dims
return stop_gradient(x), dim
xla.translations[ad_util.stop_gradient_p] = lambda c, x: x
ad.primitive_jvps[ad_util.stop_gradient_p] = _stop_gradient_jvp_rule
batching.primitive_batchers[ad_util.stop_gradient_p] = _stop_gradient_batch_rule
def create_token(x):
"""Creates an XLA token value with no preconditions for sequencing effects.
Experimental.
Args:
x: a dummy argument used to tie the CreateToken operator into a trace. The
value of `x` is ignored.
"""
# x is a dummy argument used to tie the operator into a trace.
return create_token_p.bind(x)
create_token_p = Primitive("create_token")
create_token_p.def_impl(partial(xla.apply_primitive, create_token_p))
create_token_p.def_abstract_eval(lambda _: abstract_token)
xla.translations[create_token_p] = lambda c, _: xops.CreateToken(c)
def after_all(*operands):
"""Merges one or more XLA token values. Experimental.
Wraps the XLA AfterAll operator."""
return after_all_p.bind(*operands)
def _after_all_abstract_eval(*operands):
if any(x is not abstract_token for x in operands):
raise TypeError("Arguments to after_all must be tokens")
return abstract_token
def _after_all_translation_rule(c, *operands):
return xops.AfterAll(c, operands)
after_all_p = Primitive("after_all")
after_all_p.def_impl(partial(xla.apply_primitive, after_all_p))
after_all_p.def_abstract_eval(_after_all_abstract_eval)
xla.translations[after_all_p] = _after_all_translation_rule
def infeed(token, shape=None):
"""Consumes an infeed value of `shape` from the host. Experimental.
`token` is used to sequence infeed and outfeed effects.
"""
flat_shapes, treedef = pytree.flatten(shape)
for shape in flat_shapes:
if not isinstance(shape, ShapedArray):
raise TypeError("shape argument to infeed must be a pytree of "
"ShapedArray values, got {}".format(shape))
xs_and_token = infeed_p.bind(token, shapes=tuple(flat_shapes))
return (treedef.unflatten(xs_and_token[:-1]), xs_and_token[-1])
def _infeed_abstract_eval(token, *, shapes):
if token is not abstract_token:
raise TypeError("First argument to infeed must be a token")
return shapes + (abstract_token,)
def _infeed_translation_rule(c, token, *, shapes):
shape = tuple(xla.aval_to_xla_shape(x).with_major_to_minor_layout_if_absent()
for x in shapes)
xs_and_token = xops.InfeedWithToken(token,
xla_client.Shape.tuple_shape(shape))
xs = xops.GetTupleElement(xs_and_token, 0)
token = xops.GetTupleElement(xs_and_token, 1)
outs = [xops.GetTupleElement(xs, i) for i in range(len(shapes))] + [token]
return xops.Tuple(c, outs)
infeed_p = Primitive("infeed")
infeed_p.multiple_results = True
infeed_p.def_impl(partial(xla.apply_primitive, infeed_p))
infeed_p.def_abstract_eval(_infeed_abstract_eval)
xla.translations[infeed_p] = _infeed_translation_rule
def outfeed(token, xs):
"""Outfeeds value `xs` to the host. Experimental.
`token` is used to sequence infeed and outfeed effects.
"""
flat_xs, _ = pytree.flatten(xs)
return outfeed_p.bind(token, *flat_xs)
def _outfeed_abstract_eval(token, *xs):
if token is not abstract_token:
raise TypeError("First argument to outfeed must be a token")
return abstract_token
def _outfeed_translation_rule(c, token, *xs):
t = xops.Tuple(c, xs)
return xops.OutfeedWithToken(t, token, c.get_shape(t))
outfeed_p = Primitive("outfeed")
outfeed_p.def_impl(partial(xla.apply_primitive, outfeed_p))
outfeed_p.def_abstract_eval(_outfeed_abstract_eval)
xla.translations[outfeed_p] = _outfeed_translation_rule
def rng_uniform(a, b, shape):
"""Stateful PRNG generator. Experimental and its use is discouraged.
  Returns uniformly distributed random numbers in the range [a, b).
You should use jax.random for most purposes; this function exists only for
niche use cases with special performance requirements.
This API may be removed at any time.
"""
return rng_uniform_p.bind(a, b, shape=tuple(shape))
def _rng_uniform_abstract_eval(a, b, *, shape):
if a.dtype != b.dtype:
raise ValueError(
"Arguments to rng_uniform must have identical dtypes, got {} "
"and {}.".format(a.dtype, b.dtype))
if a.shape != () or b.shape != ():
raise ValueError(
"Arguments to rng_uniform must be scalars; got shapes {} and {}."
.format(a.shape, b.shape))
return ShapedArray(shape, a.dtype)
def _rng_uniform_translation_rule(c, a, b, *, shape):
xla_shape = xc.Shape.array_shape(c.get_shape(a).xla_element_type(), shape)
return xops.RngUniform(a, b, xla_shape)
rng_uniform_p = Primitive("rng_uniform")
rng_uniform_p.def_impl(partial(xla.apply_primitive, rng_uniform_p))
rng_uniform_p.def_abstract_eval(_rng_uniform_abstract_eval)
xla.translations[rng_uniform_p] = _rng_uniform_translation_rule
### util
_ndim = onp.ndim
def _dilate_shape(shape, dilation):
"""Utility function for computing the shape resulting from a dilation."""
if not onp.all(onp.greater(dilation, 0)):
msg = "All dilations must be positive, got {}."
raise TypeError(msg.format(dilation))
dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
return onp.where(shape == 0, 0,
onp.multiply(dilation, onp.subtract(shape, 1)) + 1)
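# Hedged numeric example for _dilate_shape: a dimension of size 5 with dilation 2
# becomes 2 * (5 - 1) + 1 = 9 (one gap between consecutive elements), while a
# zero-sized dimension stays 0.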
def _ceil_divide(x1, x2):
return -onp.floor_divide(onp.negative(x1), x2)
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
"""Convert padding string to list of pairs of pad values."""
PaddingType = xla_client.PaddingType
if isinstance(padding, str):
mapping = {'VALID': PaddingType.VALID, 'SAME': PaddingType.SAME}
try:
padding = mapping[padding.upper()]
except KeyError as err:
msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
raise RuntimeError(msg.format(padding)) from err
if padding == PaddingType.SAME:
out_shape = _ceil_divide(in_shape, window_strides)
pad_sizes = onp.maximum(0, (out_shape - 1) * window_strides +
window_shape - in_shape)
return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]
elif padding == PaddingType.VALID:
return [(0, 0)] * len(in_shape)
else:
msg = "Unknown padding type: {}."
raise TypeError(msg.format(padding))
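# Hedged numeric example: for in_shape (10,), window_shape (3,) and
# window_strides (2,), 'SAME' gives out_shape ceil(10 / 2) = 5 and
# pad = max(0, (5 - 1) * 2 + 3 - 10) = 1, split as (0, 1); 'VALID' gives (0, 0).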
def _check_same_dtypes(name, ignore_fp_precision, *ttypes):
"""Check that dtypes agree, possibly ignoring float precision."""
# the `ignore_fp_precision` flag exists because the XLA shape inference logic
# allows mixed floating point precision, but the HLO verifier often rejects it
types = list(map(onp.dtype, ttypes)) # canonicalize
if ignore_fp_precision:
types = [
onp.floating if dtypes.issubdtype(dtype, onp.floating)
else onp.complexfloating if dtypes.issubdtype(dtype, onp.complexfloating)
else dtype for dtype in types]
if len({dtypes.canonicalize_dtype(t) for t in types}) != 1:
if ignore_fp_precision:
msg = ("{} requires arguments to have same dtypes up to floating point "
"precision, got {}.")
else:
msg = "{} requires arguments to have the same dtypes, got {}."
raise TypeError(msg.format(name, ", ".join(map(str, types))))
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
"""Check that conv shapes are valid and are consistent with window_strides."""
if len(lhs_shape) != len(rhs_shape):
msg = "Arguments to {} must have same rank, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if len(lhs_shape) < 2:
msg = "Arguments to {} must have rank at least 2, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if lhs_shape[1] != rhs_shape[1]:
msg = "Arguments to {} must agree on input feature size, got {} and {}."
raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
_check_shapelike(name, "window_strides", window_strides)
if not onp.all(onp.greater(window_strides, 0)):
msg = "All elements of window_strides must be positive, got {}."
raise TypeError(msg.format(window_strides))
if len(window_strides) != len(lhs_shape) - 2:
msg = "{} window_strides has wrong length: expected {}, got {}."
expected_length = len(lhs_shape) - 2
raise TypeError(msg.format(name, expected_length, len(window_strides)))
def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads, batch_group_count=1):
"""Compute the shape tuple of a conv given input shapes in canonical order."""
if isinstance(pads, str):
pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
if len(pads) != len(lhs_shape) - 2:
msg = "Wrong number of explicit pads for convolution: expected {}, got {}."
raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))
lhs_padded = onp.add(lhs_shape[2:], onp.sum(onp.array(pads).reshape(-1, 2),
axis=1))
out_space = onp.floor_divide(
onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1
out_space = onp.maximum(0, out_space)
assert lhs_shape[0] % batch_group_count == 0
out_shape = (lhs_shape[0] // batch_group_count, rhs_shape[0])
return tuple(out_shape + tuple(out_space))
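# Hedged numeric example: lhs_shape (1, 3, 10, 10), rhs_shape (8, 3, 3, 3),
# strides (2, 2) and pads ((1, 1), (1, 1)) pad the spatial dims to (12, 12) and
# yield output shape (1, 8, 5, 5), since floor((12 - 3) / 2) + 1 = 5.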
def conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
dimension_numbers):
lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
lhs_trans = onp.take(lhs_shape, lhs_perm)
rhs_trans = onp.take(rhs_shape, rhs_perm)
out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding)
return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def conv_transpose_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
dimension_numbers):
lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
lhs_trans = onp.take(lhs_shape, lhs_perm)
rhs_trans = onp.take(rhs_shape, rhs_perm)
if isinstance(padding, str):
padding = [_conv_transpose_padding(k, s, padding)
for k,s in zip(rhs_trans[2:], window_strides)]
padding = list(map(onp.sum, padding))
unpad_out_space = [(i-1) * s - k + 2
for i, k, s in zip(lhs_trans[2:],
rhs_trans[2:],
window_strides)]
out_space = onp.sum([unpad_out_space, padding], axis=0).tolist()
out_trans = tuple((lhs_trans[0], rhs_trans[0]) + tuple(out_space))
return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def _check_shapelike(fun_name, arg_name, obj):
"""Check that `obj` is a shape-like value (e.g. tuple of nonnegative ints)."""
if not isinstance(obj, (tuple, list, onp.ndarray)):
msg = "{} {} must be of type tuple/list/ndarray, got {}."
raise TypeError(msg.format(fun_name, arg_name, type(obj)))
# bool(obj) for an ndarray raises an error, so we check len
if not len(obj): # pylint: disable=g-explicit-length-test
return
obj_arr = onp.array(obj)
if obj_arr.ndim != 1:
msg = "{} {} must be rank 1, got {}."
    raise TypeError(msg.format(fun_name, arg_name, obj_arr.ndim))
try:
canonicalize_shape(obj_arr)
except TypeError:
msg = "{} {} must have every element be an integer type, got {}."
raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj))))
if not (obj_arr >= 0).all():
msg = "{} {} must have every element be nonnegative, got {}."
raise TypeError(msg.format(fun_name, arg_name, obj))
def _dynamic_slice_indices(operand, start_indices):
if not isinstance(start_indices, (tuple, list)):
if start_indices.ndim != 1:
raise ValueError("Slice indices must be a 1D sequence, got {}"
.format(start_indices.shape))
start_indices = [reshape(slice(start_indices, [i], [i+1]), ())
for i in range(operand.ndim)]
else:
start_indices = [onp.asarray(i, dtype=dtypes.int_) if isinstance(i, int)
else i for i in start_indices]
if len(start_indices) != operand.ndim:
msg = ("Length of slice indices must match number of operand dimensions ({} "
"vs {})")
raise ValueError(msg.format(len(start_indices), operand.shape))
# map int over operand.shape to raise any dynamic-shape errors
return [select(lt(i, _const(i, 0)), add(i, _const(i, int(d))), i)
for i, d in zip(start_indices, operand.shape)]
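# Hedged example: for an operand of shape (8,) and a start index of -2, the
# select above rewrites the index to -2 + 8 = 6, mirroring NumPy-style negative
# indexing; out-of-range indices are left for dynamic_slice itself to handle.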
def _const(example, val):
if dtypes.is_python_scalar(example):
return dtypes.scalar_type_of(example)(val)
return onp.array(val, _dtype(example))
_zeros: Callable = partial(full_like, fill_value=0)
_zero: Callable = partial(full_like, shape=(), fill_value=0)
_ones: Callable = partial(full_like, fill_value=1)
_one: Callable = partial(full_like, shape=(), fill_value=1)
_twos: Callable = partial(full_like, fill_value=2)
_two: Callable = partial(full_like, shape=(), fill_value=2)
dtype: Callable = dtypes.result_type
_dtype: Callable = dtypes.result_type
def _iscomplex(x) -> bool:
return dtypes.issubdtype(_dtype(x), onp.complexfloating)
def ranges_like(*xs):
start = 0
for x in xs:
x_len = len(x)
yield range(start, start + x_len)
start += x_len
def remaining(original, *removed_lists):
blacklist = set(itertools.chain(*removed_lists))
return [i for i in original if i not in blacklist]
def _canonicalize_precision(precision):
if precision is None:
return None
if isinstance(precision, Precision):
return precision
else:
msg = "Precision argument must be None or a lax.Precision value; got {}"
raise ValueError(msg.format(precision))
def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers):
"""Converts convolution `dimension_numbers` to a `ConvDimensionNumbers`.
Args:
lhs_shape: tuple of nonnegative integers, shape of the convolution input.
rhs_shape: tuple of nonnegative integers, shape of the convolution kernel.
dimension_numbers: None or a tuple/list of strings or a ConvDimensionNumbers
object following the convolution dimension number specification format in
xla_client.py.
Returns:
A `ConvDimensionNumbers` object that represents `dimension_numbers` in the
canonical form used by lax functions.
"""
if isinstance(dimension_numbers, ConvDimensionNumbers):
return dimension_numbers
if len(lhs_shape) != len(rhs_shape):
msg = "convolution requires lhs and rhs ndim to be equal, got {} and {}."
raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))
if dimension_numbers is None:
iota = tuple(range(len(lhs_shape)))
return ConvDimensionNumbers(iota, iota, iota)
elif isinstance(dimension_numbers, (list, tuple)):
if len(dimension_numbers) != 3:
msg = "convolution dimension_numbers list/tuple must be length 3, got {}."
raise TypeError(msg.format(len(dimension_numbers)))
if not all(isinstance(elt, str) for elt in dimension_numbers):
msg = "convolution dimension_numbers elements must be strings, got {}."
raise TypeError(msg.format(tuple(map(type, dimension_numbers))))
msg = ("convolution dimension_numbers[{}] must have len equal to the ndim "
"of lhs and rhs, got {} for lhs and rhs shapes {} and {}.")
for i, elt in enumerate(dimension_numbers):
if len(elt) != len(lhs_shape):
raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))
lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)
return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
else:
msg = "convolution dimension_numbers must be tuple/list or None, got {}."
raise TypeError(msg.format(type(dimension_numbers)))
def conv_general_permutations(dimension_numbers):
"""Utility for convolution dimension permutations relative to Conv HLO."""
lhs_spec, rhs_spec, out_spec = dimension_numbers
lhs_char, rhs_char, out_char = charpairs = ("N", "C"), ("O", "I"), ("N", "C")
for i, (a, b) in enumerate(charpairs):
if not dimension_numbers[i].count(a) == dimension_numbers[i].count(b) == 1:
msg = ("convolution dimension_numbers[{}] must contain the characters "
"'{}' and '{}' exactly once, got {}.")
raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
msg = ("convolution dimension_numbers[{}] cannot have duplicate "
"characters, got {}.")
raise TypeError(msg.format(i, dimension_numbers[i]))
if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
set(out_spec) - set(out_char)):
msg = ("convolution dimension_numbers elements must each have the same "
"set of spatial characters, got {}.")
raise TypeError(msg.format(dimension_numbers))
def getperm(spec, charpair):
spatial = (i for i, c in enumerate(spec) if c not in charpair)
if spec is not rhs_spec:
spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
return lhs_perm, rhs_perm, out_perm
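# Hedged example: for dimension_numbers ('NHWC', 'HWIO', 'NHWC') the helper
# above returns lhs_perm = (0, 3, 1, 2), rhs_perm = (3, 2, 0, 1) and
# out_perm = (0, 3, 1, 2): batch and feature dimensions first, then the spatial
# dimensions ordered as they appear in the kernel spec.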
def _conv_general_proto(dimension_numbers):
assert type(dimension_numbers) is ConvDimensionNumbers
lhs_spec, rhs_spec, out_spec = dimension_numbers
proto = xla_client.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
def _conv_general_vjp_lhs_padding(
in_shape, window_dimensions, window_strides, out_shape, padding,
lhs_dilation, rhs_dilation):
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
pad_before = onp.subtract(rhs_dilated_shape, [lo for lo, _ in padding]) - 1
pad_after = (onp.add(lhs_dilated_shape, rhs_dilated_shape) - 1
- out_dilated_shape - pad_before)
return zip(pad_before, pad_after)
def _conv_general_vjp_rhs_padding(
in_shape, window_dimensions, window_strides, out_shape, padding,
lhs_dilation, rhs_dilation):
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
total_in_pad = out_dilated_shape + rhs_dilated_shape - lhs_dilated_shape - 1
return [(pad[0], tot - pad[0]) for pad, tot in zip(padding, total_in_pad)]
def _balanced_eq(x, z, y):
return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),
select(_eq_meet(y, z), _twos(z), _ones(z)))
def _eq_meet(a, b):
a_dtype, b_dtype = _dtype(a), _dtype(b)
if a_dtype != b_dtype:
higher_dtype = dtypes.promote_types(a_dtype, b_dtype)
if higher_dtype == a_dtype:
a = convert_element_type(a, b_dtype)
else:
b = convert_element_type(b, a_dtype)
return eq(a, b)
def _abstractify(x):
return raise_to_shaped(core.get_aval(x))
def _check_user_dtype_supported(dtype, fun_name=None):
onp_dtype = onp.dtype(dtype)
if onp_dtype.kind not in "biufc" and onp_dtype.type != dtypes.bfloat16:
msg = f"JAX only supports number and bool dtypes, got dtype {dtype}"
raise TypeError(msg)
if dtype is not None and onp_dtype != dtypes.canonicalize_dtype(dtype):
msg = ("Explicitly requested dtype {} {} is not available, "
"and will be truncated to dtype {}. To enable more dtypes, set the "
"jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell "
"environment variable. "
"See https://github.com/google/jax#current-gotchas for more.")
fun_name = "requested in {}".format(fun_name) if fun_name else ""
truncated_dtype = dtypes.canonicalize_dtype(dtype).name
    warnings.warn(msg.format(dtype, fun_name, truncated_dtype))
def _canonicalize_axis(axis, num_dims):
"""Canonicalize an axis in (-num_dims, num_dims) to [0, num_dims)."""
axis = int(axis)
if axis < 0:
axis = axis + num_dims
if axis < 0 or axis >= num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
return axis
|
"""Top-level package for santa-helpers."""
__author__ = """Magdalena Rother"""
__email__ = 'rother.magdalena@gmail.com'
__version__ = '0.0.1'
from .neighbors import neighbors # noqa
from .parse import parse_grid_to_dict # noqa
|
import os
import sys
import pytest
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from base import TestBaseClass
class TestClassOelintVarsBugtrackerIsUrl(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.vars.bugtrackerisurl'])
@pytest.mark.parametrize('occurrence', [1])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
BUGTRACKER = "what_/the/f"
'''
},
{
'oelint_adv_test.bb':
'''
BUGTRACKER = "what_/the/f"
'''
},
],
)
def test_bad(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
@pytest.mark.parametrize('id', ['oelint.vars.bugtrackerisurl'])
@pytest.mark.parametrize('occurrence', [0])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
BUGTRACKER = "https://foo.com"
'''
},
],
)
def test_good(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
|
from django.apps import AppConfig
class SpacetradingConfig(AppConfig):
name = 'spacetrading'
|
#!/usr/bin/python3
"""
Beautiful command line parsing
@author chairs
"""
import inspect, sys
from collections import namedtuple
from collections import defaultdict
from subprocess import DEVNULL
class Cookie (object):
"""
Main decorator object
@param name of application
"""
def __init__ (self, app_name, notes=()):
self.optarg = namedtuple('optarg',
['full', 'abbrev', 'default'])
self.name = str(app_name)
self.notes = notes
def __parse (self, args):
"""
Parse command line arguments from argv, built to be simple
and as fast as possible to avoid application overhead
@param command line arguments
@return necessary destinations and identifiers
"""
        ordered = list(); full, abbrev = dict(), dict()
args = args + ['']
i = 0
while i < len(args) - 1:
token = args[i]
next_token = args[i + 1]
# the full argument case
if token.startswith('--'):
if next_token.startswith('-'):
raise ValueError('{} incomplete'.format(token))
else:
full[token[2:]] = next_token
i += 2
# the shorthand argument case (more common)
elif token.startswith('-'):
if next_token.startswith('-'):
raise ValueError('{} incomplete'.format(token))
else:
abbrev[token[1:]] = next_token
i += 2
else:
ordered.append(token)
i += 1
return ordered, full, abbrev
def __construct_ordered (self, params):
"""
Build the ordered parameters (those without flags, positional)
@param parameters from parse
        @return all exclusively ordered arguments
"""
return [key for key, arg in params.items() if arg.default == inspect._empty]
def __construct_optional (self, params):
"""
Build the optional parameters (those with flags, switches)
@param parameters from parse
@return all exclusively optional arguments
"""
args = []
filtered = {
key: arg.default for key, arg in params.items() if arg.default != inspect._empty}
for key, default in filtered.items():
arg = self.optarg(full=key, abbrev=key[0].lower(), default=default)
args.append(arg)
        args_full, args_abbrev = dict(), dict()
# resolve possible conflicts
known_count = defaultdict(int)
for arg in args:
args_full[arg.full] = arg
if known_count[arg.abbrev] == 0: args_abbrev[arg.abbrev] = arg
elif known_count[arg.abbrev] == 1:
# establish abbreviation
                new_abbrev = arg.abbrev.upper()
args_full[arg.full] = self.optarg(
full=arg.full,
abbrev=new_abbrev,
default=arg.default)
args_abbrev[new_abbrev] = args_full[arg.full]
else:
                new_abbrev = arg.abbrev.upper() + str(known_count[arg.abbrev])
args_full[arg.full] = self.optarg(
full=arg.full,
abbrev=new_abbrev,
default=arg.default)
args_abbrev[new_abbrev] = args_full[arg.full]
known_count[arg.abbrev] += 1
return args_full, args_abbrev
def __resolve (self, args, signature):
"""
Resolve arguments final destinations
@param args arguments from construction
@param signatures
@return final destinations
"""
ordered, opt_parsed_full, opt_parsed_abbrev = self.__parse(args[1:])
ordered_def = self.__construct_ordered(signature.parameters)
if len(ordered) != len(ordered_def):
            raise Exception('wrong number of ordered arguments')
opt_parsed = dict()
opt_parsed.update(opt_parsed_full)
opt_parsed.update(opt_parsed_abbrev)
opt_def_full, opt_def_abbrev = self.__construct_optional(signature.parameters)
optional = {o.full: o.default for o in opt_def_full.values()}
opt_def = dict()
opt_def.update(opt_def_full)
opt_def.update(opt_def_abbrev)
for key, value in opt_parsed.items():
if key not in opt_def: raise Exception('resolution error')
d = opt_def[key]
optional[d.full] = value
return ordered, optional
def __usage_outline (self, signature):
"""
Nice formatted help message to outline usage
@param signature for arguments
"""
ordered = self.__construct_ordered(signature.parameters)
full, _ = self.__construct_optional(signature.parameters)
ordered_str = ' '.join(name.upper() for name in ordered)
optional_str = ' '.join('\n[-{} | --{} {}],'.format(
opt.abbrev, opt.full, opt.full.upper()) for opt in full.values())
optional_str = ''.join(optional_str.split(',')[::2])
return '{} {}'.format(ordered_str, optional_str)
def get_args (self, function):
"""
The main decorator, the glue
"""
def wrapper ():
sig = inspect.signature(function)
try:
ordered, optional = self.__resolve(sys.argv, sig)
except Exception:
                self.outline = ('Usage:', sys.argv[0], self.__usage_outline(sig))
print(*self.outline)
if not self.notes == ():
print('\n'.join(self.notes) + '\n'+'\t'*1 + 'respectively')
return
function(*ordered, **optional)
return wrapper
    def run (self, function_name, silent=False):
        restore = sys.stdout
        if silent:
            # keep the file handle; calling .close() here would leave stdout as None
            sys.stdout = open('/dev/null', 'w')
        try:
            function_name()
        finally:
            if silent:
                sys.stdout.close()
            sys.stdout = restore
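# Hedged usage sketch (illustrative only): a minimal, hypothetical command built
# on the Cookie decorator above; the app name, function and flag values are
# examples, not part of the library.
if __name__ == '__main__':
    _app = Cookie('greeter', notes=('NAME is the person to greet',))
    @_app.get_args
    def _greet(name, times='1', shout='no'):
        # `name` is positional; `times` and `shout` become -t/--times and
        # -s/--shout because they declare defaults
        for _ in range(int(times)):
            print(name.upper() if shout == 'yes' else name)
    _greet()  # e.g. `python cookie.py Ada -t 3 --shout yes`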
|
# -*- coding: utf-8 -*-
import time
import datetime
from aiocron import asyncio
from aiocron import crontab
import pytest
class CustomError(Exception):
pass
def test_str():
loop = asyncio.new_event_loop()
@crontab('* * * * * *', loop=loop)
def t():
pass
assert '* * * * *' in str(t)
def test_cron():
loop = asyncio.new_event_loop()
future = asyncio.Future(loop=loop)
@crontab('* * * * * *', start=False, loop=loop)
def t():
future.set_result(1)
t.start()
loop.run_until_complete(future)
t.stop()
assert future.result() == 1
def test_raise():
loop = asyncio.new_event_loop()
future = asyncio.Future(loop=loop)
@crontab('* * * * * *', start=False, loop=loop)
def t():
loop.call_later(1, future.set_result, 1)
raise ValueError()
t.start()
loop.run_until_complete(future)
t.stop()
assert future.result() == 1
def test_next():
loop = asyncio.new_event_loop()
def t():
return 1
t = crontab('* * * * * *', func=t, loop=loop)
future = asyncio.ensure_future(t.next(), loop=loop)
loop.run_until_complete(future)
assert future.result() == 1
def test_null_callback():
loop = asyncio.new_event_loop()
t = crontab('* * * * * *', loop=loop)
assert t.handle is None # not started
future = asyncio.ensure_future(t.next(4), loop=loop)
loop.run_until_complete(future)
assert future.result() == (4,)
def test_next_raise():
loop = asyncio.new_event_loop()
@crontab('* * * * * *', loop=loop)
def t():
raise CustomError()
future = asyncio.ensure_future(t.next(), loop=loop)
with pytest.raises(CustomError):
loop.run_until_complete(future)
def test_coro_next():
loop = asyncio.new_event_loop()
@crontab('* * * * * *', loop=loop)
async def t():
return 1
future = asyncio.ensure_future(t.next(), loop=loop)
loop.run_until_complete(future)
assert future.result() == 1
def test_coro_next_raise():
loop = asyncio.new_event_loop()
@crontab('* * * * * *', loop=loop)
async def t():
raise CustomError()
future = asyncio.ensure_future(t.next(), loop=loop)
with pytest.raises(CustomError):
loop.run_until_complete(future)
def test_next_dst(monkeypatch):
now = datetime.datetime.now()
class mydatetime:
@classmethod
def now(cls, tzinfo=None):
return datetime.datetime(
now.year + 1, 10, 29, 2, 58, 58,
tzinfo=tzinfo
)
monkeypatch.setattr('aiocron.datetime', mydatetime)
monkeypatch.setattr('dateutil.tz.time.timezone', -3600)
monkeypatch.setattr('dateutil.tz.time.altzone', -7200)
monkeypatch.setattr('dateutil.tz.time.daylight', 1)
monkeypatch.setattr('dateutil.tz.time.tzname', ('CET', 'CEST'))
loop = asyncio.new_event_loop()
t = crontab('* * * * *', loop=loop)
t.initialize()
# last hit in DST
a = t.get_next()
time.sleep(3)
# first hit after DST
b = t.get_next()
assert b - a == 60
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures as fx
import futurist
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
from oslotest import output
import sqlalchemy
import testtools
from nova.compute import rpcapi as compute_rpcapi
from nova import conductor
from nova import context
from nova.db.sqlalchemy import api as session
from nova import exception
from nova.network import neutron as neutron_api
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import test
from nova.tests import fixtures
from nova.tests.unit import conf_fixture
from nova.tests.unit import fake_instance
from nova import utils
CONF = cfg.CONF
class TestLogging(testtools.TestCase):
def test_default_logging(self):
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
self.assertEqual(2, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertNotIn("at debug", stdlog.logger.output)
# broken debug messages should still explode, even though we
# aren't logging them in the regular handler
self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
# and, ensure that one of the terrible log messages isn't
# output at info
warn_log = logging.getLogger('migrate.versioning.api')
warn_log.info("warn_log at info, should be skipped")
warn_log.error("warn_log at error")
self.assertIn("warn_log at error", stdlog.logger.output)
self.assertNotIn("warn_log at info", stdlog.logger.output)
def test_debug_logging(self):
self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should no longer be a null handler
self.assertEqual(1, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertIn("at debug", stdlog.logger.output)
class TestOSAPIFixture(testtools.TestCase):
@mock.patch('nova.objects.Service.get_by_host_and_binary')
@mock.patch('nova.objects.Service.create')
def test_responds_to_version(self, mock_service_create, mock_get):
"""Ensure the OSAPI server responds to calls sensibly."""
self.useFixture(output.CaptureOutput())
self.useFixture(fixtures.StandardLogging())
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.RPCFixture('nova.test'))
api = self.useFixture(fixtures.OSAPIFixture()).api
# request the API root, which provides us the versions of the API
resp = api.api_request('/', strip_version=True)
self.assertEqual(200, resp.status_code, resp.content)
# request a bad root url, should be a 404
#
# NOTE(sdague): this currently fails, as it falls into the 300
# dispatcher instead. This is a bug. The test case is left in
# here, commented out until we can address it.
#
# resp = api.api_request('/foo', strip_version=True)
# self.assertEqual(resp.status_code, 400, resp.content)
# request a known bad url, and we should get a 404
resp = api.api_request('/foo')
self.assertEqual(404, resp.status_code, resp.content)
class TestDatabaseFixture(testtools.TestCase):
def test_fixture_reset(self):
# because this sets up reasonable db connection strings
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database())
engine = session.get_engine()
conn = engine.connect()
result = conn.execute("select * from instance_types")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
# insert a 6th instance type, column 5 below is an int id
# which has a constraint on it, so if new standard instance
# types are added you have to bump it.
conn.execute("insert into instance_types VALUES "
"(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'"
", 1.0, 40, 0, 0, 1, 0)")
result = conn.execute("select * from instance_types")
rows = result.fetchall()
self.assertEqual(1, len(rows), "Rows %s" % rows)
# reset by invoking the fixture again
#
# NOTE(sdague): it's important to reestablish the db
# connection because otherwise we have a reference to the old
# in mem db.
self.useFixture(fixtures.Database())
conn = engine.connect()
result = conn.execute("select * from instance_types")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
def test_api_fixture_reset(self):
# This sets up reasonable db connection strings
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database(database='api'))
engine = session.get_api_engine()
conn = engine.connect()
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
uuid = uuidutils.generate_uuid()
conn.execute("insert into cell_mappings (uuid, name) VALUES "
"('%s', 'fake-cell')" % (uuid,))
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(1, len(rows), "Rows %s" % rows)
# reset by invoking the fixture again
#
# NOTE(sdague): it's important to reestablish the db
# connection because otherwise we have a reference to the old
# in mem db.
self.useFixture(fixtures.Database(database='api'))
conn = engine.connect()
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
def test_fixture_cleanup(self):
# because this sets up reasonable db connection strings
self.useFixture(conf_fixture.ConfFixture())
fix = fixtures.Database()
self.useFixture(fix)
# manually do the cleanup that addCleanup will do
fix.cleanup()
# ensure the db contains nothing
engine = session.get_engine()
conn = engine.connect()
schema = "".join(line for line in conn.connection.iterdump())
self.assertEqual(schema, "BEGIN TRANSACTION;COMMIT;")
def test_api_fixture_cleanup(self):
# This sets up reasonable db connection strings
self.useFixture(conf_fixture.ConfFixture())
fix = fixtures.Database(database='api')
self.useFixture(fix)
# No data inserted by migrations so we need to add a row
engine = session.get_api_engine()
conn = engine.connect()
uuid = uuidutils.generate_uuid()
conn.execute("insert into cell_mappings (uuid, name) VALUES "
"('%s', 'fake-cell')" % (uuid,))
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(1, len(rows), "Rows %s" % rows)
# Manually do the cleanup that addCleanup will do
fix.cleanup()
# Ensure the db contains nothing
engine = session.get_api_engine()
conn = engine.connect()
schema = "".join(line for line in conn.connection.iterdump())
self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema)
class TestDatabaseAtVersionFixture(testtools.TestCase):
def test_fixture_schema_version(self):
self.useFixture(conf_fixture.ConfFixture())
# In/after 317 aggregates did have uuid
self.useFixture(fixtures.DatabaseAtVersion(318))
engine = session.get_engine()
engine.connect()
meta = sqlalchemy.MetaData(engine)
aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
self.assertTrue(hasattr(aggregate.c, 'uuid'))
# Before 317, aggregates had no uuid
self.useFixture(fixtures.DatabaseAtVersion(316))
engine = session.get_engine()
engine.connect()
meta = sqlalchemy.MetaData(engine)
aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
self.assertFalse(hasattr(aggregate.c, 'uuid'))
engine.dispose()
def test_fixture_after_database_fixture(self):
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database())
self.useFixture(fixtures.DatabaseAtVersion(318))
class TestDefaultFlavorsFixture(testtools.TestCase):
@mock.patch("nova.objects.flavor.Flavor._send_notification")
def test_flavors(self, mock_send_notification):
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database())
self.useFixture(fixtures.Database(database='api'))
engine = session.get_api_engine()
conn = engine.connect()
result = conn.execute("select * from flavors")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
self.useFixture(fixtures.DefaultFlavorsFixture())
result = conn.execute("select * from flavors")
rows = result.fetchall()
self.assertEqual(6, len(rows), "Rows %s" % rows)
class TestIndirectionAPIFixture(testtools.TestCase):
def test_indirection_api(self):
# Should initially be None
self.assertIsNone(obj_base.NovaObject.indirection_api)
# make sure the fixture correctly sets the value
fix = fixtures.IndirectionAPIFixture('foo')
self.useFixture(fix)
self.assertEqual('foo', obj_base.NovaObject.indirection_api)
# manually do the cleanup that addCleanup will do
fix.cleanup()
# ensure the initial value is restored
self.assertIsNone(obj_base.NovaObject.indirection_api)
class TestSpawnIsSynchronousFixture(testtools.TestCase):
def test_spawn_patch(self):
orig_spawn = utils.spawn_n
fix = fixtures.SpawnIsSynchronousFixture()
self.useFixture(fix)
self.assertNotEqual(orig_spawn, utils.spawn_n)
def test_spawn_passes_through(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
tester = mock.MagicMock()
utils.spawn_n(tester.function, 'foo', bar='bar')
tester.function.assert_called_once_with('foo', bar='bar')
def test_spawn_return_has_wait(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn(lambda x: '%s' % x, 'foo')
foo = gt.wait()
self.assertEqual('foo', foo)
def test_spawn_n_return_has_wait(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn_n(lambda x: '%s' % x, 'foo')
foo = gt.wait()
self.assertEqual('foo', foo)
def test_spawn_has_link(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn(mock.MagicMock)
passed_arg = 'test'
call_count = []
def fake(thread, param):
self.assertEqual(gt, thread)
self.assertEqual(passed_arg, param)
call_count.append(1)
gt.link(fake, passed_arg)
self.assertEqual(1, len(call_count))
def test_spawn_n_has_link(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn_n(mock.MagicMock)
passed_arg = 'test'
call_count = []
def fake(thread, param):
self.assertEqual(gt, thread)
self.assertEqual(passed_arg, param)
call_count.append(1)
gt.link(fake, passed_arg)
self.assertEqual(1, len(call_count))
class TestSynchronousThreadPoolExecutorFixture(testtools.TestCase):
def test_submit_passes_through(self):
self.useFixture(fixtures.SynchronousThreadPoolExecutorFixture())
tester = mock.MagicMock()
executor = futurist.GreenThreadPoolExecutor()
future = executor.submit(tester.function, 'foo', bar='bar')
tester.function.assert_called_once_with('foo', bar='bar')
result = future.result()
self.assertEqual(tester.function.return_value, result)
class TestBannedDBSchemaOperations(testtools.TestCase):
def test_column(self):
column = sqlalchemy.Column()
with fixtures.BannedDBSchemaOperations(['Column']):
self.assertRaises(exception.DBNotAllowed,
column.drop)
self.assertRaises(exception.DBNotAllowed,
column.alter)
def test_table(self):
table = sqlalchemy.Table()
with fixtures.BannedDBSchemaOperations(['Table']):
self.assertRaises(exception.DBNotAllowed,
table.drop)
self.assertRaises(exception.DBNotAllowed,
table.alter)
class TestAllServicesCurrentFixture(testtools.TestCase):
@mock.patch('nova.objects.Service._db_service_get_minimum_version')
def test_services_current(self, mock_db):
mock_db.return_value = {'nova-compute': 123}
self.assertEqual(123, service_obj.Service.get_minimum_version(
None, 'nova-compute'))
mock_db.assert_called_once_with(None, ['nova-compute'],
use_slave=False)
mock_db.reset_mock()
compute_rpcapi.LAST_VERSION = 123
self.useFixture(fixtures.AllServicesCurrent())
self.assertIsNone(compute_rpcapi.LAST_VERSION)
self.assertEqual(service_obj.SERVICE_VERSION,
service_obj.Service.get_minimum_version(
None, 'nova-compute'))
self.assertFalse(mock_db.called)
class TestNoopConductorFixture(testtools.TestCase):
@mock.patch('nova.conductor.api.ComputeTaskAPI.resize_instance')
def test_task_api_not_called(self, mock_resize):
self.useFixture(fixtures.NoopConductorFixture())
conductor.ComputeTaskAPI().resize_instance()
self.assertFalse(mock_resize.called)
@mock.patch('nova.conductor.api.API.wait_until_ready')
def test_api_not_called(self, mock_wait):
self.useFixture(fixtures.NoopConductorFixture())
conductor.API().wait_until_ready()
self.assertFalse(mock_wait.called)
class TestSingleCellSimpleFixture(testtools.TestCase):
def test_single_cell(self):
self.useFixture(fixtures.SingleCellSimple())
cml = objects.CellMappingList.get_all(None)
self.assertEqual(1, len(cml))
def test_target_cell(self):
self.useFixture(fixtures.SingleCellSimple())
with context.target_cell(mock.sentinel.context, None) as c:
self.assertIs(mock.sentinel.context, c)
class TestWarningsFixture(test.TestCase):
def test_invalid_uuid_errors(self):
"""Creating an oslo.versionedobject with an invalid UUID value for a
UUIDField should raise an exception.
"""
valid_migration_kwargs = {
"created_at": timeutils.utcnow().replace(microsecond=0),
"updated_at": None,
"deleted_at": None,
"deleted": False,
"id": 123,
"uuid": uuids.migration,
"source_compute": "compute-source",
"dest_compute": "compute-dest",
"source_node": "node-source",
"dest_node": "node-dest",
"dest_host": "host-dest",
"old_instance_type_id": 42,
"new_instance_type_id": 84,
"instance_uuid": "fake-uuid",
"status": "migrating",
"migration_type": "resize",
"hidden": False,
"memory_total": 123456,
"memory_processed": 12345,
"memory_remaining": 111111,
"disk_total": 234567,
"disk_processed": 23456,
"disk_remaining": 211111,
}
# this shall not throw FutureWarning
objects.migration.Migration(**valid_migration_kwargs)
invalid_migration_kwargs = copy.deepcopy(valid_migration_kwargs)
invalid_migration_kwargs["uuid"] = "fake_id"
self.assertRaises(FutureWarning, objects.migration.Migration,
**invalid_migration_kwargs)
class TestDownCellFixture(test.TestCase):
def test_fixture(self):
# The test setup creates two cell mappings (cell0 and cell1) by
# default. Let's first list servers across all cells while they are
# "up" to make sure that works as expected. We'll create a single
# instance in cell1.
ctxt = context.get_admin_context()
cell1 = self.cell_mappings[test.CELL1_NAME]
with context.target_cell(ctxt, cell1) as cctxt:
inst = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst:
delattr(inst, 'id')
inst.create()
# Now list all instances from all cells (should get one back).
results = context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_all)
self.assertEqual(2, len(results))
self.assertEqual(0, len(results[objects.CellMapping.CELL0_UUID]))
self.assertEqual(1, len(results[cell1.uuid]))
# Now do the same but with the DownCellFixture which should result
# in exception results from both cells.
with fixtures.DownCellFixture():
results = context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_all)
self.assertEqual(2, len(results))
for result in results.values():
self.assertIsInstance(result, db_exc.DBError)
def test_fixture_when_explicitly_passing_down_cell_mappings(self):
# The test setup creates two cell mappings (cell0 and cell1) by
# default. We'll create one instance per cell and pass cell0 as
# the down cell. We should thus get db_exc.DBError for cell0 and
# correct InstanceList object from cell1.
ctxt = context.get_admin_context()
cell0 = self.cell_mappings['cell0']
cell1 = self.cell_mappings['cell1']
with context.target_cell(ctxt, cell0) as cctxt:
inst1 = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst1:
delattr(inst1, 'id')
inst1.create()
with context.target_cell(ctxt, cell1) as cctxt:
inst2 = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst2:
delattr(inst2, 'id')
inst2.create()
with fixtures.DownCellFixture([cell0]):
results = context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_all)
self.assertEqual(2, len(results))
for cell_uuid, result in results.items():
if cell_uuid == cell0.uuid:
self.assertIsInstance(result, db_exc.DBError)
else:
self.assertIsInstance(result, objects.InstanceList)
self.assertEqual(1, len(result))
self.assertEqual(inst2.uuid, result[0].uuid)
def test_fixture_for_an_individual_down_cell_targeted_call(self):
# We have cell0 and cell1 by default in the setup. We try targeting
# both the cells. We should get a db error for the down cell and
# the correct result for the up cell.
ctxt = context.get_admin_context()
cell0 = self.cell_mappings['cell0']
cell1 = self.cell_mappings['cell1']
with context.target_cell(ctxt, cell0) as cctxt:
inst1 = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst1:
delattr(inst1, 'id')
inst1.create()
with context.target_cell(ctxt, cell1) as cctxt:
inst2 = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst2:
delattr(inst2, 'id')
inst2.create()
def dummy_tester(ctxt, cell_mapping, uuid):
with context.target_cell(ctxt, cell_mapping) as cctxt:
return objects.Instance.get_by_uuid(cctxt, uuid)
# Scenario A: We do not pass any down cells, fixture automatically
# assumes the targeted cell is down whether its cell0 or cell1.
with fixtures.DownCellFixture():
self.assertRaises(
db_exc.DBError, dummy_tester, ctxt, cell1, inst2.uuid)
# Scenario B: We pass cell0 as the down cell.
with fixtures.DownCellFixture([cell0]):
self.assertRaises(
db_exc.DBError, dummy_tester, ctxt, cell0, inst1.uuid)
# Scenario C: We get the correct result from the up cell
# when targeted.
result = dummy_tester(ctxt, cell1, inst2.uuid)
self.assertEqual(inst2.uuid, result.uuid)
class TestNeutronFixture(test.NoDBTestCase):
def setUp(self):
super(TestNeutronFixture, self).setUp()
self.neutron = self.useFixture(fixtures.NeutronFixture(self))
def test_list_ports_with_resource_request_non_admin_client(self):
ctxt = context.get_context()
client = neutron_api.get_client(ctxt)
ports = client.list_ports(ctxt)['ports']
port_id = self.neutron.port_with_resource_request['id']
ports = [port for port in ports if port_id == port['id']]
self.assertIsNone(ports[0]['resource_request'])
def test_list_ports_with_resource_request_admin_client(self):
ctxt = context.get_admin_context()
client = neutron_api.get_client(ctxt)
ports = client.list_ports(ctxt)['ports']
port_id = self.neutron.port_with_resource_request['id']
ports = [port for port in ports if port_id == port['id']]
self.assertIsNotNone(ports[0]['resource_request'])
|
"""Several HTML builders."""
import html
import os
import posixpath
import re
import sys
from datetime import datetime
from os import path
from typing import IO, Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Type
from urllib.parse import quote
from docutils import nodes
from docutils.core import publish_parts
from docutils.frontend import OptionParser
from docutils.io import DocTreeInput, StringOutput
from docutils.nodes import Node
from docutils.utils import relative_path
from sphinx import __display_version__, package_dir
from sphinx import version_info as sphinx_version
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.config import ENUM, Config
from sphinx.domains import Domain, Index, IndexEntry
from sphinx.environment.adapters.asset import ImageAdapter
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.environment.adapters.toctree import TocTree
from sphinx.errors import ConfigError, ThemeError
from sphinx.highlighting import PygmentsBridge
from sphinx.locale import _, __
from sphinx.search import js_index
from sphinx.theming import HTMLThemeFactory
from sphinx.util import isurl, logging, md5, progress_message, status_iterator
from sphinx.util.docutils import is_html5_writer_available, new_document
from sphinx.util.fileutil import copy_asset
from sphinx.util.i18n import format_date
from sphinx.util.inventory import InventoryFile
from sphinx.util.matching import DOTFILES, Matcher, patmatch
from sphinx.util.osutil import copyfile, ensuredir, os_path, relative_uri
from sphinx.util.tags import Tags
from sphinx.writers.html import HTMLTranslator, HTMLWriter
# check whether the HTML5 writer is available
if is_html5_writer_available():
from sphinx.writers.html5 import HTML5Translator
html5_ready = True
else:
html5_ready = False
#: the filename for the inventory of objects
INVENTORY_FILENAME = 'objects.inv'
logger = logging.getLogger(__name__)
return_codes_re = re.compile('[\r\n]+')
def get_stable_hash(obj: Any) -> str:
"""
Return a stable hash for a Python data structure. We can't just use
the md5 of str(obj) since for example dictionary items are enumerated
in unpredictable order due to hash randomization in newer Pythons.
"""
if isinstance(obj, dict):
return get_stable_hash(list(obj.items()))
elif isinstance(obj, (list, tuple)):
obj = sorted(get_stable_hash(o) for o in obj)
return md5(str(obj).encode()).hexdigest()
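# Illustrative behaviour of get_stable_hash() (a sketch, not part of the original
# module): because dict items are listed and each element is hashed recursively
# and sorted before digesting, insertion order does not affect the result.
#
#   get_stable_hash({'a': 1, 'b': 2}) == get_stable_hash({'b': 2, 'a': 1})  # True
#   get_stable_hash([1, 2]) == get_stable_hash([2, 1])                      # True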
def convert_locale_to_language_tag(locale: Optional[str]) -> Optional[str]:
"""Convert a locale string to a language tag (ex. en_US -> en-US).
refs: BCP 47 (:rfc:`5646`)
"""
if locale:
return locale.replace('_', '-')
else:
return None
class Stylesheet(str):
"""A metadata of stylesheet.
To keep compatibility with old themes, an instance of stylesheet behaves as
its filename (str).
"""
attributes: Dict[str, str] = None
filename: str = None
priority: int = None
def __new__(cls, filename: str, *args: str, priority: int = 500, **attributes: Any
) -> "Stylesheet":
self = str.__new__(cls, filename)
self.filename = filename
self.priority = priority
self.attributes = attributes
self.attributes.setdefault('rel', 'stylesheet')
self.attributes.setdefault('type', 'text/css')
if args: # old style arguments (rel, title)
self.attributes['rel'] = args[0]
self.attributes['title'] = args[1]
return self
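# A quick sketch of how Stylesheet is used (illustrative only): the instance
# compares equal to its filename because it subclasses str, while the extra
# metadata lives in ``attributes``.
#
#   css = Stylesheet('custom.css', priority=800, media='print')
#   css == 'custom.css'        # True
#   css.priority               # 800
#   css.attributes['media']    # 'print'
#   css.attributes['rel']      # 'stylesheet' (filled in by default)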
class JavaScript(str):
"""A metadata of javascript file.
To keep compatibility with old themes, an instance of javascript behaves as
its filename (str).
"""
attributes: Dict[str, str] = None
filename: str = None
priority: int = None
def __new__(cls, filename: str, priority: int = 500, **attributes: str) -> "JavaScript":
self = str.__new__(cls, filename)
self.filename = filename
self.priority = priority
self.attributes = attributes
return self
class BuildInfo:
"""buildinfo file manipulator.
HTMLBuilder and its family are storing their own envdata to ``.buildinfo``.
This class is a manipulator for the file.
"""
@classmethod
def load(cls, f: IO) -> "BuildInfo":
try:
lines = f.readlines()
assert lines[0].rstrip() == '# Sphinx build info version 1'
assert lines[2].startswith('config: ')
assert lines[3].startswith('tags: ')
build_info = BuildInfo()
build_info.config_hash = lines[2].split()[1].strip()
build_info.tags_hash = lines[3].split()[1].strip()
return build_info
except Exception as exc:
raise ValueError(__('build info file is broken: %r') % exc) from exc
def __init__(self, config: Config = None, tags: Tags = None, config_categories: List[str] = []) -> None: # NOQA
self.config_hash = ''
self.tags_hash = ''
if config:
values = {c.name: c.value for c in config.filter(config_categories)}
self.config_hash = get_stable_hash(values)
if tags:
self.tags_hash = get_stable_hash(sorted(tags))
def __eq__(self, other: "BuildInfo") -> bool: # type: ignore
return (self.config_hash == other.config_hash and
self.tags_hash == other.tags_hash)
def dump(self, f: IO) -> None:
f.write('# Sphinx build info version 1\n'
'# This file hashes the configuration used when building these files.'
' When it is not found, a full rebuild will be done.\n'
'config: %s\n'
'tags: %s\n' %
(self.config_hash, self.tags_hash))
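# For reference, the on-disk format produced by BuildInfo.dump() looks roughly
# like the following (hash values shortened here for illustration; the real
# values are full md5 hexdigests):
#
#   # Sphinx build info version 1
#   # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
#   config: 7c8f1d...
#   tags: 81a0d2...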
class StandaloneHTMLBuilder(Builder):
"""
Builds standalone HTML docs.
"""
name = 'html'
format = 'html'
epilog = __('The HTML pages are in %(outdir)s.')
copysource = True
allow_parallel = True
out_suffix = '.html'
link_suffix = '.html' # defaults to matching out_suffix
indexer_format: Any = js_index
indexer_dumps_unicode = True
    # create links from scaled images to the original images [True/False]
html_scaled_image_link = True
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
supported_remote_images = True
supported_data_uri_images = True
searchindex_filename = 'searchindex.js'
add_permalinks = True
allow_sharp_as_current_path = True
embedded = False # for things like HTML help or Qt help: suppresses sidebar
search = True # for things like HTML help and Apple help: suppress search
use_index = False
download_support = True # enable download role
imgpath: str = None
domain_indices: List[Tuple[str, Type[Index], List[Tuple[str, List[IndexEntry]]], bool]] = [] # NOQA
def __init__(self, app: Sphinx) -> None:
super().__init__(app)
# CSS files
self.css_files: List[Stylesheet] = []
# JS files
self.script_files: List[JavaScript] = []
def init(self) -> None:
self.build_info = self.create_build_info()
# basename of images directory
self.imagedir = '_images'
# section numbers for headings in the currently visited document
self.secnumbers: Dict[str, Tuple[int, ...]] = {}
# currently written docname
self.current_docname: str = None
self.init_templates()
self.init_highlighter()
self.init_css_files()
self.init_js_files()
html_file_suffix = self.get_builder_config('file_suffix', 'html')
if html_file_suffix is not None:
self.out_suffix = html_file_suffix
html_link_suffix = self.get_builder_config('link_suffix', 'html')
if html_link_suffix is not None:
self.link_suffix = html_link_suffix
else:
self.link_suffix = self.out_suffix
self.use_index = self.get_builder_config('use_index', 'html')
def create_build_info(self) -> BuildInfo:
return BuildInfo(self.config, self.tags, ['html'])
def _get_translations_js(self) -> str:
candidates = [path.join(dir, self.config.language,
'LC_MESSAGES', 'sphinx.js')
for dir in self.config.locale_dirs] + \
[path.join(package_dir, 'locale', self.config.language,
'LC_MESSAGES', 'sphinx.js'),
path.join(sys.prefix, 'share/sphinx/locale',
self.config.language, 'sphinx.js')]
for jsfile in candidates:
if path.isfile(jsfile):
return jsfile
return None
def _get_style_filename(self) -> str:
if self.config.html_style is not None:
return self.config.html_style
elif self.theme:
return self.theme.get_config('theme', 'stylesheet')
else:
return 'default.css'
def get_theme_config(self) -> Tuple[str, Dict]:
return self.config.html_theme, self.config.html_theme_options
def init_templates(self) -> None:
theme_factory = HTMLThemeFactory(self.app)
themename, themeoptions = self.get_theme_config()
self.theme = theme_factory.create(themename)
self.theme_options = themeoptions.copy()
self.create_template_bridge()
self.templates.init(self, self.theme)
def init_highlighter(self) -> None:
# determine Pygments style and create the highlighter
if self.config.pygments_style is not None:
style = self.config.pygments_style
elif self.theme:
style = self.theme.get_config('theme', 'pygments_style', 'none')
else:
style = 'sphinx'
self.highlighter = PygmentsBridge('html', style)
if self.theme:
dark_style = self.theme.get_config('theme', 'pygments_dark_style', None)
else:
dark_style = None
if dark_style is not None:
self.dark_highlighter = PygmentsBridge('html', dark_style)
self.app.add_css_file('pygments_dark.css',
media='(prefers-color-scheme: dark)',
id='pygments_dark_css')
else:
self.dark_highlighter = None
def init_css_files(self) -> None:
self.css_files = []
self.add_css_file('pygments.css', priority=200)
self.add_css_file(self._get_style_filename(), priority=200)
for filename, attrs in self.app.registry.css_files:
self.add_css_file(filename, **attrs)
for filename, attrs in self.get_builder_config('css_files', 'html'):
attrs.setdefault('priority', 800) # User's CSSs are loaded after extensions'
self.add_css_file(filename, **attrs)
def add_css_file(self, filename: str, **kwargs: Any) -> None:
if '://' not in filename:
filename = posixpath.join('_static', filename)
self.css_files.append(Stylesheet(filename, **kwargs))
def init_js_files(self) -> None:
self.script_files = []
self.add_js_file('documentation_options.js', id="documentation_options",
data_url_root='', priority=200)
        # Remove frameworks and compatibility module below in Sphinx 6.0
# xref RemovedInSphinx60Warning
self.add_js_file('jquery.js', priority=200)
self.add_js_file('underscore.js', priority=200)
self.add_js_file('_sphinx_javascript_frameworks_compat.js', priority=200)
self.add_js_file('doctools.js', priority=200)
for filename, attrs in self.app.registry.js_files:
self.add_js_file(filename, **attrs)
for filename, attrs in self.get_builder_config('js_files', 'html'):
attrs.setdefault('priority', 800) # User's JSs are loaded after extensions'
self.add_js_file(filename, **attrs)
if self._get_translations_js():
self.add_js_file('translations.js')
def add_js_file(self, filename: str, **kwargs: Any) -> None:
if filename and '://' not in filename:
filename = posixpath.join('_static', filename)
self.script_files.append(JavaScript(filename, **kwargs))
@property
def default_translator_class(self) -> Type[nodes.NodeVisitor]: # type: ignore
if not html5_ready or self.config.html4_writer:
return HTMLTranslator
else:
return HTML5Translator
@property
def math_renderer_name(self) -> str:
name = self.get_builder_config('math_renderer', 'html')
if name is not None:
# use given name
return name
else:
            # not given: choose a math_renderer from the registered ones if possible
renderers = list(self.app.registry.html_inline_math_renderers)
if len(renderers) == 1:
# only default math_renderer (mathjax) is registered
return renderers[0]
elif len(renderers) == 2:
                # the default and another math_renderer are registered; prefer the non-default one
renderers.remove('mathjax')
return renderers[0]
else:
# many math_renderers are registered. can't choose automatically!
return None
def get_outdated_docs(self) -> Iterator[str]:
try:
with open(path.join(self.outdir, '.buildinfo')) as fp:
buildinfo = BuildInfo.load(fp)
if self.build_info != buildinfo:
logger.debug('[build target] did not match: build_info ')
yield from self.env.found_docs
return
except ValueError as exc:
logger.warning(__('Failed to read build info file: %r'), exc)
except OSError:
# ignore errors on reading
pass
if self.templates:
template_mtime = self.templates.newest_template_mtime()
else:
template_mtime = 0
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
                logger.debug('[build target] not found in env: %r', docname)
yield docname
continue
targetname = self.get_outfilename(docname)
try:
targetmtime = path.getmtime(targetname)
except Exception:
targetmtime = 0
try:
srcmtime = max(path.getmtime(self.env.doc2path(docname)),
template_mtime)
if srcmtime > targetmtime:
logger.debug(
'[build target] targetname %r(%s), template(%s), docname %r(%s)',
targetname,
datetime.utcfromtimestamp(targetmtime),
datetime.utcfromtimestamp(template_mtime),
docname,
datetime.utcfromtimestamp(path.getmtime(self.env.doc2path(docname))),
)
yield docname
except OSError:
# source doesn't exist anymore
pass
def get_asset_paths(self) -> List[str]:
return self.config.html_extra_path + self.config.html_static_path
def render_partial(self, node: Node) -> Dict[str, str]:
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
doc = new_document('<partial node>')
doc.append(node)
writer = HTMLWriter(self)
return publish_parts(reader_name='doctree',
writer=writer,
source_class=DocTreeInput,
settings_overrides={'output_encoding': 'unicode'},
source=doc)
def prepare_writing(self, docnames: Set[str]) -> None:
# create the search indexer
self.indexer = None
if self.search:
from sphinx.search import IndexBuilder
lang = self.config.html_search_language or self.config.language
self.indexer = IndexBuilder(self.env, lang,
self.config.html_search_options,
self.config.html_search_scorer)
self.load_indexer(docnames)
self.docwriter = HTMLWriter(self)
self.docsettings: Any = OptionParser(
defaults=self.env.settings,
components=(self.docwriter,),
read_config_files=True).get_default_values()
self.docsettings.compact_lists = bool(self.config.html_compact_lists)
# determine the additional indices to include
self.domain_indices = []
# html_domain_indices can be False/True or a list of index names
indices_config = self.config.html_domain_indices
if indices_config:
for domain_name in sorted(self.env.domains):
domain: Domain = self.env.domains[domain_name]
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
content, collapse = indexcls(domain).generate()
if content:
self.domain_indices.append(
(indexname, indexcls, content, collapse))
# format the "last updated on" string, only once is enough since it
# typically doesn't include the time of day
lufmt = self.config.html_last_updated_fmt
if lufmt is not None:
self.last_updated = format_date(lufmt or _('%b %d, %Y'),
language=self.config.language)
else:
self.last_updated = None
# If the logo or favicon are urls, keep them as-is, otherwise
# strip the relative path as the files will be copied into _static.
logo = self.config.html_logo or ''
favicon = self.config.html_favicon or ''
if not isurl(logo):
logo = path.basename(logo)
if not isurl(favicon):
favicon = path.basename(favicon)
self.relations = self.env.collect_relations()
rellinks: List[Tuple[str, str, str, str]] = []
if self.use_index:
rellinks.append(('genindex', _('General Index'), 'I', _('index')))
for indexname, indexcls, _content, _collapse in self.domain_indices:
# if it has a short name
if indexcls.shortname:
rellinks.append((indexname, indexcls.localname,
'', indexcls.shortname))
# back up script_files and css_files to allow adding JS/CSS files to a specific page.
self._script_files = list(self.script_files)
self._css_files = list(self.css_files)
self.globalcontext = {
'embedded': self.embedded,
'project': self.config.project,
'release': return_codes_re.sub('', self.config.release),
'version': self.config.version,
'last_updated': self.last_updated,
'copyright': self.config.copyright,
'master_doc': self.config.root_doc,
'root_doc': self.config.root_doc,
'use_opensearch': self.config.html_use_opensearch,
'docstitle': self.config.html_title,
'shorttitle': self.config.html_short_title,
'show_copyright': self.config.html_show_copyright,
'show_search_summary': self.config.html_show_search_summary,
'show_sphinx': self.config.html_show_sphinx,
'has_source': self.config.html_copy_source,
'show_source': self.config.html_show_sourcelink,
'sourcelink_suffix': self.config.html_sourcelink_suffix,
'file_suffix': self.out_suffix,
'link_suffix': self.link_suffix,
'script_files': self.script_files,
'language': convert_locale_to_language_tag(self.config.language),
'css_files': self.css_files,
'sphinx_version': __display_version__,
'sphinx_version_tuple': sphinx_version,
'style': self._get_style_filename(),
'rellinks': rellinks,
'builder': self.name,
'parents': [],
'logo': logo,
'favicon': favicon,
'html5_doctype': html5_ready and not self.config.html4_writer,
}
if self.theme:
self.globalcontext.update(
('theme_' + key, val) for (key, val) in
self.theme.get_options(self.theme_options).items())
self.globalcontext.update(self.config.html_context)
def get_doc_context(self, docname: str, body: str, metatags: str) -> Dict[str, Any]:
"""Collect items for the template context of a page."""
# find out relations
prev = next = None
parents = []
rellinks = self.globalcontext['rellinks'][:]
related = self.relations.get(docname)
titles = self.env.titles
if related and related[2]:
try:
next = {
'link': self.get_relative_uri(docname, related[2]),
'title': self.render_partial(titles[related[2]])['title']
}
rellinks.append((related[2], next['title'], 'N', _('next')))
except KeyError:
next = None
if related and related[1]:
try:
prev = {
'link': self.get_relative_uri(docname, related[1]),
'title': self.render_partial(titles[related[1]])['title']
}
rellinks.append((related[1], prev['title'], 'P', _('previous')))
except KeyError:
# the relation is (somehow) not in the TOC tree, handle
# that gracefully
prev = None
while related and related[0]:
try:
parents.append(
{'link': self.get_relative_uri(docname, related[0]),
'title': self.render_partial(titles[related[0]])['title']})
except KeyError:
pass
related = self.relations.get(related[0])
if parents:
# remove link to the master file; we have a generic
# "back to index" link already
parents.pop()
parents.reverse()
# title rendered as HTML
title_node = self.env.longtitles.get(docname)
title = self.render_partial(title_node)['title'] if title_node else ''
# Suffix for the document
source_suffix = self.env.doc2path(docname, False)[len(docname):]
# the name for the copied source
if self.config.html_copy_source:
sourcename = docname + source_suffix
if source_suffix != self.config.html_sourcelink_suffix:
sourcename += self.config.html_sourcelink_suffix
else:
sourcename = ''
# metadata for the document
meta = self.env.metadata.get(docname)
# local TOC and global TOC tree
self_toc = TocTree(self.env).get_toc_for(docname, self)
toc = self.render_partial(self_toc)['fragment']
return {
'parents': parents,
'prev': prev,
'next': next,
'title': title,
'meta': meta,
'body': body,
'metatags': metatags,
'rellinks': rellinks,
'sourcename': sourcename,
'toc': toc,
# only display a TOC if there's more than one item to show
'display_toc': (self.env.toc_num_entries[docname] > 1),
'page_source_suffix': source_suffix,
}
def write_doc(self, docname: str, doctree: nodes.document) -> None:
destination = StringOutput(encoding='utf-8')
doctree.settings = self.docsettings
self.secnumbers = self.env.toc_secnumbers.get(docname, {})
self.fignumbers = self.env.toc_fignumbers.get(docname, {})
self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')
self.current_docname = docname
self.docwriter.write(doctree, destination)
self.docwriter.assemble_parts()
body = self.docwriter.parts['fragment']
metatags = self.docwriter.clean_meta
ctx = self.get_doc_context(docname, body, metatags)
self.handle_page(docname, ctx, event_arg=doctree)
def write_doc_serialized(self, docname: str, doctree: nodes.document) -> None:
self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir)
self.post_process_images(doctree)
title_node = self.env.longtitles.get(docname)
title = self.render_partial(title_node)['title'] if title_node else ''
self.index_page(docname, doctree, title)
def finish(self) -> None:
self.finish_tasks.add_task(self.gen_indices)
self.finish_tasks.add_task(self.gen_pages_from_extensions)
self.finish_tasks.add_task(self.gen_additional_pages)
self.finish_tasks.add_task(self.copy_image_files)
self.finish_tasks.add_task(self.copy_download_files)
self.finish_tasks.add_task(self.copy_static_files)
self.finish_tasks.add_task(self.copy_extra_files)
self.finish_tasks.add_task(self.write_buildinfo)
# dump the search index
self.handle_finish()
@progress_message(__('generating indices'))
def gen_indices(self) -> None:
# the global general index
if self.use_index:
self.write_genindex()
# the global domain-specific indices
self.write_domain_indices()
def gen_pages_from_extensions(self) -> None:
# pages from extensions
for pagelist in self.events.emit('html-collect-pages'):
for pagename, context, template in pagelist:
self.handle_page(pagename, context, template)
@progress_message(__('writing additional pages'))
def gen_additional_pages(self) -> None:
# additional pages from conf.py
for pagename, template in self.config.html_additional_pages.items():
logger.info(pagename + ' ', nonl=True)
self.handle_page(pagename, {}, template)
# the search page
if self.search:
logger.info('search ', nonl=True)
self.handle_page('search', {}, 'search.html')
# the opensearch xml file
if self.config.html_use_opensearch and self.search:
logger.info('opensearch ', nonl=True)
fn = path.join(self.outdir, '_static', 'opensearch.xml')
self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
def write_genindex(self) -> None:
# the total count of lines for each index letter, used to distribute
# the entries into two columns
genindex = IndexEntries(self.env).create_index(self)
indexcounts = []
for _k, entries in genindex:
indexcounts.append(sum(1 + len(subitems)
for _, (_, subitems, _) in entries))
genindexcontext = {
'genindexentries': genindex,
'genindexcounts': indexcounts,
'split_index': self.config.html_split_index,
}
logger.info('genindex ', nonl=True)
if self.config.html_split_index:
self.handle_page('genindex', genindexcontext,
'genindex-split.html')
self.handle_page('genindex-all', genindexcontext,
'genindex.html')
for (key, entries), count in zip(genindex, indexcounts):
ctx = {'key': key, 'entries': entries, 'count': count,
'genindexentries': genindex}
self.handle_page('genindex-' + key, ctx,
'genindex-single.html')
else:
self.handle_page('genindex', genindexcontext, 'genindex.html')
def write_domain_indices(self) -> None:
for indexname, indexcls, content, collapse in self.domain_indices:
indexcontext = {
'indextitle': indexcls.localname,
'content': content,
'collapse_index': collapse,
}
logger.info(indexname + ' ', nonl=True)
self.handle_page(indexname, indexcontext, 'domainindex.html')
def copy_image_files(self) -> None:
if self.images:
stringify_func = ImageAdapter(self.app.env).get_original_image_uri
ensuredir(path.join(self.outdir, self.imagedir))
for src in status_iterator(self.images, __('copying images... '), "brown",
len(self.images), self.app.verbosity,
stringify_func=stringify_func):
dest = self.images[src]
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, self.imagedir, dest))
except Exception as err:
logger.warning(__('cannot copy image file %r: %s'),
path.join(self.srcdir, src), err)
def copy_download_files(self) -> None:
def to_relpath(f: str) -> str:
return relative_path(self.srcdir, f)
# copy downloadable files
if self.env.dlfiles:
ensuredir(path.join(self.outdir, '_downloads'))
for src in status_iterator(self.env.dlfiles, __('copying downloadable files... '),
"brown", len(self.env.dlfiles), self.app.verbosity,
stringify_func=to_relpath):
try:
dest = path.join(self.outdir, '_downloads', self.env.dlfiles[src][1])
ensuredir(path.dirname(dest))
copyfile(path.join(self.srcdir, src), dest)
except OSError as err:
logger.warning(__('cannot copy downloadable file %r: %s'),
path.join(self.srcdir, src), err)
def create_pygments_style_file(self) -> None:
"""create a style file for pygments."""
with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f:
f.write(self.highlighter.get_stylesheet())
if self.dark_highlighter:
with open(path.join(self.outdir, '_static', 'pygments_dark.css'), 'w') as f:
f.write(self.dark_highlighter.get_stylesheet())
def copy_translation_js(self) -> None:
"""Copy a JavaScript file for translations."""
jsfile = self._get_translations_js()
if jsfile:
copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js'))
def copy_stemmer_js(self) -> None:
"""Copy a JavaScript file for stemmer."""
if self.indexer is not None:
if hasattr(self.indexer, 'get_js_stemmer_rawcodes'):
for jsfile in self.indexer.get_js_stemmer_rawcodes():
copyfile(jsfile, path.join(self.outdir, '_static', path.basename(jsfile)))
else:
jsfile = self.indexer.get_js_stemmer_rawcode()
if jsfile:
copyfile(jsfile, path.join(self.outdir, '_static', '_stemmer.js'))
def copy_theme_static_files(self, context: Dict) -> None:
def onerror(filename: str, error: Exception) -> None:
logger.warning(__('Failed to copy a file in html_static_file: %s: %r'),
filename, error)
if self.theme:
for entry in self.theme.get_theme_dirs()[::-1]:
copy_asset(path.join(entry, 'static'),
path.join(self.outdir, '_static'),
excluded=DOTFILES, context=context,
renderer=self.templates, onerror=onerror)
def copy_html_static_files(self, context: Dict) -> None:
def onerror(filename: str, error: Exception) -> None:
logger.warning(__('Failed to copy a file in html_static_file: %s: %r'),
filename, error)
excluded = Matcher(self.config.exclude_patterns + ["**/.*"])
for entry in self.config.html_static_path:
copy_asset(path.join(self.confdir, entry),
path.join(self.outdir, '_static'),
excluded, context=context, renderer=self.templates, onerror=onerror)
def copy_html_logo(self) -> None:
if self.config.html_logo and not isurl(self.config.html_logo):
copy_asset(path.join(self.confdir, self.config.html_logo),
path.join(self.outdir, '_static'))
def copy_html_favicon(self) -> None:
if self.config.html_favicon and not isurl(self.config.html_favicon):
copy_asset(path.join(self.confdir, self.config.html_favicon),
path.join(self.outdir, '_static'))
def copy_static_files(self) -> None:
try:
with progress_message(__('copying static files')):
ensuredir(path.join(self.outdir, '_static'))
# prepare context for templates
context = self.globalcontext.copy()
if self.indexer is not None:
context.update(self.indexer.context_for_searchtool())
self.create_pygments_style_file()
self.copy_translation_js()
self.copy_stemmer_js()
self.copy_theme_static_files(context)
self.copy_html_static_files(context)
self.copy_html_logo()
self.copy_html_favicon()
except OSError as err:
logger.warning(__('cannot copy static file %r'), err)
def copy_extra_files(self) -> None:
"""copy html_extra_path files."""
try:
with progress_message(__('copying extra files')):
excluded = Matcher(self.config.exclude_patterns)
for extra_path in self.config.html_extra_path:
entry = path.join(self.confdir, extra_path)
copy_asset(entry, self.outdir, excluded)
except OSError as err:
logger.warning(__('cannot copy extra file %r'), err)
def write_buildinfo(self) -> None:
try:
with open(path.join(self.outdir, '.buildinfo'), 'w') as fp:
self.build_info.dump(fp)
except OSError as exc:
logger.warning(__('Failed to write build info file: %r'), exc)
def cleanup(self) -> None:
# clean up theme stuff
if self.theme:
self.theme.cleanup()
def post_process_images(self, doctree: Node) -> None:
"""Pick the best candidate for an image and link down-scaled images to
their high res version.
"""
Builder.post_process_images(self, doctree)
if self.config.html_scaled_image_link and self.html_scaled_image_link:
for node in doctree.findall(nodes.image):
if not any((key in node) for key in ['scale', 'width', 'height']):
                    # no resizing options given; the scaled-image link is only
                    # added for resized images.
continue
elif isinstance(node.parent, nodes.reference):
                    # an image that already has a hyperlink target
continue
elif 'no-scaled-link' in node['classes']:
# scaled image link is disabled for this node
continue
uri = node['uri']
reference = nodes.reference('', '', internal=True)
if uri in self.images:
reference['refuri'] = posixpath.join(self.imgpath,
self.images[uri])
else:
reference['refuri'] = uri
node.replace_self(reference)
reference.append(node)
def load_indexer(self, docnames: Iterable[str]) -> None:
keep = set(self.env.all_docs) - set(docnames)
try:
searchindexfn = path.join(self.outdir, self.searchindex_filename)
if self.indexer_dumps_unicode:
with open(searchindexfn, encoding='utf-8') as ft:
self.indexer.load(ft, self.indexer_format)
else:
with open(searchindexfn, 'rb') as fb:
self.indexer.load(fb, self.indexer_format)
except (OSError, ValueError):
if keep:
logger.warning(__('search index couldn\'t be loaded, but not all '
'documents will be built: the index will be '
'incomplete.'))
# delete all entries for files that will be rebuilt
self.indexer.prune(keep)
def index_page(self, pagename: str, doctree: nodes.document, title: str) -> None:
        # only index pages with a title
if self.indexer is not None and title:
filename = self.env.doc2path(pagename, base=None)
metadata = self.env.metadata.get(pagename, {})
if 'nosearch' in metadata:
self.indexer.feed(pagename, filename, '', new_document(''))
else:
self.indexer.feed(pagename, filename, title, doctree)
def _get_local_toctree(self, docname: str, collapse: bool = True, **kwargs: Any) -> str:
if 'includehidden' not in kwargs:
kwargs['includehidden'] = False
if kwargs.get('maxdepth') == '':
kwargs.pop('maxdepth')
return self.render_partial(TocTree(self.env).get_toctree_for(
docname, self, collapse, **kwargs))['fragment']
def get_outfilename(self, pagename: str) -> str:
return path.join(self.outdir, os_path(pagename) + self.out_suffix)
def add_sidebars(self, pagename: str, ctx: Dict) -> None:
def has_wildcard(pattern: str) -> bool:
return any(char in pattern for char in '*?[')
sidebars = None
matched = None
customsidebar = None
# default sidebars settings for selected theme
if self.theme.name == 'alabaster':
# provide default settings for alabaster (for compatibility)
# Note: this will be removed before Sphinx-2.0
try:
# get default sidebars settings from alabaster (if defined)
theme_default_sidebars = self.theme.config.get('theme', 'sidebars')
if theme_default_sidebars:
sidebars = [name.strip() for name in theme_default_sidebars.split(',')]
except Exception:
# fallback to better default settings
sidebars = ['about.html', 'navigation.html', 'relations.html',
'searchbox.html', 'donate.html']
else:
theme_default_sidebars = self.theme.get_config('theme', 'sidebars', None)
if theme_default_sidebars:
sidebars = [name.strip() for name in theme_default_sidebars.split(',')]
# user sidebar settings
html_sidebars = self.get_builder_config('sidebars', 'html')
for pattern, patsidebars in html_sidebars.items():
if patmatch(pagename, pattern):
if matched:
if has_wildcard(pattern):
# warn if both patterns contain wildcards
if has_wildcard(matched):
logger.warning(__('page %s matches two patterns in '
'html_sidebars: %r and %r'),
pagename, matched, pattern)
# else the already matched pattern is more specific
# than the present one, because it contains no wildcard
continue
matched = pattern
sidebars = patsidebars
if sidebars is None:
# keep defaults
pass
ctx['sidebars'] = sidebars
ctx['customsidebar'] = customsidebar
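    # Example of how the pattern matching above resolves (a sketch; the values
    # are hypothetical): an exact, wildcard-free pattern wins over a wildcard
    # match, and two overlapping wildcard patterns trigger the warning.
    #
    #   html_sidebars = {
    #       '**': ['globaltoc.html', 'searchbox.html'],
    #       'index': ['about.html'],
    #   }
    #   # pagename 'index' -> sidebars == ['about.html']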
# --------- these are overwritten by the serialization builder
def get_target_uri(self, docname: str, typ: str = None) -> str:
return quote(docname) + self.link_suffix
def handle_page(self, pagename: str, addctx: Dict, templatename: str = 'page.html',
outfilename: str = None, event_arg: Any = None) -> None:
ctx = self.globalcontext.copy()
        # current_page_name is kept for backwards compatibility
ctx['pagename'] = ctx['current_page_name'] = pagename
ctx['encoding'] = self.config.html_output_encoding
default_baseuri = self.get_target_uri(pagename)
# in the singlehtml builder, default_baseuri still contains an #anchor
# part, which relative_uri doesn't really like...
default_baseuri = default_baseuri.rsplit('#', 1)[0]
if self.config.html_baseurl:
ctx['pageurl'] = posixpath.join(self.config.html_baseurl,
pagename + self.out_suffix)
else:
ctx['pageurl'] = None
def pathto(otheruri: str, resource: bool = False, baseuri: str = default_baseuri) -> str: # NOQA
if resource and '://' in otheruri:
# allow non-local resources given by scheme
return otheruri
elif not resource:
otheruri = self.get_target_uri(otheruri)
uri = relative_uri(baseuri, otheruri) or '#'
if uri == '#' and not self.allow_sharp_as_current_path:
uri = baseuri
return uri
ctx['pathto'] = pathto
def hasdoc(name: str) -> bool:
if name in self.env.all_docs:
return True
elif name == 'search' and self.search:
return True
elif name == 'genindex' and self.get_builder_config('use_index', 'html'):
return True
return False
ctx['hasdoc'] = hasdoc
ctx['toctree'] = lambda **kwargs: self._get_local_toctree(pagename, **kwargs)
self.add_sidebars(pagename, ctx)
ctx.update(addctx)
# revert script_files and css_files
self.script_files[:] = self._script_files
self.css_files[:] = self._css_files
self.update_page_context(pagename, templatename, ctx, event_arg)
newtmpl = self.app.emit_firstresult('html-page-context', pagename,
templatename, ctx, event_arg)
if newtmpl:
templatename = newtmpl
# sort JS/CSS before rendering HTML
try:
# Convert script_files to list to support non-list script_files (refs: #8889)
ctx['script_files'] = sorted(list(ctx['script_files']), key=lambda js: js.priority)
except AttributeError:
            # Skip sorting if a user modifies script_files directly (maybe via `html_context`).
# refs: #8885
#
# Note: priority sorting feature will not work in this case.
pass
try:
ctx['css_files'] = sorted(list(ctx['css_files']), key=lambda css: css.priority)
except AttributeError:
pass
try:
output = self.templates.render(templatename, ctx)
except UnicodeError:
logger.warning(__("a Unicode error occurred when rendering the page %s. "
"Please make sure all config values that contain "
"non-ASCII content are Unicode strings."), pagename)
return
except Exception as exc:
raise ThemeError(__("An error happened in rendering the page %s.\nReason: %r") %
(pagename, exc)) from exc
if not outfilename:
outfilename = self.get_outfilename(pagename)
# outfilename's path is in general different from self.outdir
ensuredir(path.dirname(outfilename))
try:
with open(outfilename, 'w', encoding=ctx['encoding'],
errors='xmlcharrefreplace') as f:
f.write(output)
except OSError as err:
logger.warning(__("error writing file %s: %s"), outfilename, err)
if self.copysource and ctx.get('sourcename'):
# copy the source file for the "show source" link
source_name = path.join(self.outdir, '_sources',
os_path(ctx['sourcename']))
ensuredir(path.dirname(source_name))
copyfile(self.env.doc2path(pagename), source_name)
def update_page_context(self, pagename: str, templatename: str,
ctx: Dict, event_arg: Any) -> None:
pass
def handle_finish(self) -> None:
if self.indexer:
self.finish_tasks.add_task(self.dump_search_index)
self.finish_tasks.add_task(self.dump_inventory)
@progress_message(__('dumping object inventory'))
def dump_inventory(self) -> None:
InventoryFile.dump(path.join(self.outdir, INVENTORY_FILENAME), self.env, self)
def dump_search_index(self) -> None:
with progress_message(__('dumping search index in %s') % self.indexer.label()):
self.indexer.prune(self.env.all_docs)
searchindexfn = path.join(self.outdir, self.searchindex_filename)
# first write to a temporary file, so that if dumping fails,
# the existing index won't be overwritten
if self.indexer_dumps_unicode:
with open(searchindexfn + '.tmp', 'w', encoding='utf-8') as ft:
self.indexer.dump(ft, self.indexer_format)
else:
with open(searchindexfn + '.tmp', 'wb') as fb:
self.indexer.dump(fb, self.indexer_format)
os.replace(searchindexfn + '.tmp', searchindexfn)
def convert_html_css_files(app: Sphinx, config: Config) -> None:
"""This converts string styled html_css_files to tuple styled one."""
html_css_files: List[Tuple[str, Dict]] = []
for entry in config.html_css_files:
if isinstance(entry, str):
html_css_files.append((entry, {}))
else:
try:
filename, attrs = entry
html_css_files.append((filename, attrs))
except Exception:
logger.warning(__('invalid css_file: %r, ignored'), entry)
continue
config.html_css_files = html_css_files # type: ignore
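# Example conf.py values the converter above normalizes (illustrative only):
#
#   html_css_files = [
#       'custom.css',                        # plain string
#       ('print.css', {'media': 'print'}),   # (filename, attributes) tuple
#   ]
#   # after 'config-inited': [('custom.css', {}), ('print.css', {'media': 'print'})]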
def convert_html_js_files(app: Sphinx, config: Config) -> None:
"""This converts string styled html_js_files to tuple styled one."""
html_js_files: List[Tuple[str, Dict]] = []
for entry in config.html_js_files:
if isinstance(entry, str):
html_js_files.append((entry, {}))
else:
try:
filename, attrs = entry
html_js_files.append((filename, attrs))
except Exception:
logger.warning(__('invalid js_file: %r, ignored'), entry)
continue
config.html_js_files = html_js_files # type: ignore
def setup_css_tag_helper(app: Sphinx, pagename: str, templatename: str,
context: Dict, doctree: Node) -> None:
"""Set up css_tag() template helper.
    .. note:: This setup function is added to keep compatibility with webhelper.
"""
pathto = context.get('pathto')
def css_tag(css: Stylesheet) -> str:
attrs = []
for key in sorted(css.attributes):
value = css.attributes[key]
if value is not None:
attrs.append('%s="%s"' % (key, html.escape(value, True)))
attrs.append('href="%s"' % pathto(css.filename, resource=True))
return '<link %s />' % ' '.join(attrs)
context['css_tag'] = css_tag
def setup_js_tag_helper(app: Sphinx, pagename: str, templatename: str,
context: Dict, doctree: Node) -> None:
"""Set up js_tag() template helper.
    .. note:: This setup function is added to keep compatibility with webhelper.
"""
pathto = context.get('pathto')
def js_tag(js: JavaScript) -> str:
attrs = []
body = ''
if isinstance(js, JavaScript):
for key in sorted(js.attributes):
value = js.attributes[key]
if value is not None:
if key == 'body':
body = value
elif key == 'data_url_root':
attrs.append('data-url_root="%s"' % pathto('', resource=True))
else:
attrs.append('%s="%s"' % (key, html.escape(value, True)))
if js.filename:
attrs.append('src="%s"' % pathto(js.filename, resource=True))
else:
# str value (old styled)
attrs.append('src="%s"' % pathto(js, resource=True))
if attrs:
return '<script %s>%s</script>' % (' '.join(attrs), body)
else:
return '<script>%s</script>' % body
context['js_tag'] = js_tag
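# Roughly what js_tag() emits for a script registered via add_js_file() when
# rendering a top-level page (a sketch; attribute order follows
# sorted(js.attributes), and the src value depends on the current page's path):
#
#   js_tag(JavaScript('_static/custom.js', defer='defer'))
#   # -> '<script defer="defer" src="_static/custom.js"></script>'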
def setup_resource_paths(app: Sphinx, pagename: str, templatename: str,
context: Dict, doctree: Node) -> None:
"""Set up relative resource paths."""
pathto = context.get('pathto')
# favicon_url
favicon = context.get('favicon')
if favicon and not isurl(favicon):
context['favicon_url'] = pathto('_static/' + favicon, resource=True)
else:
context['favicon_url'] = favicon
# logo_url
logo = context.get('logo')
if logo and not isurl(logo):
context['logo_url'] = pathto('_static/' + logo, resource=True)
else:
context['logo_url'] = logo
def validate_math_renderer(app: Sphinx) -> None:
if app.builder.format != 'html':
return
name = app.builder.math_renderer_name # type: ignore
if name is None:
raise ConfigError(__('Many math_renderers are registered. '
'But no math_renderer is selected.'))
elif name not in app.registry.html_inline_math_renderers:
raise ConfigError(__('Unknown math_renderer %r is given.') % name)
def validate_html_extra_path(app: Sphinx, config: Config) -> None:
"""Check html_extra_paths setting."""
for entry in config.html_extra_path[:]:
extra_path = path.normpath(path.join(app.confdir, entry))
if not path.exists(extra_path):
logger.warning(__('html_extra_path entry %r does not exist'), entry)
config.html_extra_path.remove(entry)
elif (path.splitdrive(app.outdir)[0] == path.splitdrive(extra_path)[0] and
path.commonpath([app.outdir, extra_path]) == app.outdir):
logger.warning(__('html_extra_path entry %r is placed inside outdir'), entry)
config.html_extra_path.remove(entry)
def validate_html_static_path(app: Sphinx, config: Config) -> None:
"""Check html_static_paths setting."""
for entry in config.html_static_path[:]:
static_path = path.normpath(path.join(app.confdir, entry))
if not path.exists(static_path):
logger.warning(__('html_static_path entry %r does not exist'), entry)
config.html_static_path.remove(entry)
elif (path.splitdrive(app.outdir)[0] == path.splitdrive(static_path)[0] and
path.commonpath([app.outdir, static_path]) == app.outdir):
logger.warning(__('html_static_path entry %r is placed inside outdir'), entry)
config.html_static_path.remove(entry)
def validate_html_logo(app: Sphinx, config: Config) -> None:
"""Check html_logo setting."""
if (config.html_logo and
not path.isfile(path.join(app.confdir, config.html_logo)) and
not isurl(config.html_logo)):
logger.warning(__('logo file %r does not exist'), config.html_logo)
config.html_logo = None # type: ignore
def validate_html_favicon(app: Sphinx, config: Config) -> None:
"""Check html_favicon setting."""
if (config.html_favicon and
not path.isfile(path.join(app.confdir, config.html_favicon)) and
not isurl(config.html_favicon)):
logger.warning(__('favicon file %r does not exist'), config.html_favicon)
config.html_favicon = None # type: ignore
class _stable_repr_object():
def __repr__(self):
return '<object>'
UNSET = _stable_repr_object()
def migrate_html_add_permalinks(app: Sphinx, config: Config) -> None:
"""Migrate html_add_permalinks to html_permalinks*."""
html_add_permalinks = config.html_add_permalinks
if html_add_permalinks is UNSET:
return
# RemovedInSphinx60Warning
logger.warning(__('html_add_permalinks has been deprecated since v3.5.0. '
'Please use html_permalinks and html_permalinks_icon instead.'))
if not html_add_permalinks:
config.html_permalinks = False # type: ignore[attr-defined]
return
config.html_permalinks_icon = html.escape( # type: ignore[attr-defined]
html_add_permalinks
)
# for compatibility
import sphinxcontrib.serializinghtml # NOQA
import sphinx.builders.dirhtml # NOQA
import sphinx.builders.singlehtml # NOQA
def setup(app: Sphinx) -> Dict[str, Any]:
# builders
app.add_builder(StandaloneHTMLBuilder)
# config values
app.add_config_value('html_theme', 'alabaster', 'html')
app.add_config_value('html_theme_path', [], 'html')
app.add_config_value('html_theme_options', {}, 'html')
app.add_config_value('html_title',
lambda self: _('%s %s documentation') % (self.project, self.release),
'html', [str])
app.add_config_value('html_short_title', lambda self: self.html_title, 'html')
app.add_config_value('html_style', None, 'html', [str])
app.add_config_value('html_logo', None, 'html', [str])
app.add_config_value('html_favicon', None, 'html', [str])
app.add_config_value('html_css_files', [], 'html')
app.add_config_value('html_js_files', [], 'html')
app.add_config_value('html_static_path', [], 'html')
app.add_config_value('html_extra_path', [], 'html')
app.add_config_value('html_last_updated_fmt', None, 'html', [str])
app.add_config_value('html_sidebars', {}, 'html')
app.add_config_value('html_additional_pages', {}, 'html')
app.add_config_value('html_domain_indices', True, 'html', [list])
app.add_config_value('html_add_permalinks', UNSET, 'html')
app.add_config_value('html_permalinks', True, 'html')
app.add_config_value('html_permalinks_icon', '¶', 'html')
app.add_config_value('html_use_index', True, 'html')
app.add_config_value('html_split_index', False, 'html')
app.add_config_value('html_copy_source', True, 'html')
app.add_config_value('html_show_sourcelink', True, 'html')
app.add_config_value('html_sourcelink_suffix', '.txt', 'html')
app.add_config_value('html_use_opensearch', '', 'html')
app.add_config_value('html_file_suffix', None, 'html', [str])
app.add_config_value('html_link_suffix', None, 'html', [str])
app.add_config_value('html_show_copyright', True, 'html')
app.add_config_value('html_show_search_summary', True, 'html')
app.add_config_value('html_show_sphinx', True, 'html')
app.add_config_value('html_context', {}, 'html')
app.add_config_value('html_output_encoding', 'utf-8', 'html')
app.add_config_value('html_compact_lists', True, 'html')
app.add_config_value('html_secnumber_suffix', '. ', 'html')
app.add_config_value('html_search_language', None, 'html', [str])
app.add_config_value('html_search_options', {}, 'html')
app.add_config_value('html_search_scorer', '', None)
app.add_config_value('html_scaled_image_link', True, 'html')
app.add_config_value('html_baseurl', '', 'html')
app.add_config_value('html_codeblock_linenos_style', 'inline', 'html', # RemovedInSphinx60Warning # NOQA
ENUM('table', 'inline'))
app.add_config_value('html_math_renderer', None, 'env')
app.add_config_value('html4_writer', False, 'html')
# events
app.add_event('html-collect-pages')
app.add_event('html-page-context')
# event handlers
app.connect('config-inited', convert_html_css_files, priority=800)
app.connect('config-inited', convert_html_js_files, priority=800)
app.connect('config-inited', migrate_html_add_permalinks, priority=800)
app.connect('config-inited', validate_html_extra_path, priority=800)
app.connect('config-inited', validate_html_static_path, priority=800)
app.connect('config-inited', validate_html_logo, priority=800)
app.connect('config-inited', validate_html_favicon, priority=800)
app.connect('builder-inited', validate_math_renderer)
app.connect('html-page-context', setup_css_tag_helper)
app.connect('html-page-context', setup_js_tag_helper)
app.connect('html-page-context', setup_resource_paths)
# load default math renderer
app.setup_extension('sphinx.ext.mathjax')
# load transforms for HTML builder
app.setup_extension('sphinx.builders.html.transforms')
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
from __future__ import division, absolute_import, print_function
import pytest
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
repack_fields, unstructured_to_structured, structured_to_unstructured,
apply_along_fields, require_fields, assign_fields_by_name)
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
class TestRecFunctions(object):
# Misc tests
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_zip_descr(self):
# Test zip_descr
(w, x, y, z) = self.data
# Std array
test = zip_descr((x, x), flatten=True)
assert_equal(test,
np.dtype([('', int), ('', int)]))
test = zip_descr((x, x), flatten=False)
assert_equal(test,
np.dtype([('', int), ('', int)]))
# Std & flexible-dtype
test = zip_descr((x, z), flatten=True)
assert_equal(test,
np.dtype([('', int), ('A', '|S3'), ('B', float)]))
test = zip_descr((x, z), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('A', '|S3'), ('B', float)])]))
# Standard & nested dtype
test = zip_descr((x, w), flatten=True)
assert_equal(test,
np.dtype([('', int),
('a', int),
('ba', float), ('bb', int)]))
test = zip_descr((x, w), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('a', int),
('b', [('ba', float), ('bb', int)])])]))
def test_drop_fields(self):
# Test drop_fields
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
# A basic field
test = drop_fields(a, 'a')
control = np.array([((2, 3.0),), ((5, 6.0),)],
dtype=[('b', [('ba', float), ('bb', int)])])
assert_equal(test, control)
# Another basic field (but nesting two fields)
test = drop_fields(a, 'b')
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
# A nested sub-field
test = drop_fields(a, ['ba', ])
control = np.array([(1, (3.0,)), (4, (6.0,))],
dtype=[('a', int), ('b', [('bb', int)])])
assert_equal(test, control)
        # All the nested sub-fields of a field: zap that field
test = drop_fields(a, ['ba', 'bb'])
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
test = drop_fields(a, ['a', 'b'])
assert_(test is None)
def test_rename_fields(self):
# Test rename fields
a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
dtype=[('a', int),
('b', [('ba', float), ('bb', (float, 2))])])
test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
control = a.view(newdtype)
assert_equal(test.dtype, newdtype)
assert_equal(test, control)
def test_get_names(self):
# Test get_names
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
def test_get_names_flat(self):
# Test get_names_flat
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names_flat(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b', 'ba', 'bb'))
def test_get_fieldstructure(self):
# Test get_fieldstructure
# No nested fields
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': []})
# One 1-nested field
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
# One 2-nested fields
ndtype = np.dtype([('A', int),
('B', [('BA', int),
('BB', [('BBA', int), ('BBB', int)])])])
test = get_fieldstructure(ndtype)
control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
assert_equal(test, control)
def test_find_duplicates(self):
# Test find_duplicates
a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
(1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
(0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 2]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='A', return_index=True)
control = [0, 1, 2, 3, 5]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='B', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BA', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, key='BB', return_index=True)
control = [0, 1, 2, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_find_duplicates_ignoremask(self):
# Test the ignoremask option of find_duplicates
ndtype = [('a', int)]
a = ma.array([1, 1, 1, 2, 2, 3, 3],
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
test = find_duplicates(a, ignoremask=True, return_index=True)
control = [0, 1, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 1, 2, 3, 4, 6]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_repack_fields(self):
dt = np.dtype('u1,f4,i8', align=True)
a = np.zeros(2, dtype=dt)
assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
assert_equal(repack_fields(a).itemsize, 13)
assert_equal(repack_fields(repack_fields(dt), align=True), dt)
# make sure type is preserved
dt = np.dtype((np.record, dt))
assert_(repack_fields(dt).type is np.record)
def test_structured_to_unstructured(self):
a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
out = structured_to_unstructured(a)
assert_equal(out, np.zeros((4,5), dtype='f8'))
b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ]))
out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
assert_equal(out, np.array([ 1. , 4. , 7. , 10. ]))
c = np.arange(20).reshape((4,5))
out = unstructured_to_structured(c, a.dtype)
want = np.array([( 0, ( 1., 2), [ 3., 4.]),
( 5, ( 6., 7), [ 8., 9.]),
(10, (11., 12), [13., 14.]),
(15, (16., 17), [18., 19.])],
dtype=[('a', 'i4'),
('b', [('f0', 'f4'), ('f1', 'u2')]),
('c', 'f4', (2,))])
assert_equal(out, want)
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
assert_equal(apply_along_fields(np.mean, d),
np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
np.array([ 3. , 5.5, 9. , 11. ]))
# check that for uniform field dtypes we get a view, not a copy:
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
assert_(ddd.base is d)
# including uniform fields with subarrays unpacked
d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
(8, [9, 10], [[11, 12], [13, 14]])],
dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
assert_(ddd.base is d)
# test that nested fields with identical names don't break anything
point = np.dtype([('x', int), ('y', int)])
triangle = np.dtype([('a', point), ('b', point), ('c', point)])
arr = np.zeros(10, triangle)
res = structured_to_unstructured(arr, dtype=int)
assert_equal(res, np.zeros((10, 6), dtype=int))
def test_field_assignment_by_name(self):
a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
newdt = [('b', 'f4'), ('c', 'u1')]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([(1,2), (3,4)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
# test nested fields
a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
newdt = [('a', [('c', 'u1')])]
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
b = np.array([((2,),), ((3,),)], dtype=newdt)
assign_fields_by_name(a, b, zero_unassigned=False)
assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
assign_fields_by_name(a, b)
assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
# test unstructured code path for 0d arrays
a, b = np.array(3), np.array(0)
assign_fields_by_name(b, a)
assert_equal(b[()], 3)
class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
b = np.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = np.array([(1, 10.), (2, 20.), (0, 0.)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
def test_masked_flexible(self):
# Test recursive_fill_fields on masked flexible-array
a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
dtype=[('A', int), ('B', float)])
b = ma.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
mask=[(0, 1), (1, 0), (0, 0)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
class TestMergeArrays(object):
# Test merge_arrays
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array(
[(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_solo(self):
# Test merge_arrays on a single array.
(_, x, _, z) = self.data
test = merge_arrays(x)
control = np.array([(1,), (2,)], dtype=[('f0', int)])
assert_equal(test, control)
test = merge_arrays((x,))
assert_equal(test, control)
test = merge_arrays(z, flatten=False)
assert_equal(test, z)
test = merge_arrays(z, flatten=True)
assert_equal(test, z)
def test_solo_w_flatten(self):
# Test merge_arrays on a single array w & w/o flattening
w = self.data[0]
test = merge_arrays(w, flatten=False)
assert_equal(test, w)
test = merge_arrays(w, flatten=True)
control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
dtype=[('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
def test_standard(self):
# Test standard & standard
# Test merge arrays
(_, x, y, _) = self.data
test = merge_arrays((x, y), usemask=False)
control = np.array([(1, 10), (2, 20), (-1, 30)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, y), usemask=True)
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_flatten(self):
# Test standard & flexible
(_, x, _, z) = self.data
test = merge_arrays((x, z), flatten=True)
control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
test = merge_arrays((x, z), flatten=False)
control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
dtype=[('f0', int),
('f1', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
def test_flatten_wflexible(self):
# Test flatten standard & nested
(w, x, _, _) = self.data
test = merge_arrays((x, w), flatten=True)
control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
dtype=[('f0', int),
('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
test = merge_arrays((x, w), flatten=False)
controldtype = [('f0', int),
('f1', [('a', int),
('b', [('ba', float), ('bb', int)])])]
control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
dtype=controldtype)
assert_equal(test, control)
def test_wmasked_arrays(self):
# Test merge_arrays masked arrays
(_, x, _, _) = self.data
mx = ma.array([1, 2, 3], mask=[1, 0, 0])
test = merge_arrays((x, mx), usemask=True)
control = ma.array([(1, 1), (2, 2), (-1, 3)],
mask=[(0, 1), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, mx), usemask=True, asrecarray=True)
assert_equal(test, control)
assert_(isinstance(test, MaskedRecords))
def test_w_singlefield(self):
# Test single field
test = merge_arrays((np.array([1, 2]).view([('a', int)]),
np.array([10., 20., 30.])),)
control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('a', int), ('f1', float)])
assert_equal(test, control)
def test_w_shorter_flex(self):
# Test merge_arrays w/ a shorter flexndarray.
z = self.data[-1]
# Fixme, this test looks incomplete and broken
#test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
#control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
# dtype=[('A', '|S3'), ('B', float), ('C', int)])
#assert_equal(test, control)
# Hack to avoid pyflakes warnings about unused variables
merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
dtype=[('A', '|S3'), ('B', float), ('C', int)])
def test_singlerecord(self):
(_, x, y, z) = self.data
test = merge_arrays((x[0], y[0], z[0]), usemask=False)
control = np.array([(1, 10, ('A', 1))],
dtype=[('f0', int),
('f1', int),
('f2', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
class TestAppendFields(object):
# Test append_fields
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_append_single(self):
# Test simple case
(_, x, _, _) = self.data
test = append_fields(x, 'A', data=[10, 20, 30])
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('A', int)],)
assert_equal(test, control)
def test_append_double(self):
# Test simple case
(_, x, _, _) = self.data
test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
dtype=[('f0', int), ('A', int), ('B', int)],)
assert_equal(test, control)
def test_append_on_flex(self):
# Test append_fields on flexible type arrays
z = self.data[-1]
test = append_fields(z, 'C', data=[10, 20, 30])
control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('C', int)],)
assert_equal(test, control)
def test_append_on_nested(self):
# Test append_fields on nested fields
w = self.data[0]
test = append_fields(w, 'C', data=[10, 20, 30])
control = ma.array([(1, (2, 3.0), 10),
(4, (5, 6.0), 20),
(-1, (-1, -1.), 30)],
mask=[(0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
dtype=[('a', int),
('b', [('ba', float), ('bb', int)]),
('C', int)],)
assert_equal(test, control)
class TestStackArrays(object):
# Test stack_arrays
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_solo(self):
# Test stack_arrays on single arrays
(_, x, _, _) = self.data
test = stack_arrays((x,))
assert_equal(test, x)
assert_(test is x)
test = stack_arrays(x)
assert_equal(test, x)
assert_(test is x)
def test_unnamed_fields(self):
# Tests combinations of arrays w/o named fields
(_, x, y, _) = self.data
test = stack_arrays((x, x), usemask=False)
control = np.array([1, 2, 1, 2])
assert_equal(test, control)
test = stack_arrays((x, y), usemask=False)
control = np.array([1, 2, 10, 20, 30])
assert_equal(test, control)
test = stack_arrays((y, x), usemask=False)
control = np.array([10, 20, 30, 1, 2])
assert_equal(test, control)
def test_unnamed_and_named_fields(self):
# Test combination of arrays w/ & w/o named fields
(_, x, _, z) = self.data
test = stack_arrays((x, z))
control = ma.array([(1, -1, -1), (2, -1, -1),
(-1, 'A', 1), (-1, 'B', 2)],
mask=[(0, 1, 1), (0, 1, 1),
(1, 0, 0), (1, 0, 0)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
def test_matching_named_fields(self):
# Test combination of arrays w/ matching field names
(_, x, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
test = stack_arrays((z, zz))
control = ma.array([('A', 1, -1), ('B', 2, -1),
                    ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
test = stack_arrays((z, zz, x))
ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
('a', 10., 100., -1), ('b', 20., 200., -1),
('c', 30., 300., -1),
(-1, -1, -1, 1), (-1, -1, -1, 2)],
dtype=ndtype,
mask=[(0, 0, 1, 1), (0, 0, 1, 1),
(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
(1, 1, 1, 0), (1, 1, 1, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_defaults(self):
# Test defaults: no exception raised if keys of defaults are not fields.
(_, _, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
test = stack_arrays((z, zz), defaults=defaults)
control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
                    ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_autoconversion(self):
# Tests autoconversion
adtype = [('A', int), ('B', bool), ('C', float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [('A', int), ('B', float), ('C', float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
test = stack_arrays((a, b), autoconvert=True)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
with assert_raises(TypeError):
stack_arrays((a, b), autoconvert=False)
def test_checktitles(self):
# Test using titles in the field names
adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
test = stack_arrays((a, b))
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_subdtype(self):
z = np.array([
('A', 1), ('B', 2)
], dtype=[('A', '|S3'), ('B', float, (1,))])
zz = np.array([
('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
res = stack_arrays((z, zz))
expected = ma.array(
data=[
(b'A', [1.0], 0),
(b'B', [2.0], 0),
(b'a', [10.0], 100.0),
(b'b', [20.0], 200.0),
(b'c', [30.0], 300.0)],
mask=[
(False, [False], True),
(False, [False], True),
(False, [False], False),
(False, [False], False),
(False, [False], False)
],
dtype=zz.dtype
)
assert_equal(res.dtype, expected.dtype)
assert_equal(res, expected)
assert_equal(res.mask, expected.mask)
class TestJoinBy(object):
def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_inner_join(self):
# Basic test of join_by
a, b = self.a, self.b
test = join_by('a', a, b, jointype='inner')
control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
(7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
(9, 59, 69, 109, 104)],
dtype=[('a', int), ('b1', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_join(self):
a, b = self.a, self.b
# Fixme, this test is broken
#test = join_by(('a', 'b'), a, b)
#control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
# (7, 57, 107, 102), (8, 58, 108, 103),
# (9, 59, 109, 104)],
# dtype=[('a', int), ('b', int),
# ('c', int), ('d', int)])
#assert_equal(test, control)
# Hack to avoid pyflakes unused variable warnings
join_by(('a', 'b'), a, b)
np.array([(5, 55, 105, 100), (6, 56, 106, 101),
(7, 57, 107, 102), (8, 58, 108, 103),
(9, 59, 109, 104)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
def test_join_subdtype(self):
# tests the bug in https://stackoverflow.com/q/44769632/102441
from numpy.lib import recfunctions as rfn
foo = np.array([(1,)],
dtype=[('key', int)])
bar = np.array([(1, np.array([1,2,3]))],
dtype=[('key', int), ('value', 'uint16', 3)])
res = join_by('key', foo, bar)
assert_equal(res, bar.view(ma.MaskedArray))
def test_outer_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'outer')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(5, 65, -1, 100), (6, 56, 106, -1),
(6, 66, -1, 101), (7, 57, 107, -1),
(7, 67, -1, 102), (8, 58, 108, -1),
(8, 68, -1, 103), (9, 59, 109, -1),
(9, 69, -1, 104), (10, 70, -1, 105),
(11, 71, -1, 106), (12, 72, -1, 107),
(13, 73, -1, 108), (14, 74, -1, 109)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_leftouter_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'leftouter')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(6, 56, 106, -1), (7, 57, 107, -1),
(8, 58, 108, -1), (9, 59, 109, -1)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1)],
dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
assert_equal(test, control)
def test_different_field_order(self):
# gh-8940
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
# this should not give a FutureWarning:
j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
def test_duplicate_keys(self):
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
@pytest.mark.xfail(reason="See comment at gh-9343")
def test_same_name_different_dtypes_key(self):
a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
expected_dtype = np.dtype([
('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_same_name_different_dtypes(self):
# gh-9338
a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
expected_dtype = np.dtype([
('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_subarray_key(self):
a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
res = join_by('pos', a, b)
assert_equal(res.dtype, expected_dtype)
assert_equal(res, expected)
def test_padded_dtype(self):
dt = np.dtype('i1,f4', align=True)
dt.names = ('k', 'v')
assert_equal(len(dt.descr), 3)  # padding field is inserted
a = np.array([(1, 3), (3, 2)], dt)
b = np.array([(1, 1), (2, 2)], dt)
res = join_by('k', a, b)
# no padding fields remain
expected_dtype = np.dtype([
('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
])
assert_equal(res.dtype, expected_dtype)
class TestJoinBy2(object):
@classmethod
def setup(cls):
cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_no_r1postfix(self):
# Basic test of join_by no_r1postfix
a, b = self.a, self.b
test = join_by(
'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
(2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
(4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
(6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
(8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
dtype=[('a', int), ('b', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_no_postfix(self):
assert_raises(ValueError, join_by, 'a', self.a, self.b,
r1postfix='', r2postfix='')
def test_no_r2postfix(self):
# Basic test of join_by no_r2postfix
a, b = self.a, self.b
test = join_by(
'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
(2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
(4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
(6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
(8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
dtype=[('a', int), ('b1', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_two_keys_two_vars(self):
a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(50, 60), np.arange(10, 20))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
np.arange(65, 75), np.arange(0, 10))),
dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
(10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
(10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
(10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
(10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
dtype=[('k', int), ('a', int), ('b1', int),
('b2', int), ('c1', int), ('c2', int)])
test = join_by(
['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
class TestAppendFieldsObj(object):
"""
Test append_fields with arrays containing objects
"""
# https://github.com/numpy/numpy/issues/2346
def setup(self):
from datetime import date
self.data = dict(obj=date(2000, 1, 1))
def test_append_to_objects(self):
"Test append_fields when the base array contains objects"
obj = self.data['obj']
x = np.array([(obj, 1.), (obj, 2.)],
dtype=[('A', object), ('B', float)])
y = np.array([10, 20], dtype=int)
test = append_fields(x, 'C', data=y, usemask=False)
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
|
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Diff Object"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from collections import OrderedDict
from future.utils import viewkeys
from .base import Model, proxy_gen
from .trial import Trial
from .graphs.diff_graph import DiffGraph
class Diff(Model):
"""This model represents a diff between two trials
Initialize it by passing both trial ids:
diff = Diff(1, 2)
There are four visualization modes for the graph:
tree: activation tree without any filters
diff.graph.mode = 0
no match: tree transformed into a graph by the addition of sequence and
return edges and removal of intermediate call edges
diff.graph.mode = 1
exact match: calls are only combined when all the sub-calls match
diff.graph.mode = 2
namespace: calls are combined without considering the sub-calls
diff.graph.mode = 3
You can change the graph width and height by the variables:
diff.graph.width = 600
diff.graph.height = 400
"""
__modelname__ = "Diff"
DEFAULT = {
"graph.width": 500,
"graph.height": 500,
"graph.mode": 3,
"graph.time_limit": None,
}
REPLACE = {
"graph_width": "graph.width",
"graph_height": "graph.height",
"graph_mode": "graph.mode",
"graph_time_limit": "graph.time_limit",
}
def __init__(self, trial_ref1, trial_ref2, **kwargs):
super(Diff, self).__init__(trial_ref1, trial_ref2, **kwargs)
self.trial1 = Trial(trial_ref1)
self.trial2 = Trial(trial_ref2)
self.graph = DiffGraph(self)
self.initialize_default(kwargs)
@property
def trial(self):
"""Return a tuple with information from both trials """
extra = ("start", "finish", "duration_text")
ignore = ("id",)
return diff_dict(
self.trial1.to_dict(ignore=ignore, extra=extra), # pylint: disable=no-member
self.trial2.to_dict(ignore=ignore, extra=extra)) # pylint: disable=no-member
@property
def modules(self):
"""Diff modules from trials"""
return diff_set(
set(proxy_gen(self.trial1.modules)),
set(proxy_gen(self.trial2.modules)))
@property
def environment(self):
"""Diff environment variables"""
return diff_set(
set(self.trial1.environment_attrs),
set(self.trial2.environment_attrs))
@property
def file_accesses(self):
"""Diff file accesses"""
return diff_set(
set(self.trial1.file_accesses),
set(self.trial2.file_accesses),
create_replaced=False)
def _ipython_display_(self):
"""Display history graph"""
if hasattr(self, "graph"):
# pylint: disable=protected-access
return self.graph._ipython_display_()
from IPython.display import display
display({
'text/plain': 'Diff {}:{}'.format(
self.trial1.id,
self.trial2.id
)
})
def diff_dict(before, after):
"""Compare dicts.
Return a dict with keys shared by both dicts that have different values
key -> [before[key], after[key]]
"""
result = OrderedDict()
for key in viewkeys(before):
if key != "id" and before[key] != after[key]:
result[key] = [before[key], after[key]]
return result
def diff_set(before, after, create_replaced=True):
"""Compare sets to get additions, removals and replacements
Return 3 sets:
added -- objects present in second set, but not present in first set
removed -- objects present in first set, but not present in second set
replaced -- objects that have the same name in both sets, but are different
"""
removed = before - after
added = after - before
replaced = set()
removed_by_name = {}
for element_removed in removed:
removed_by_name[element_removed.name] = element_removed
for element_added in added:
element_removed = removed_by_name.get(element_added.name)
if element_removed and create_replaced:
replaced.add((element_removed, element_added))
if create_replaced:
for (element_removed, element_added) in replaced:
removed.discard(element_removed)
added.discard(element_added)
return (added, removed, replaced)
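# Hedged usage sketch (an addition, not part of noWorkflow): demonstrates
# diff_set with a minimal stand-in for module/environment objects. The
# namedtuple below is hypothetical; real callers pass model proxies that
# expose a `name` attribute and support hashing/equality.
if __name__ == "__main__":
    from collections import namedtuple
    _Item = namedtuple("_Item", ["name", "version"])
    old = {_Item("numpy", "1.16"), _Item("future", "0.18")}
    new = {_Item("numpy", "1.17"), _Item("pyposast", "1.5")}
    added, removed, replaced = diff_set(old, new)
    # pyposast is added, future is removed, and numpy shows up as replaced
    # because the name matches but the objects differ.
    print(added, removed, replaced)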
|
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import uuid
from enum import Enum
from typing import TYPE_CHECKING, Callable, Optional, List, Any, cast
from streamlit.uploaded_file_manager import UploadedFileManager
import tornado.ioloop
import streamlit.elements.exception as exception_utils
from streamlit import __version__, caching, config, legacy_caching, secrets
from streamlit.case_converters import to_snake_case
from streamlit.credentials import Credentials
from streamlit.in_memory_file_manager import in_memory_file_manager
from streamlit.logger import get_logger
from streamlit.metrics_util import Installation
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.GitInfo_pb2 import GitInfo
from streamlit.proto.NewSession_pb2 import Config, CustomThemeConfig, UserInfo
from streamlit.session_data import SessionData
from streamlit.script_request_queue import RerunData, ScriptRequest, ScriptRequestQueue
from streamlit.script_runner import ScriptRunner, ScriptRunnerEvent
from streamlit.watcher.local_sources_watcher import LocalSourcesWatcher
LOGGER = get_logger(__name__)
if TYPE_CHECKING:
from streamlit.state import SessionState
class AppSessionState(Enum):
APP_NOT_RUNNING = "APP_NOT_RUNNING"
APP_IS_RUNNING = "APP_IS_RUNNING"
SHUTDOWN_REQUESTED = "SHUTDOWN_REQUESTED"
def _generate_scriptrun_id() -> str:
"""Randomly generate a unique ID for a script execution."""
return str(uuid.uuid4())
class AppSession:
"""
Contains session data for a single "user" of an active app
(that is, a connected browser tab).
Each AppSession has its own SessionData, root DeltaGenerator, ScriptRunner,
and widget state.
An AppSession is attached to each thread involved in running its script.
"""
def __init__(
self,
ioloop: tornado.ioloop.IOLoop,
session_data: SessionData,
uploaded_file_manager: UploadedFileManager,
message_enqueued_callback: Optional[Callable[[], None]],
local_sources_watcher: LocalSourcesWatcher,
):
"""Initialize the AppSession.
Parameters
----------
ioloop : tornado.ioloop.IOLoop
The Tornado IOLoop that we're running within.
session_data : SessionData
Object storing parameters related to running a script
uploaded_file_manager : UploadedFileManager
The server's UploadedFileManager.
message_enqueued_callback : Callable[[], None]
After enqueuing a message, this callable notification will be invoked.
local_sources_watcher: LocalSourcesWatcher
The file watcher that lets the session know local files have changed.
"""
# Each AppSession has a unique string ID.
self.id = str(uuid.uuid4())
self._ioloop = ioloop
self._session_data = session_data
self._uploaded_file_mgr = uploaded_file_manager
self._message_enqueued_callback = message_enqueued_callback
self._state = AppSessionState.APP_NOT_RUNNING
# Need to remember the client state here because when a script reruns
# due to the source code changing we need to pass in the previous client state.
self._client_state = ClientState()
self._local_sources_watcher = local_sources_watcher
self._local_sources_watcher.register_file_change_callback(
self._on_source_file_changed
)
self._stop_config_listener = config.on_config_parsed(
self._on_source_file_changed, force_connect=True
)
# The script should rerun when the `secrets.toml` file has been changed.
secrets._file_change_listener.connect(self._on_secrets_file_changed)
self._run_on_save = config.get_option("server.runOnSave")
# The ScriptRequestQueue is the means by which we communicate
# with the active ScriptRunner.
self._script_request_queue = ScriptRequestQueue()
self._scriptrunner: Optional[ScriptRunner] = None
# This needs to be lazily imported to avoid a dependency cycle.
from streamlit.state import SessionState
self._session_state = SessionState()
LOGGER.debug("AppSession initialized (id=%s)", self.id)
def flush_browser_queue(self) -> List[ForwardMsg]:
"""Clear the forward message queue and return the messages it contained.
The Server calls this periodically to deliver new messages
to the browser connected to this app.
Returns
-------
list[ForwardMsg]
The messages that were removed from the queue and should
be delivered to the browser.
"""
return self._session_data.flush_browser_queue()
def shutdown(self) -> None:
"""Shut down the AppSession.
It's an error to use an AppSession after it's been shut down.
"""
if self._state != AppSessionState.SHUTDOWN_REQUESTED:
LOGGER.debug("Shutting down (id=%s)", self.id)
# Clear any unused session files in upload file manager and media
# file manager
self._uploaded_file_mgr.remove_session_files(self.id)
in_memory_file_manager.clear_session_files(self.id)
in_memory_file_manager.del_expired_files()
# Shut down the ScriptRunner, if one is active.
# self._state must not be set to SHUTDOWN_REQUESTED until
# after this is called.
if self._scriptrunner is not None:
self._enqueue_script_request(ScriptRequest.SHUTDOWN)
self._state = AppSessionState.SHUTDOWN_REQUESTED
self._local_sources_watcher.close()
if self._stop_config_listener is not None:
self._stop_config_listener()
secrets._file_change_listener.disconnect(self._on_secrets_file_changed)
def enqueue(self, msg: ForwardMsg) -> None:
"""Enqueue a new ForwardMsg to our browser queue.
This can be called on both the main thread and a ScriptRunner
run thread.
Parameters
----------
msg : ForwardMsg
The message to enqueue
"""
if not config.get_option("client.displayEnabled"):
return
self._session_data.enqueue(msg)
if self._message_enqueued_callback:
self._message_enqueued_callback()
def handle_backmsg_exception(self, e: BaseException) -> None:
"""Handle an Exception raised while processing a BackMsg from the browser."""
# This does a few things:
# 1) Clears the current app in the browser.
# 2) Marks the current app as "stopped" in the browser.
# 3) HACK: Resets any script params that may have been broken (e.g. the
# command-line when rerunning with wrong argv[0])
self._on_scriptrunner_event(None, ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)
self._on_scriptrunner_event(None, ScriptRunnerEvent.SCRIPT_STARTED)
self._on_scriptrunner_event(None, ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)
msg = ForwardMsg()
exception_utils.marshall(msg.delta.new_element.exception, e)
self.enqueue(msg)
def request_rerun(self, client_state: Optional[ClientState]) -> None:
"""Signal that we're interested in running the script.
If the script is not already running, it will be started immediately.
Otherwise, a rerun will be requested.
Parameters
----------
client_state : streamlit.proto.ClientState_pb2.ClientState | None
The ClientState protobuf to run the script with, or None
to use previous client state.
"""
if client_state:
rerun_data = RerunData(
client_state.query_string, client_state.widget_states
)
else:
rerun_data = RerunData()
self._enqueue_script_request(ScriptRequest.RERUN, rerun_data)
@property
def session_state(self) -> "SessionState":
return self._session_state
def _on_source_file_changed(self) -> None:
"""One of our source files changed. Schedule a rerun if appropriate."""
if self._run_on_save:
self.request_rerun(self._client_state)
else:
self._enqueue_file_change_message()
def _on_secrets_file_changed(self, _) -> None:
"""Called when `secrets._file_change_listener` emits a Signal."""
# NOTE: At the time of writing, this function only calls `_on_source_file_changed`.
# The reason behind creating this function instead of just passing `_on_source_file_changed`
# to `connect` / `disconnect` directly is that every function that is passed to `connect` / `disconnect`
# must have at least one argument for `sender` (in this case we don't really care about it, thus `_`),
# and introducing an unnecessary argument to `_on_source_file_changed` just for this purpose sounded finicky.
self._on_source_file_changed()
def _clear_queue(self) -> None:
self._session_data.clear_browser_queue()
def _on_scriptrunner_event(
self,
sender: Optional[ScriptRunner],
event: ScriptRunnerEvent,
exception: Optional[BaseException] = None,
client_state: Optional[ClientState] = None,
) -> None:
"""Called when our ScriptRunner emits an event.
This is called from the sender ScriptRunner's script thread;
it is *not* called on the main thread.
Parameters
----------
sender : ScriptRunner | None
The ScriptRunner that emitted the event. This will be set to
None when called from `handle_backmsg_exception`.
event : ScriptRunnerEvent
The event type.
exception : BaseException | None
An exception thrown during compilation. Set only for the
SCRIPT_STOPPED_WITH_COMPILE_ERROR event.
client_state : streamlit.proto.ClientState_pb2.ClientState | None
The ScriptRunner's final ClientState. Set only for the
SHUTDOWN event.
"""
LOGGER.debug("OnScriptRunnerEvent: %s", event)
prev_state = self._state
if event == ScriptRunnerEvent.SCRIPT_STARTED:
if self._state != AppSessionState.SHUTDOWN_REQUESTED:
self._state = AppSessionState.APP_IS_RUNNING
self._clear_queue()
self._enqueue_new_session_message()
elif (
event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
or event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_COMPILE_ERROR
):
if self._state != AppSessionState.SHUTDOWN_REQUESTED:
self._state = AppSessionState.APP_NOT_RUNNING
script_succeeded = event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
self._enqueue_script_finished_message(
ForwardMsg.FINISHED_SUCCESSFULLY
if script_succeeded
else ForwardMsg.FINISHED_WITH_COMPILE_ERROR
)
if script_succeeded:
# When a script completes successfully, we update our
# LocalSourcesWatcher to account for any source code changes
# that change which modules should be watched. (This is run on
# the main thread, because LocalSourcesWatcher is not
# thread safe.)
self._ioloop.spawn_callback(
self._local_sources_watcher.update_watched_modules
)
else:
msg = ForwardMsg()
exception_utils.marshall(
msg.session_event.script_compilation_exception, exception
)
self.enqueue(msg)
elif event == ScriptRunnerEvent.SHUTDOWN:
# When ScriptRunner shuts down, update our local reference to it,
# and check to see if we need to spawn a new one. (This is run on
# the main thread.)
assert (
client_state is not None
), "client_state must be set for the SHUTDOWN event"
if self._state == AppSessionState.SHUTDOWN_REQUESTED:
# Only clear media files if the script is done running AND the
# session is actually shutting down.
in_memory_file_manager.clear_session_files(self.id)
def on_shutdown():
# We assert above that this is non-null
self._client_state = cast(ClientState, client_state)
self._scriptrunner = None
# Because a new ScriptEvent could have been enqueued while the
# scriptrunner was shutting down, we check to see if we should
# create a new one. (Otherwise, a newly-enqueued ScriptEvent
# won't be processed until another event is enqueued.)
self._maybe_create_scriptrunner()
self._ioloop.spawn_callback(on_shutdown)
# Send a message if our run state changed
app_was_running = prev_state == AppSessionState.APP_IS_RUNNING
app_is_running = self._state == AppSessionState.APP_IS_RUNNING
if app_is_running != app_was_running:
self._enqueue_session_state_changed_message()
def _enqueue_session_state_changed_message(self) -> None:
msg = ForwardMsg()
msg.session_state_changed.run_on_save = self._run_on_save
msg.session_state_changed.script_is_running = (
self._state == AppSessionState.APP_IS_RUNNING
)
self.enqueue(msg)
def _enqueue_file_change_message(self) -> None:
LOGGER.debug("Enqueuing script_changed message (id=%s)", self.id)
msg = ForwardMsg()
msg.session_event.script_changed_on_disk = True
self.enqueue(msg)
def _enqueue_new_session_message(self) -> None:
msg = ForwardMsg()
msg.new_session.script_run_id = _generate_scriptrun_id()
msg.new_session.name = self._session_data.name
msg.new_session.main_script_path = self._session_data.main_script_path
_populate_config_msg(msg.new_session.config)
_populate_theme_msg(msg.new_session.custom_theme)
# Immutable session data. We send this every time a new session is
# started, to avoid having to track whether the client has already
# received it. It does not change from run to run; it's up to the client
# to perform one-time initialization only once.
imsg = msg.new_session.initialize
_populate_user_info_msg(imsg.user_info)
imsg.environment_info.streamlit_version = __version__
imsg.environment_info.python_version = ".".join(map(str, sys.version_info))
imsg.session_state.run_on_save = self._run_on_save
imsg.session_state.script_is_running = (
self._state == AppSessionState.APP_IS_RUNNING
)
imsg.command_line = self._session_data.command_line
imsg.session_id = self.id
self.enqueue(msg)
def _enqueue_script_finished_message(
self, status: "ForwardMsg.ScriptFinishedStatus.ValueType"
) -> None:
"""Enqueue a script_finished ForwardMsg."""
msg = ForwardMsg()
msg.script_finished = status
self.enqueue(msg)
def handle_git_information_request(self) -> None:
msg = ForwardMsg()
try:
from streamlit.git_util import GitRepo
repo = GitRepo(self._session_data.main_script_path)
repo_info = repo.get_repo_info()
if repo_info is None:
return
repository_name, branch, module = repo_info
msg.git_info_changed.repository = repository_name
msg.git_info_changed.branch = branch
msg.git_info_changed.module = module
msg.git_info_changed.untracked_files[:] = repo.untracked_files
msg.git_info_changed.uncommitted_files[:] = repo.uncommitted_files
if repo.is_head_detached:
msg.git_info_changed.state = GitInfo.GitStates.HEAD_DETACHED
elif len(repo.ahead_commits) > 0:
msg.git_info_changed.state = GitInfo.GitStates.AHEAD_OF_REMOTE
else:
msg.git_info_changed.state = GitInfo.GitStates.DEFAULT
self.enqueue(msg)
except Exception as e:
# Users may never even install Git in the first place, so this
# error requires no action. It can be useful for debugging.
LOGGER.debug("Obtaining Git information produced an error", exc_info=e)
def handle_rerun_script_request(
self, client_state: Optional[ClientState] = None
) -> None:
"""Tell the ScriptRunner to re-run its script.
Parameters
----------
client_state : streamlit.proto.ClientState_pb2.ClientState | None
The ClientState protobuf to run the script with, or None
to use previous client state.
"""
self.request_rerun(client_state)
def handle_stop_script_request(self) -> None:
"""Tell the ScriptRunner to stop running its script."""
self._enqueue_script_request(ScriptRequest.STOP)
def handle_clear_cache_request(self) -> None:
"""Clear this app's cache.
Because this cache is global, it will be cleared for all users.
"""
legacy_caching.clear_cache()
caching.memo.clear()
caching.singleton.clear()
self._session_state.clear_state()
def handle_set_run_on_save_request(self, new_value: bool) -> None:
"""Change our run_on_save flag to the given value.
The browser will be notified of the change.
Parameters
----------
new_value : bool
New run_on_save value
"""
self._run_on_save = new_value
self._enqueue_session_state_changed_message()
def _enqueue_script_request(self, request: ScriptRequest, data: Any = None) -> None:
"""Enqueue a ScriptEvent into our ScriptEventQueue.
If a script thread is not already running, one will be created
to handle the event.
Parameters
----------
request : ScriptRequest
The type of request.
data : Any
Data associated with the request, if any.
"""
if self._state == AppSessionState.SHUTDOWN_REQUESTED:
LOGGER.warning("Discarding %s request after shutdown" % request)
return
self._script_request_queue.enqueue(request, data)
self._maybe_create_scriptrunner()
def _maybe_create_scriptrunner(self) -> None:
"""Create a new ScriptRunner if we have unprocessed script requests.
This is called every time a ScriptRequest is enqueued, and also
after a ScriptRunner shuts down, in case new requests were enqueued
during its termination.
This function should only be called on the main thread.
"""
if (
self._state == AppSessionState.SHUTDOWN_REQUESTED
or self._scriptrunner is not None
or not self._script_request_queue.has_request
):
return
# Create the ScriptRunner, attach event handlers, and start it
self._scriptrunner = ScriptRunner(
session_id=self.id,
session_data=self._session_data,
enqueue_forward_msg=self.enqueue,
client_state=self._client_state,
request_queue=self._script_request_queue,
session_state=self._session_state,
uploaded_file_mgr=self._uploaded_file_mgr,
)
self._scriptrunner.on_event.connect(self._on_scriptrunner_event)
self._scriptrunner.start()
def _populate_config_msg(msg: Config) -> None:
msg.gather_usage_stats = config.get_option("browser.gatherUsageStats")
msg.max_cached_message_age = config.get_option("global.maxCachedMessageAge")
msg.mapbox_token = config.get_option("mapbox.token")
msg.allow_run_on_save = config.get_option("server.allowRunOnSave")
msg.hide_top_bar = config.get_option("ui.hideTopBar")
def _populate_theme_msg(msg: CustomThemeConfig) -> None:
enum_encoded_options = {"base", "font"}
theme_opts = config.get_options_for_section("theme")
if not any(theme_opts.values()):
return
for option_name, option_val in theme_opts.items():
if option_name not in enum_encoded_options and option_val is not None:
setattr(msg, to_snake_case(option_name), option_val)
# NOTE: If unset, base and font will default to the protobuf enum zero
# values, which are BaseTheme.LIGHT and FontFamily.SANS_SERIF,
# respectively. This is why we both don't handle the cases explicitly and
# also only log a warning when receiving invalid base/font options.
base_map = {
"light": msg.BaseTheme.LIGHT,
"dark": msg.BaseTheme.DARK,
}
base = theme_opts["base"]
if base is not None:
if base not in base_map:
LOGGER.warning(
f'"{base}" is an invalid value for theme.base.'
f" Allowed values include {list(base_map.keys())}."
' Setting theme.base to "light".'
)
else:
msg.base = base_map[base]
font_map = {
"sans serif": msg.FontFamily.SANS_SERIF,
"serif": msg.FontFamily.SERIF,
"monospace": msg.FontFamily.MONOSPACE,
}
font = theme_opts["font"]
if font is not None:
if font not in font_map:
LOGGER.warning(
f'"{font}" is an invalid value for theme.font.'
f" Allowed values include {list(font_map.keys())}."
' Setting theme.font to "sans serif".'
)
else:
msg.font = font_map[font]
def _populate_user_info_msg(msg: UserInfo) -> None:
msg.installation_id = Installation.instance().installation_id
msg.installation_id_v3 = Installation.instance().installation_id_v3
if Credentials.get_current().activation:
msg.email = Credentials.get_current().activation.email
else:
msg.email = ""
|
def concat(s1, s2):
if not s1:
return s2
return s1[0:1] + concat(s1[1:], s2)
def reverse(s1):
if not s1:
return s1
return concat(reverse(s1[1:]), s1[0])
def prefix(s1, s2):
if s1 == '' and s2 != '':
return True
if s1[:1] == s2[:1]:
return prefix(s1[1:], s2[1:])
return False
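# Illustrative self-checks for the recursive helpers above (hedged additions,
# not part of the original exercise):
assert concat('ab', 'cd') == 'abcd'    # builds the result one character at a time
assert reverse('abc') == 'cba'         # reverse of the tail, then the head
assert prefix('ab', 'abc') is True     # 'ab' is a proper prefix of 'abc'
assert prefix('abc', 'ab') is False    # a longer string cannot be a prefix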
s1 = input()
s2 = input()
print(concat(s1, s2))
print(reverse(s1))
print(prefix(s1, s2))
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_sdk_model.interfaces.audioplayer.caption_data import CaptionData as CaptionData_e119f120
class Stream(object):
"""
:param expected_previous_token:
:type expected_previous_token: (optional) str
:param token:
:type token: (optional) str
:param url:
:type url: (optional) str
:param offset_in_milliseconds:
:type offset_in_milliseconds: (optional) int
:param caption_data:
:type caption_data: (optional) ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData
"""
deserialized_types = {
'expected_previous_token': 'str',
'token': 'str',
'url': 'str',
'offset_in_milliseconds': 'int',
'caption_data': 'ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData'
} # type: Dict
attribute_map = {
'expected_previous_token': 'expectedPreviousToken',
'token': 'token',
'url': 'url',
'offset_in_milliseconds': 'offsetInMilliseconds',
'caption_data': 'captionData'
} # type: Dict
supports_multiple_types = False
def __init__(self, expected_previous_token=None, token=None, url=None, offset_in_milliseconds=None, caption_data=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[int], Optional[CaptionData_e119f120]) -> None
"""
:param expected_previous_token:
:type expected_previous_token: (optional) str
:param token:
:type token: (optional) str
:param url:
:type url: (optional) str
:param offset_in_milliseconds:
:type offset_in_milliseconds: (optional) int
:param caption_data:
:type caption_data: (optional) ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData
"""
self.__discriminator_value = None # type: str
self.expected_previous_token = expected_previous_token
self.token = token
self.url = url
self.offset_in_milliseconds = offset_in_milliseconds
self.caption_data = caption_data
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, Stream):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
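# Hedged usage sketch (an addition, not part of the SDK source): build a
# Stream and round-trip it through to_dict(). The token and URL values are
# made up for illustration.
if __name__ == "__main__":
    stream = Stream(
        token="track-42",
        url="https://example.com/audio/track-42.mp3",
        offset_in_milliseconds=0,
    )
    # Unset fields (expected_previous_token, caption_data) serialize as None.
    print(stream.to_dict())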
|
# -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
from __future__ import print_function
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
from IPython.utils import py3compat
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'notebook': 'nbAgg',
'agg': 'agg',
'inline': 'module://ipykernel.pylab.backend_inline',
'ipympl': 'module://ipympl.backend_nbagg',
}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
# And some backends that don't need GUI integration
del backend2gui['nbAgg']
del backend2gui['agg']
del backend2gui['module://ipykernel.pylab.backend_inline']
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
but the function continues processing further figures.
Parameters
----------
fig_nums : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy to remember, convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
from matplotlib import rcParams
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = fig.dpi
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
# build keyword args
kw = dict(
format=fmt,
facecolor=fig.get_facecolor(),
edgecolor=fig.get_edgecolor(),
dpi=dpi,
bbox_inches=bbox_inches,
)
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
# Make sure that retina_figure acts just like print_figure and returns
# None when the figure is empty.
if pngdata is None:
return
w, h = _pngxy(pngdata)
metadata = dict(width=w//2, height=h//2)
return pngdata, metadata
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pyplot as plt
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if plt.draw_if_interactive.called:
plt.draw()
plt.draw_if_interactive.called = False
# re-draw everything that is stale
try:
da = plt.draw_all
except AttributeError:
pass
else:
da()
return mpl_execfile
def _reshow_nbagg_figure(fig):
"""reshow an nbagg figure"""
try:
reshow = fig.canvas.manager.reshow
except AttributeError:
raise NotImplementedError()
else:
reshow()
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
==========
shell : InteractiveShell
The main IPython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
import matplotlib
from matplotlib.figure import Figure
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, py3compat.string_types):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
mplbackend = matplotlib.get_backend().lower()
if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
formatter = shell.display_formatter.ipython_display_formatter
formatter.for_type(Figure, _reshow_nbagg_figure)
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
# If we have already had a gui active, we need it and inline are the
# ones allowed.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pyplot as plt
plt.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from ipykernel.pylab.backend_inline import InlineBackend
except ImportError:
return
import matplotlib
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from ipykernel.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
# Save rcParams that will be overwritten
shell._saved_rcParams = dict()
for k in cfg.rc:
shell._saved_rcParams[k] = matplotlib.rcParams[k]
# load inline_rc
matplotlib.rcParams.update(cfg.rc)
new_backend_name = "inline"
else:
from ipykernel.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
matplotlib.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
new_backend_name = "other"
# only enable the formats once -> don't change the enabled formats (which the user may
# have changed) when getting another "%matplotlib inline" call.
# See https://github.com/ipython/ipykernel/issues/29
cur_backend = getattr(configure_inline_support, "current_backend", "unset")
if new_backend_name != cur_backend:
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
configure_inline_support.current_backend = new_backend_name
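# Hedged usage sketch (an addition, not part of the original module): the
# typical order in which the helpers above are combined when a shell handles
# "%matplotlib <gui>". `shell` is assumed to be an InteractiveShell-like
# object exposing `user_ns` and `display_formatter`.
def _example_enable_matplotlib(shell, gui='inline'):
    gui, backend = find_gui_and_backend(gui)
    activate_matplotlib(backend)
    import_pylab(shell.user_ns, import_all=False)
    configure_inline_support(shell, backend)
    return gui, backend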
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PoolOperations:
"""PoolOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.batch.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_batch_account(
self,
resource_group_name: str,
account_name: str,
maxresults: Optional[int] = None,
select: Optional[str] = None,
filter: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.ListPoolsResult"]:
"""Lists all of the pools in the specified account.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param maxresults: The maximum number of items to return in the response.
:type maxresults: int
:param select: Comma separated list of properties that should be returned. e.g.
"properties/provisioningState". Only top level properties under properties/ are valid for
selection.
:type select: str
:param filter: OData filter expression. Valid properties for filtering are:
name
properties/allocationState
properties/allocationStateTransitionTime
properties/creationTime
properties/provisioningState
properties/provisioningStateTransitionTime
properties/lastModified
properties/vmSize
properties/interNodeCommunication
properties/scaleSettings/autoScale
properties/scaleSettings/fixedScale.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListPoolsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.batch.models.ListPoolsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListPoolsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_batch_account.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListPoolsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_batch_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
parameters: "_models.Pool",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs
) -> "_models.Pool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Pool')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
parameters: "_models.Pool",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs
) -> AsyncLROPoller["_models.Pool"]:
"""Creates a new pool inside the specified account.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:param parameters: Additional parameters for pool creation.
:type parameters: ~azure.mgmt.batch.models.Pool
:param if_match: The entity state (ETag) version of the pool to update. A value of "*" can be
used to apply the operation only if the pool already exists. If omitted, this operation will
always be applied.
:type if_match: str
:param if_none_match: Set to '*' to allow a new pool to be created, but to prevent updating an
existing pool. Other values will be ignored.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Pool or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.batch.models.Pool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
pool_name=pool_name,
parameters=parameters,
if_match=if_match,
if_none_match=if_none_match,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'} # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
parameters: "_models.Pool",
if_match: Optional[str] = None,
**kwargs
) -> "_models.Pool":
"""Updates the properties of an existing pool.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:param parameters: Pool properties that should be updated. Properties that are supplied will be
updated; any property not supplied will be unchanged.
:type parameters: ~azure.mgmt.batch.models.Pool
:param if_match: The entity state (ETag) version of the pool to update. This value can be
omitted or set to "*" to apply the operation unconditionally.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pool, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Pool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Pool')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified pool.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
account_name=account_name,
pool_name=pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'} # type: ignore
async def get(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> "_models.Pool":
"""Gets information about the specified pool.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pool, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Pool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'} # type: ignore
async def disable_auto_scale(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> "_models.Pool":
"""Disables automatic scaling for a pool.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pool, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Pool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.disable_auto_scale.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
disable_auto_scale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/disableAutoScale'} # type: ignore
async def stop_resize(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> "_models.Pool":
"""Stops an ongoing resize operation on the pool.
This does not restore the pool to its previous state before the resize operation: it only stops
any further changes being made, and the pool maintains its current state. After stopping, the
pool stabilizes at the number of nodes it was at when the stop operation was done. During the
stop operation, the pool allocation state changes first to stopping and then to steady. A
resize operation need not be an explicit resize pool request; this API can also be used to halt
the initial sizing of the pool when it is created.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pool, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Pool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.stop_resize.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
stop_resize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/stopResize'} # type: ignore
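# Hedged usage sketch (not part of the generated client): iterating pools with the async
# operations above, assuming a management client that exposes this class as `.pool` and
# using hypothetical resource names.
#   async for pool in client.pool.list_by_batch_account("my-rg", "mybatchaccount"):
#       print(pool.name)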
|
# -*- coding: utf-8 -*-
"""The VFS back-end CLI arguments helper."""
from __future__ import unicode_literals
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class VFSBackEndArgumentsHelper(interface.ArgumentsHelper):
"""VFS back-end CLI arguments helper."""
NAME = 'vfs_backend'
DESCRIPTION = 'dfVFS back-end command line arguments.'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'--vfs_back_end', '--vfs-back-end', dest='vfs_back_end',
choices=['auto', 'fsext', 'fshfs', 'fsntfs', 'tsk'], action='store',
metavar='TYPE', default='auto', help=(
'The preferred dfVFS back-end: "auto", "fsext", "fshfs", "fsntfs" '
'or "tsk".'))
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
vfs_back_end = cls._ParseStringOption(options, 'vfs_back_end')
setattr(configuration_object, '_vfs_back_end', vfs_back_end)
manager.ArgumentHelperManager.RegisterHelper(VFSBackEndArgumentsHelper)
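# Hedged usage sketch (not part of the original module): wiring the helper into an
# argparse parser; the CLITool construction below is illustrative only.
#   import argparse
#   from plaso.cli import tools
#
#   parser = argparse.ArgumentParser()
#   VFSBackEndArgumentsHelper.AddArguments(parser)
#   options = parser.parse_args(['--vfs_back_end', 'fsntfs'])
#
#   tool = tools.CLITool()
#   VFSBackEndArgumentsHelper.ParseOptions(options, tool)  # sets tool._vfs_back_end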
|
from .constants import ALL_ROLES, DB_ROLE, WEB_ROLE
from .database import setup_db
from .django import update_db, update_python_libs
from .nginx import stop_nginx, start_nginx
from .ssh import setup_ssh_key
from .supervisor import stop_supervisor, start_supervisor, update_supervisor
from .utils import get_ip
from .webserver import setup_web
from fabric.colors import green
from fabric.api import *
from .git import get_source
from .nginx import update_nginx
import cuisine
COMMON_PACKAGES = [
'subversion', 'mercurial', 'git-core', 'vim', 'python-dev', 'ufw',
'python-setuptools', 'htop', 'ntp', 'colordiff', 'python-software-properties',
'psmisc',
'libpq-dev', # postgres
]
@task
@roles(DB_ROLE)
@runs_once
def set_database_ip(interface='eth1'):
"""Set the ip of the database."""
env.db_ip = get_ip(interface)
@task
@roles(WEB_ROLE)
@runs_once
def set_web_server_ips(interface='eth1'):
"""Set the ips of the webservers."""
env.webserver_internal_ips = [get_ip(interface),]
@task
def set_port(port):
"""Set the port to use for ssh connections."""
env.port = port
@task
@roles(ALL_ROLES)
def setup_common():
"""Set common packages."""
print(green("Running setup_common.........."))
execute(setup_ssh_key)
cuisine.package_install(COMMON_PACKAGES, True)
sudo('yes | ufw enable')
sudo('ufw logging on')
sudo('ufw allow %(port)s' % env)
sudo('ufw limit ssh')
sudo('ufw default deny')
@task
@roles(WEB_ROLE)
def setup_run_dirs():
for d in (env.log_location, env.socket_location):
with settings(warn_only=True):
sudo('mkdir %s' % d)
sudo('chown -R %s: %s' % (env.deploy_user, d))
@task
def setup():
"""Setup the servers."""
execute(setup_db)
execute(setup_web)
execute(update)
@task
def update():
"""Update the servers w/the latest source code + migrations."""
execute(stop_supervisor)
execute(stop_nginx)
execute(get_source)
execute(update_python_libs)
execute(update_db)
execute(update_supervisor)
execute(update_nginx)
execute(start_supervisor)
execute(start_nginx)
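# Typical invocation with the Fabric 1.x task syntax these decorators assume, e.g.:
#   fab set_port:22 setup
#   fab update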
|
# TunaBot Ext - Help
from discord.ext import commands
import discord
from aiofiles import open as async_open
from ujson import load, loads
from data import is_admin
JSON_PATH = "data/help.json"
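# The help command below assumes help.json maps category names to {command: description}
# objects, e.g. (hypothetical contents):
#   {
#       "general": {"help": "Show this help message.", "ping": "Check the bot's latency."},
#       "admin": {"reloadhelp": "Reload help.json from disk."}
#   }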
class Help(commands.Cog):
def __init__(self, bot):
self.bot, self.tuna = bot, bot.data
with open(JSON_PATH, 'r') as f:
self.data = load(f)
@commands.command(aliases=["rh"])
@is_admin
async def reloadhelp(self, ctx):
async with async_open(JSON_PATH, "r") as f:
self.data = loads(f.read())
await ctx.reply("Ok")
@commands.command()
async def help(self, ctx, *, cmd=None):
title, description = None, None
if cmd:
keys = []
for category in self.data:
if cmd == category:
keys.append(category)
break
for c in self.data[category]:
if c == cmd:
keys.append(category)
keys.append(c)
break
if len(keys) == 2:
title = f"HELP for {cmd}"
description = self.data[keys[0]][keys[1]]
elif len(keys) == 1:
title = f"HELP for {cmd}"
description = "\n".join(f"`{key}`" for key in self.data[category])
else:
title, description = "HELP", "Not found."
else:
title, description = "HELP", "\n".join(f"`{key}`" for key in self.data)
await ctx.reply(embed=discord.Embed(title=title, description=description))
def setup(bot):
bot.add_cog(Help(bot))
|
import os
from requests.utils import requote_uri
from pyrogram import Client, filters
Bot = Client(
"Requote-URL-Bot",
bot_token = os.environ["BOT_TOKEN"],
api_id = int(os.environ["API_ID"]),
api_hash = os.environ["API_HASH"]
)
@Bot.on_message(filters.text)
async def filter(bot, update):
await update.reply_text(
text=f"`{requote_uri(update.text)}`\n\nMade by @FayasNoushad",
disable_web_page_preview=True,
quote=True
)
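# For reference: requests.utils.requote_uri percent-encodes unsafe characters while
# leaving already-encoded sequences intact, e.g.
#   requote_uri("https://example.com/a b")  ->  "https://example.com/a%20b"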
Bot.run()
|
""" This is magic glue for integrating the frontend and backend.
This is NOT the place for backend customizations. Go to
api/historic_hebrew_dates_ui/settings.py instead.
"""
import os.path as op
here = op.dirname(op.abspath(__file__))
# First, import the standard backend settings. This requires some
# magic because the backend directory itself is not a Python package.
# Imitated from https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
# or
# https://stackoverflow.com/a/29855240
# (respectively for Python >= 3.5 and Python 3.4)
import sys
from importlib import util, machinery
settings_name = 'settings'
settings_path = op.join(here, 'api', 'historic_hebrew_dates_ui', 'settings.py')
if sys.version_info >= (3, 5):
spec = util.spec_from_file_location(settings_name, settings_path)
settings = util.module_from_spec(spec)
spec.loader.exec_module(settings)
else:
settings = machinery.SourceFileLoader(settings_name, settings_path).load_module()
sys.modules[settings_name] = settings
from settings import *
# Next, augment the settings to make the backend aware of the frontend.
STATICFILES_DIRS += [
op.join(here, 'web-ui', 'dist'),
op.join(here, 'web-ui', 'node_modules'),
]
PROXY_FRONTEND = "http://localhost:4200"
|
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from oslo_utils import importutils
from delfin import exception
from delfin.common import constants
class AlertHandlerTestCase(unittest.TestCase):
ALERT_HANDLER_CLASS = 'delfin.drivers.dell_emc.vmax.alert_handler' \
'.snmp_alerts.AlertHandler'
def _get_alert_handler(self):
alert_handler_class = importutils.import_class(
self.ALERT_HANDLER_CLASS)
alert_handler = alert_handler_class()
return alert_handler
def _get_fake_alert_info(self):
alert_info = {
'1.3.6.1.3.94.1.11.1.3.0': 79,
'1.3.6.1.3.94.1.6.1.20.0': '000192601409',
'1.3.6.1.3.94.1.11.1.7.0': 'topology',
'1.3.6.1.3.94.1.11.1.9.0': 'Symmetrix 000192601409 FastSRP '
'SRP_1 : Remote (SRDF) diagnostic '
'event trace triggered.',
'1.3.6.1.3.94.1.11.1.6.0': '6',
'1.3.6.1.3.94.1.6.1.3.0': 'storage-subsystem',
'1.3.6.1.4.1.1139.3.8888.1.0.0': 'symmetrix',
'1.3.6.1.4.1.1139.3.8888.2.0.0': '1050',
'1.3.6.1.4.1.1139.3.8888.3.0.0': '1051',
'1.3.6.1.4.1.1139.3.8888.4.0.0': 'SRP_1'}
return alert_info
def test_parse_alert_with_all_necessary_info(self):
""" Success flow with all necessary parameters"""
alert_handler_inst = self._get_alert_handler()
alert = self._get_fake_alert_info()
expected_alert_model = {
'alert_id': alert['1.3.6.1.4.1.1139.3.8888.2.0.0'],
'alert_name': 'SYMAPI_AEVENT2_UID_MOD_DIAG_TRACE_TRIG',
'severity': constants.Severity.WARNING,
'category': constants.Category.NOT_SPECIFIED,
'type': constants.EventType.EQUIPMENT_ALARM,
'sequence_number': alert['1.3.6.1.3.94.1.11.1.3.0'],
'serial_number': '000192601409',
'description': alert['1.3.6.1.3.94.1.11.1.9.0'],
'recovery_advice': 'None',
'resource_type': alert['1.3.6.1.3.94.1.6.1.3.0'],
'location': 'Array id=000192601409,'
'Component type=Symmetrix Disk '
'Group,'
'Component name=SRP_1,'
'Event source=symmetrix',
}
context = {}
alert_model = alert_handler_inst.parse_alert(context, alert)
# occur_time depends on current time
# Verify that all other fields are matching
expected_alert_model['occur_time'] = alert_model['occur_time']
self.assertDictEqual(expected_alert_model, alert_model)
def test_parse_alert_without_mandatory_info(self):
""" Error flow with some mandatory parameters missing"""
alert_handler_inst = self._get_alert_handler()
context = {}
alert = self._get_fake_alert_info()
alert['1.3.6.1.3.94.1.11.1.6.0'] = ''
self.assertRaisesRegex(exception.InvalidInput, "Mandatory information "
"connUnitEventSeverity"
" missing",
alert_handler_inst.parse_alert, context, alert)
|
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import functools
import os
import textwrap
required_conan_version = ">=1.43.0"
class Hdf5Conan(ConanFile):
name = "hdf5"
description = "HDF5 is a data model, library, and file format for storing and managing data."
license = "BSD-3-Clause"
topics = ("hdf5", "hdf", "data")
homepage = "https://portal.hdfgroup.org/display/HDF5/HDF5"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"enable_cxx": [True, False],
"hl": [True, False],
"threadsafe": [True, False],
"with_zlib": [True, False],
"szip_support": [None, "with_libaec", "with_szip"],
"szip_encoding": [True, False],
"parallel": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"enable_cxx": True,
"hl": True,
"threadsafe": False,
"with_zlib": True,
"szip_support": None,
"szip_encoding": False,
"parallel": False,
}
generators = "cmake"
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
if not self.options.enable_cxx:
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
if self.options.enable_cxx or self.options.hl or (self.settings.os == "Windows" and not self.options.shared):
del self.options.threadsafe
if not bool(self.options.szip_support):
del self.options.szip_encoding
def requirements(self):
if self.options.with_zlib:
self.requires("zlib/1.2.12")
if self.options.szip_support == "with_libaec":
self.requires("libaec/1.0.6")
elif self.options.szip_support == "with_szip":
self.requires("szip/2.1.1")
if self.options.parallel:
self.requires("openmpi/4.1.0")
def validate(self):
if hasattr(self, "settings_build") and tools.cross_building(self, skip_x64_x86=True):
# While building it runs some executables like H5detect
raise ConanInvalidConfiguration("Current recipe doesn't support cross-building (yet)")
if self.options.parallel:
if self.options.enable_cxx:
raise ConanInvalidConfiguration("Parallel and C++ options are mutually exclusive")
if self.options.get_safe("threadsafe", False):
raise ConanInvalidConfiguration("Parallel and Threadsafe options are mutually exclusive")
if self.options.szip_support == "with_szip" and self.options.szip_encoding and \
not self.options["szip"].enable_encoding:
raise ConanInvalidConfiguration("encoding must be enabled in szip dependency (szip:enable_encoding=True)")
def source(self):
tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
# Do not force PIC
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"set (CMAKE_POSITION_INDEPENDENT_CODE ON)", "")
@functools.lru_cache(1)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["HDF5_EXTERNALLY_CONFIGURED"] = True
cmake.definitions["HDF5_EXTERNAL_LIB_PREFIX"] = ""
cmake.definitions["HDF5_USE_FOLDERS"] = False
cmake.definitions["HDF5_NO_PACKAGES"] = True
cmake.definitions["ALLOW_UNSUPPORTED"] = False
if tools.Version(self.version) >= "1.10.6":
cmake.definitions["ONLY_SHARED_LIBS"] = self.options.shared
cmake.definitions["BUILD_STATIC_EXECS"] = False
cmake.definitions["HDF5_ENABLE_COVERAGE"] = False
cmake.definitions["HDF5_ENABLE_USING_MEMCHECKER"] = False
if tools.Version(self.version) >= "1.10.0":
cmake.definitions["HDF5_MEMORY_ALLOC_SANITY_CHECK"] = False
if tools.Version(self.version) >= "1.10.5":
cmake.definitions["HDF5_ENABLE_PREADWRITE"] = True
cmake.definitions["HDF5_ENABLE_DEPRECATED_SYMBOLS"] = True
cmake.definitions["HDF5_BUILD_GENERATORS"] = False
cmake.definitions["HDF5_ENABLE_TRACE"] = False
if self.settings.build_type == "Debug":
cmake.definitions["HDF5_ENABLE_INSTRUMENT"] = False # Option?
cmake.definitions["HDF5_ENABLE_PARALLEL"] = self.options.parallel
cmake.definitions["HDF5_ENABLE_Z_LIB_SUPPORT"] = self.options.with_zlib
cmake.definitions["HDF5_ENABLE_SZIP_SUPPORT"] = bool(self.options.szip_support)
if bool(self.options.szip_support):
cmake.definitions["CONAN_SZIP_LIBNAME"] = self._get_szip_lib() # this variable is added by conanize-link-szip*.patch
cmake.definitions["HDF5_ENABLE_SZIP_ENCODING"] = self.options.get_safe("szip_encoding", False)
cmake.definitions["HDF5_PACKAGE_EXTLIBS"] = False
cmake.definitions["HDF5_ENABLE_THREADSAFE"] = self.options.get_safe("threadsafe", False)
cmake.definitions["HDF5_ENABLE_DEBUG_APIS"] = False # Option?
cmake.definitions["BUILD_TESTING"] = False
cmake.definitions["HDF5_INSTALL_INCLUDE_DIR"] = os.path.join(self.package_folder, "include", "hdf5")
cmake.definitions["HDF5_BUILD_TOOLS"] = False
cmake.definitions["HDF5_BUILD_EXAMPLES"] = False
cmake.definitions["HDF5_BUILD_HL_LIB"] = self.options.hl
cmake.definitions["HDF5_BUILD_FORTRAN"] = False
cmake.definitions["HDF5_BUILD_CPP_LIB"] = self.options.enable_cxx
if tools.Version(self.version) >= "1.10.0":
cmake.definitions["HDF5_BUILD_JAVA"] = False
cmake.configure(build_folder=self._build_subfolder)
return cmake
def _get_szip_lib(self):
return {
"with_libaec": "libaec",
"with_szip": "szip",
}.get(str(self.options.szip_support))
def _components(self):
hdf5_requirements = []
if self.options.with_zlib:
hdf5_requirements.append("zlib::zlib")
if self.options.szip_support == "with_libaec":
hdf5_requirements.append("libaec::libaec")
elif self.options.szip_support == "with_szip":
hdf5_requirements.append("szip::szip")
if self.options.parallel:
hdf5_requirements.append("openmpi::openmpi")
return {
"hdf5_c": {"component": "C", "alias_target": "hdf5", "requirements": hdf5_requirements},
"hdf5_hl": {"component": "HL", "alias_target": "hdf5_hl", "requirements": ["hdf5_c"]},
"hdf5_cpp": {"component": "CXX", "alias_target": "hdf5_cpp", "requirements": ["hdf5_c"]},
"hdf5_hl_cpp": {"component": "HL_CXX", "alias_target": "hdf5_hl_cpp", "requirements": ["hdf5_c", "hdf5_cpp", "hdf5_hl"]},
}
@staticmethod
def _create_cmake_module_alias_targets(module_file, targets, is_parallel):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent("""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""".format(alias=alias, aliased=aliased))
# add the additional hdf5_hl_cxx target when both CXX and HL components are specified
content += textwrap.dedent("""\
if(TARGET HDF5::HL AND TARGET HDF5::CXX AND NOT TARGET hdf5::hdf5_hl_cpp)
add_library(hdf5::hdf5_hl_cpp INTERFACE IMPORTED)
set_property(TARGET hdf5::hdf5_hl_cpp PROPERTY INTERFACE_LINK_LIBRARIES HDF5::HL_CXX)
endif()
""")
content += textwrap.dedent("set(HDF5_IS_PARALLEL {})".format("ON" if is_parallel else "OFF"))
tools.save(module_file, content)
@property
def _module_file_rel_path(self):
return os.path.join("lib", "cmake",
"conan-official-{}-targets.cmake".format(self.name))
def package(self):
self.copy("COPYING", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
os.remove(os.path.join(self.package_folder, "lib", "libhdf5.settings"))
# Mimic the official CMake FindHDF5 targets. HDF5::HDF5 refers to the global target as per conan,
# but component targets have a lower case namespace prefix. hdf5::hdf5 refers to the C library only
components = self._components()
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_file_rel_path),
{"hdf5::{}".format(component["alias_target"]): "HDF5::{}".format(component["component"]) for component in components.values()},
self.options.get_safe("parallel", False)
)
def package_info(self):
def add_component(component_name, component, alias_target, requirements):
def _config_libname(lib):
if self.settings.os == "Windows" and self.settings.compiler != "gcc" and not self.options.shared:
lib = "lib" + lib
if self.settings.build_type == "Debug":
debug_postfix = "_D" if self.settings.os == "Windows" else "_debug"
return lib + debug_postfix
# See config/cmake_ext_mod/HDFMacros.cmake
return lib
self.cpp_info.components[component_name].set_property("cmake_target_name", f"hdf5::{alias_target}")
self.cpp_info.components[component_name].set_property("pkg_config_name", alias_target)
self.cpp_info.components[component_name].libs = [_config_libname(alias_target)]
self.cpp_info.components[component_name].requires = requirements
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.components[component_name].names["cmake_find_package"] = component
self.cpp_info.components[component_name].names["cmake_find_package_multi"] = component
self.cpp_info.components[component_name].build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.components[component_name].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
self.cpp_info.set_property("cmake_find_mode", "both")
self.cpp_info.set_property("cmake_file_name", "HDF5")
self.cpp_info.set_property("cmake_target_name", "HDF5::HDF5")
self.cpp_info.set_property("pkg_config_name", "hdf5-all-do-not-use") # to avoid conflict with hdf5_c component
components = self._components()
add_component("hdf5_c", **components["hdf5_c"])
self.cpp_info.components["hdf5_c"].includedirs.append(os.path.join("include", "hdf5"))
if self.settings.os == "Linux":
self.cpp_info.components["hdf5_c"].system_libs.extend(["dl", "m"])
if self.options.get_safe("threadsafe"):
self.cpp_info.components["hdf5_c"].system_libs.append("pthread")
if self.options.shared:
self.cpp_info.components["hdf5_c"].defines.append("H5_BUILT_AS_DYNAMIC_LIB")
if self.options.get_safe("enable_cxx"):
add_component("hdf5_cpp", **components["hdf5_cpp"])
if self.options.get_safe("hl"):
add_component("hdf5_hl", **components["hdf5_hl"])
if self.options.get_safe("enable_cxx"):
add_component("hdf5_hl_cpp", **components["hdf5_hl_cpp"])
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.names["cmake_find_package"] = "HDF5"
self.cpp_info.names["cmake_find_package_multi"] = "HDF5"
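# Hedged consumer sketch (not part of the recipe): a downstream conanfile.py might depend
# on this package roughly like so; the version and options below are hypothetical.
#   class ConsumerConan(ConanFile):
#       requires = "hdf5/1.12.1"
#       default_options = {"hdf5:shared": False, "hdf5:hl": True, "hdf5:enable_cxx": True}
#       generators = "cmake_find_package"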
|
from eventsourcing.domain import Aggregate, event
from uuid import uuid5, NAMESPACE_URL
class Account(Aggregate):
"""A simple-as-can-be bank account"""
@event('Created')
def __init__(self):
self.balance = 0
@event('Credited')
def credit(self, amount: int):
self.balance += amount
@event('Debited')
def debit(self, amount: int):
self.balance -= amount
class Ledger(Aggregate):
"""A simple-as-can-be Ledger to track net movements across all accounts"""
def __init__(self, name):
self.name = name
self.transaction_count = 0
self.balance = 0
@classmethod
def create_id(cls, name):
"""Enable predictable IDs so that a Ledger can be retrieved
using its name - even if its ID isn't known
"""
return uuid5(NAMESPACE_URL, f'/ledgers/{name}')
@event('TransactionAdded')
def add_transaction(self, amount: int):
self.transaction_count += 1
self.balance += amount
def get_balance(self):
return self.balance
def get_transaction_count(self):
return self.transaction_count
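# --- Illustrative usage (not part of the original module): a minimal, untested sketch ---
# Assumes the standard eventsourcing Application API (Application, .save, .repository);
# run directly to exercise the aggregates above.
if __name__ == "__main__":
    from eventsourcing.application import Application

    app = Application()

    # Create an account and credit it; the resulting events are stored by the application.
    account = Account()
    account.credit(100)
    app.save(account)

    # Track the movement on a named ledger.
    ledger = Ledger("main")
    ledger.add_transaction(100)
    app.save(ledger)

    # Because create_id() is deterministic, the ledger can be re-fetched by name alone.
    same_ledger = app.repository.get(Ledger.create_id("main"))
    assert same_ledger.get_balance() == 100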
|
"""
Takes a [Notion.so](https://notion.so) export .zip and enhances it
"""
import tempfile
import sys
import os
import time
import re
import argparse
import zipfile
import urllib.parse
from datetime import datetime
from pathlib import Path
import backoff
import requests
from emoji_extractor.extract import Extractor as EmojiExtractor
from notion.client import NotionClient
from notion.block import PageBlock
def noteNameRewrite(nCl, originalNameNoExt):
"""
Takes original name (with no extension) and renames it using the Notion ID
and data from Notion itself
* Removes the Notion ID
* Looks up the Notion ID for its icon, and prepends it to the name if we can find it
"""
match = re.search(r"(.+?) ([0-9a-f]{32})$", originalNameNoExt)
if not match:
return (None, None, None)
notionId = match[2]
# Query notion for the ID
#print(f"Fetching Notion ID '{notionId}' for '{originalNameNoExt}'")
try:
pageBlock = nCl.get_block(notionId)
except requests.exceptions.HTTPError:
print(f"Failed to retrieve ID {notionId}")
return (None, None, None)
# The ID might not be a PageBlock (like when a note with no child PageBlocks
# has an image in it, generating a folder; Notion uses the ID of the first
# ImageBlock, maybe a bug on Notion's end? lol)
if not isinstance(pageBlock, PageBlock):
print(f"Block at ID {notionId}, was not PageBlock. Was {type(pageBlock).__name__}")
if hasattr(pageBlock, 'parent') and pageBlock.parent is not None:
# Try traversing up the parents for the first page
while hasattr(pageBlock, 'parent') and not isinstance(pageBlock, PageBlock):
pageBlock = pageBlock.parent
if isinstance(pageBlock, PageBlock):
print(f"Using some .parent as PageBlock")
elif hasattr(pageBlock, 'children') and pageBlock.children is not None:
# Try to find a PageBlock in the children, but only use if one single one exists
pageBlockChildren = [c for c in pageBlock.children if isinstance(c, PageBlock)]
if len(pageBlockChildren) != 1:
print(f"Ambiguous .children, contained {len(pageBlockChildren)} children PageBlocks")
else:
print(f"Using .children[0] as PageBlock")
pageBlock = pageBlockChildren[0]
if not isinstance(pageBlock, PageBlock):
print(f"Failed to retrieve PageBlock for ID {notionId}")
return (None, None, None)
#print(f"Found parent '{type(pageBlock).__name__}' instead")
# Check for name truncation
newName = match[1]
if len(match[1]) == 50:
# Use full name instead, invalids replaced with " ", like the normal export
# TODO: These are just Windows reserved characters
# TODO: 200 was just a value to stop Windows from complaining
newName = re.sub(r"[\\/?:*\"<>|]", " ", pageBlock.title)
if len(newName) > 200:
print(f"'{newName}' too long, truncating to 200")
newName = newName[0:200]
# Add icon to the front if it's there and usable
icon = pageBlock.icon
if icon and EmojiExtractor().big_regex.match(icon): # A full match of a single emoji, might be None or an https://aws.amazon uploaded icon
newName = f"{icon} {newName}"
# Also get the times to set the file to
createdTime = datetime.fromtimestamp(int(pageBlock._get_record_data()["created_time"])/1000)
lastEditedTime = datetime.fromtimestamp(int(pageBlock._get_record_data()["last_edited_time"])/1000)
return (newName, createdTime, lastEditedTime)
class NotionExportRenamer:
"""
Holds state information for renaming a single Notion.so export. Allows it to avoid
naming collisions and store other state
"""
def __init__(self, notionClient, rootPath):
self.notionClient = notionClient
self.rootPath = rootPath
# Dict containing all the paths we've renamed and what they were renamed to
# (plus createdTime and lastEditedTime). Paths relative to rootPath mapped to
# the 3-tuples returned from noteNameRewrite
self._renameCache = {}
# Dict whose keys are unrenamed paths with just the last component renamed,
# mapped to True. Used to see if other files in the folder might end up with
# the same name so we can act accordingly
self._collisionCache = {}
def renameAndTimesWithNotion(self, pathToRename):
"""
Takes an original on file-system path and rewrites _just the basename_. It
collects rename operations for speed and collision prevention (as some renames
will cause the same name to occur)
@param {string} pathToRename The path to rename the basename of. Must point to an
actual unrenamed file/folder on disk rooted at self.rootPath so we can scan around it
@returns {tuple} 3 tuple of new name, created time and modified time
"""
if pathToRename in self._renameCache:
return self._renameCache[pathToRename]
path, name = os.path.split(pathToRename)
nameNoExt, ext = os.path.splitext(name)
newNameNoExt, createdTime, lastEditedTime = noteNameRewrite(self.notionClient, nameNoExt)
if not newNameNoExt: # No rename happened, probably no ID in the name or not an .md file
self._renameCache[pathToRename] = (name, None, None)
else:
# Merge files into folders in path at same name if that folder exists
if ext == '.md':
p = Path(os.path.join(self.rootPath, path, nameNoExt))
if p.exists() and p.is_dir():
# NOTE: newNameNoExt can contain a '/' for path joining later!
newNameNoExt = os.path.join(newNameNoExt, "!index")
# Check to see if name collides
if os.path.join(path, newNameNoExt) in self._collisionCache:
# If it does, try progressive (i) until a new one is found
i = 1
collidingNameNoExt = newNameNoExt
while os.path.join(path, newNameNoExt) in self._collisionCache:
newNameNoExt = f"{collidingNameNoExt} ({i})"
i += 1
self._renameCache[pathToRename] = (f"{newNameNoExt}{ext}", createdTime, lastEditedTime)
self._collisionCache[os.path.join(path, newNameNoExt)] = True
return self._renameCache[pathToRename]
def renameWithNotion(self, pathToRename):
"""
Takes an original on file-system path and rewrites _just the basename_. It
collects rename operations for speed and collision prevention (as some renames
will cause the same name to occur)
@param {string} pathToRename The path to rename the basename of. Must point to an
actual unrenamed file/folder on disk rooted at self.rootPath so we can scan around it
@returns {string} The new name
"""
return self.renameAndTimesWithNotion(pathToRename)[0]
def renamePathWithNotion(self, pathToRename):
"""
Renames all parts of a path
@param {string} pathToRename A real path on disk to a file or folder rooted at
self.rootPath. All pieces of the path will be renamed
"""
pathToRenameSplit = re.split(r"[\\/]", pathToRename)
paths = [os.path.join(*pathToRenameSplit[0:rpc + 1]) for rpc in range(len(pathToRenameSplit))]
return os.path.join(*[self.renameWithNotion(rp) for rp in paths])
def renamePathAndTimesWithNotion(self, pathToRename):
"""
Renames all parts of a path and return the created and lastEditedTime for the last
part of the path (the file)
@param {string} pathToRename A real path on disk to a file or folder root at
self.rootPath. All pieces of the path will be renamed
"""
newPath = self.renamePathWithNotion(os.path.dirname(pathToRename))
newName, createdTime, lastEditedTime = self.renameAndTimesWithNotion(pathToRename)
return (os.path.join(newPath, newName), createdTime, lastEditedTime)
def mdFileRewrite(renamer, mdFilePath, mdFileContents=None, removeTopH1=False, rewritePaths=False):
"""
Takes a Notion exported md file and rewrites parts of it
    @param {string} mdFilePath Path to the markdown file that's being edited, rooted at
self.rootPath
@param {string} [mdFileContents=None] The contents of the markdown file, if not provided
we will read it manually
@param {boolean} [removeTopH1=False] Remove the title on the first line of the MD file?
@param {boolean} [rewritePaths=False] Rewrite the relative paths in the MD file (images and links)
using Notion file name rewriting
"""
if not mdFileContents:
raise NotImplementedError("TODO: Not passing mdFileContents is not implemented... please pass it ;w;")
newMDFileContents = mdFileContents
if removeTopH1:
lines = mdFileContents.split("\n")
newMDFileContents = "\n".join(lines[1:])
if rewritePaths:
        # Notion links/images use relative paths to other notes, which we can't know without
# consulting the file tree and renaming (to handle duplicates and such)
# Notion links are also URL encoded
# Can't use finditer because we modify the string each time...
searchStartIndex = 0
while True:
m = re.search(r"!?\[.+?\]\(([\w\d\-._~:/?=#%\]\[@!$&'\(\)*+,;]+?)\)", newMDFileContents[searchStartIndex:])
if not m:
break
if re.search(r":/", m.group(1)):
searchStartIndex = searchStartIndex + m.end(1)
continue # Not a local file path
relTargetFilePath = urllib.parse.unquote(m.group(1))
# Convert the current MD file path and link target path to the renamed version
# (also taking into account potentially mdFilePath renames moving the directory)
mdDirPath = os.path.dirname(mdFilePath)
newTargetFilePath = renamer.renamePathWithNotion(os.path.join(mdDirPath, relTargetFilePath))
newMDDirPath = os.path.dirname(renamer.renamePathWithNotion(mdFilePath))
# Find the relative path to the newly converted paths for both files
newRelTargetFilePath = os.path.relpath(newTargetFilePath, newMDDirPath)
# Convert back to the way markdown expects the link to be
newRelTargetFilePath = re.sub(r"\\", "/", newRelTargetFilePath)
newRelTargetFilePath = urllib.parse.quote(newRelTargetFilePath)
# Replace the path in the original string with the new relative renamed
# target path
newMDFileContents = newMDFileContents[0:m.start(1) + searchStartIndex] + newRelTargetFilePath + newMDFileContents[m.end(1) + searchStartIndex:]
searchStartIndex = searchStartIndex + m.start(1) + len(newRelTargetFilePath)
return newMDFileContents
def rewriteNotionZip(notionClient, zipPath, outputPath=".", removeTopH1=False, rewritePaths=True):
"""
Takes a Notion .zip and prettifies the whole thing
* Removes all Notion IDs from end of names, folders and files
* Add icon to the start of folder/file name if Unicode character
* For files had content in Notion, move them inside the folder, and set the
name to something that will sort to the top
* Fix links inside of files
* Optionally remove titles at the tops of files
@param {NotionClient} notionClient The NotionClient to use to query Notion with
@param {string} zipPath The path to the Notion zip
@param {string} [outputPath="."] Optional output path, otherwise will use cwd
@param {boolean} [removeTopH1=False] To remove titles at the top of all the md files
@param {boolean} [rewritePaths=True] To rewrite all the links and images in the Markdown files too
@returns {string} Path to the output zip file
"""
with tempfile.TemporaryDirectory() as tmpDir:
# Unpack the whole thing first (probably faster than traversing it zipped, like with tar files)
print(f"Extracting '{zipPath}' temporarily...")
with zipfile.ZipFile(zipPath) as zf:
zf.extractall(tmpDir)
# Make new zip to begin filling
zipName = os.path.basename(zipPath)
newZipName = f"{zipName}.formatted"
newZipPath = os.path.join(outputPath, newZipName)
with zipfile.ZipFile(newZipPath, 'w', zipfile.ZIP_DEFLATED) as zf:
            # Traverse the files, renaming, modifying, and writing them back into the new zip
renamer = NotionExportRenamer(notionClient, tmpDir)
for tmpWalkDir, dirs, files in os.walk(tmpDir):
walkDir = os.path.relpath(tmpWalkDir, tmpDir)
for name in files:
realPath = os.path.join(tmpWalkDir, name)
                    relPath = os.path.join("" if walkDir == "." else walkDir, name) # Prevent paths starting with .\\ which, when written to the zip, do annoying things
# print(f"Reading '{root}' '{name}'")
# Rewrite the current path and get the times from Notion
print("---")
print(f"Working on '{relPath}'")
newPath, createdTime, lastEditedTime = renamer.renamePathAndTimesWithNotion(relPath)
if os.path.splitext(name)[1] == ".md":
# Grab the data from the file if md file
with open(realPath, "r", encoding='utf-8') as f:
mdFileData = f.read()
mdFileData = mdFileRewrite(renamer, relPath, mdFileContents=mdFileData, removeTopH1=removeTopH1, rewritePaths=rewritePaths)
print(f"Writing as '{newPath}' with time '{lastEditedTime}'")
zi = zipfile.ZipInfo(newPath, lastEditedTime.timetuple())
zf.writestr(zi, mdFileData)
else:
print(f"Writing as '{newPath}' with time from original export (not an .md file)")
zf.write(realPath, newPath)
return newZipPath
def cli(argv):
"""
CLI entrypoint, takes CLI arguments array
"""
parser = argparse.ArgumentParser(description='Prettifies Notion .zip exports')
parser.add_argument('token_v2', type=str,
help='the token for your Notion.so session')
parser.add_argument('zip_path', type=str,
help='the path to the Notion exported .zip file')
parser.add_argument('--output-path', action='store', type=str, default=".",
help='The path to output to, defaults to cwd')
parser.add_argument('--remove-title', action='store_true',
help='Removes the title that Notion adds. H1s at the top of every file')
    parser.add_argument('--no-rewrite-paths', dest='rewrite_paths', action='store_false', default=True,
                        help="Don't rewrite the relative links/paths inside the Markdown files (rewriting is enabled by default)")
args = parser.parse_args(argv)
startTime = time.time()
nCl = NotionClient(token_v2=args.token_v2)
nCl.get_block = backoff.on_exception(backoff.expo,
requests.exceptions.HTTPError,
max_tries=5,
)(nCl.get_block)
outFileName = rewriteNotionZip(nCl, args.zip_path, outputPath=args.output_path,
removeTopH1=args.remove_title, rewritePaths=args.rewrite_paths)
print("--- Finished in %s seconds ---" % (time.time() - startTime))
print(f"Output file written as '{outFileName}'")
if __name__ == "__main__":
cli(sys.argv[1:])
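# Example invocation (hypothetical script and export file names; token_v2 is
# taken from your Notion.so browser session cookie):
#   python prettify_notion_export.py <token_v2> Export-1234.zip --output-path ./out --remove-title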
|
# https://github.com/RomanMichaelPaolucci/AI_Stock_Trading/blob/master/IBM.csv
import abc
import threading
import time
import pandas as pd
import numpy as np
from keras.layers import Dense
from keras.models import Sequential, model_from_json
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from alpaca_trade_api import REST
class AlpacaPaperSocket(REST):
def __init__(self):
super().__init__(
key_id='PKPO0ZH3XTVB336B7TEO',
secret_key='gcs4U2Hp/ACI4A5UwYjYugrPqB2odD/m40Zuz5qw',
base_url='https://paper-api.alpaca.markets'
)
class TradingSystem(abc.ABC):
def __init__(self, api, symbol, time_frame, system_id, system_label):
# Connect to api
        # Connect to broker
# Save fields to class
self.api = api
self.symbol = symbol
self.time_frame = time_frame
self.system_id = system_id
self.system_label = system_label
thread = threading.Thread(target=self.system_loop)
thread.start()
@abc.abstractmethod
def place_buy_order(self):
pass
@abc.abstractmethod
def place_sell_order(self):
pass
@abc.abstractmethod
def system_loop(self):
pass
# Class to develop your AI portfolio manager
class PMModelDevelopment:
def __init__(self):
# Read your data in and split the dependent and independent
data = pd.read_csv('IBM.csv')
X = data['Delta Close']
y = data.drop(['Delta Close'], axis=1)
# Train test spit
X_train, X_test, y_train, y_test = train_test_split(X, y)
# Create the sequential
network = Sequential()
# Create the structure of the neural network
network.add(Dense(1, input_shape=(1,), activation='tanh'))
network.add(Dense(3, activation='tanh'))
network.add(Dense(3, activation='tanh'))
network.add(Dense(3, activation='tanh'))
network.add(Dense(1, activation='tanh'))
# Compile the model
network.compile(
optimizer='rmsprop',
loss='hinge',
metrics=['accuracy']
)
# Train the model
network.fit(X_train.values, y_train.values, epochs=100)
# Evaluate the predictions of the model
y_pred = network.predict(X_test.values)
y_pred = np.around(y_pred, 0)
print(classification_report(y_test, y_pred))
# Save structure to json
model = network.to_json()
with open("model.json", "w") as json_file:
json_file.write(model)
# Save weights to HDF5
network.save_weights("weights.h5")
# AI Portfolio Manager
class PortfolioManagementModel:
def __init__(self):
# Data in to test that the saving of weights worked
data = pd.read_csv('IBM.csv')
X = data['Delta Close']
y = data.drop(['Delta Close'], axis=1)
# Read structure from json
json_file = open('model.json', 'r')
json = json_file.read()
json_file.close()
self.network = model_from_json(json)
# Read weights from HDF5
self.network.load_weights("weights.h5")
# Verify weights and structure are loaded
y_pred = self.network.predict(X.values)
y_pred = np.around(y_pred, 0)
print(classification_report(y, y_pred))
PortfolioManagementModel()
# In implementation, create a vector to store data...
class PortfolioManagementSystem(TradingSystem):
def __init__(self):
super().__init__(AlpacaPaperSocket(), 'IBM', 86400, 1, 'AI_PM')
self.AI = PortfolioManagementModel()
def place_buy_order(self):
self.api.submit_order(
symbol='IBM',
qty=1,
side='buy',
type='market',
time_in_force='day',
)
def place_sell_order(self):
self.api.submit_order(
symbol='IBM',
qty=1,
side='sell',
type='market',
time_in_force='day',
)
def system_loop(self):
# Variables for weekly close
this_weeks_close = 0
last_weeks_close = 0
delta = 0
day_count = 0
while(True):
            # Wait a day (86,400 seconds) before requesting more data
            time.sleep(86400)
            day_count += 1
# Request EoD data for IBM
data_req = self.api.get_barset('IBM', timeframe='1D', limit=1).df
# Construct dataframe to predict
x = pd.DataFrame(
data=[[
data_req['IBM']['close'][0]]], columns='Close'.split()
)
if(day_count == 7):
day_count = 0
last_weeks_close = this_weeks_close
this_weeks_close = x['Close']
delta = this_weeks_close - last_weeks_close
# AI choosing to buy, sell, or hold
if np.around(self.AI.network.predict([delta])) <= -.5:
self.place_sell_order()
                elif np.around(self.AI.network.predict([delta])) >= .5:
self.place_buy_order()
PortfolioManagementSystem()
|
class Calculator:
""""
This calculator performs the following basic mathematical operations:
* Addition
* Subtraction
* Division
* Multiplication
* nth root of number
* exponent
Attributes
----------
__value : (int or float)
the calculator memory value
Methods
--------
input_validation(new_value):
validates that the value entered is a number or float
add(new_value: int or float):
adds the new value to the value in the calculator memory
subtract(new_value: int or float):
subtracts the new value from the value in the calculator memory
multiply(new_value: int or float):
multiplies the new value with the value in the calculator memory
divide(new_value: int or float):
divides the value in the calculator memory with the new value
n_root(root: int or float):
takes the (n) root of the value in the calculator memory
exponent(exponent: int or float):
raises the values in the calculator memory to the power of the inputted value
reset_memory():
resets the calculator memory value to 0
memory_value():
returns the calculator memory value
"""
def __init__(self, value = 0) -> None:
"""
initializes the memory value
"""
self.__input_validation(value)
self.__value = value
def __input_validation(self, new_value: (int, float)) -> None:
"""
validates that the inputted value is an integer or float
"""
if not isinstance(new_value, (int,float)):
raise NotANumber(new_value)
def add(self, new_value: (int,float)) -> (int, float):
"""
adds the new value to the value in the calculator memory
"""
self.__input_validation(new_value)
self.__value += new_value
return self.__value
def subtract(self, new_value: (int, float)) -> (int, float):
"""
subtracts the new value from the value in the calculator memory
"""
self.__input_validation(new_value)
self.__value -= new_value
return self.__value
def multiply(self, new_value: (int, float)) -> (int, float):
"""
multiplies the new value with the value in the calculator memory
"""
self.__input_validation(new_value)
self.__value *= new_value
return self.__value
def divide(self, new_value: (int, float)) -> (int, float):
"""
divides the value in the calculator memory with the new value
"""
self.__input_validation(new_value)
self.__value /= new_value
return self.__value
def n_root(self, root: (int, float)) -> (int, float):
"""
takes the (n) root of the value in the calculator memory
"""
self.__input_validation(root)
if root <= 0:
raise NotAPositiveNumber(root, type= 'Inputted value')
elif self.__value <= 0:
raise NotAPositiveNumber(self.__value, type= 'Memory value')
else:
self.__value = self.__value ** (1/root)
return self.__value
def exponent(self, exponent: (int, float)) -> (int, float):
"""
raises the values in the calculator memory to the power of the inputted value
"""
self.__input_validation(exponent)
self.__value = self.__value ** exponent
return self.__value
def reset_memory(self) -> (int, float):
"""
resets the calculator memory value to 0
"""
self.__value = 0
return self.__value
def memory_value(self) -> (int, float):
return self.__value
class NotANumber(Exception):
"""
Raises an error stating the input is not a number
Methods
--------
__init__(value, message):
initializes the error class
__str__():
returns the inputted value and error message
"""
def __init__(self, value, message = 'is not a number'):
"""
initializes the error class
"""
self.__message = message
self.__value = value
super().__init__(self.__message)
def __str__(self):
"""
returns the inputted value and error message
"""
return f'"{self.__value}" {self.__message}'
class NotAPositiveNumber(Exception):
"""
Raises an error stating the input is not a positive number
Methods
--------
__init__(value, message):
initializes the error class
__str__():
returns the inputted value and error message
"""
    def __init__(self, value, message = 'is not a positive number. The function only accepts positive numbers', type = ''):
"""
initializes the error class
"""
self.__message = message
self.__value = value
self.__type = type
super().__init__(self.__message)
def __str__(self):
""" returns the value and error message"""
return f'{self.__type} ({self.__value}) {self.__message}'
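# --- Hedged usage sketch (not part of the original module) ---
# Shows chained operations on the calculator memory and one of the custom
# exceptions; the numbers below are arbitrary examples.
if __name__ == '__main__':
    calc = Calculator(10)
    calc.add(6)         # memory: 16
    calc.divide(4)      # memory: 4.0
    calc.exponent(2)    # memory: 16.0
    calc.n_root(4)      # memory: 2.0
    print(calc.memory_value())
    try:
        calc.add('two')
    except NotANumber as err:
        print(err)      # "two" is not a number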
|
"""The Logitech Harmony Hub integration."""
import asyncio
import logging
from homeassistant.components.remote import ATTR_ACTIVITY, ATTR_DELAY_SECS
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import DOMAIN, HARMONY_OPTIONS_UPDATE, PLATFORMS
from .remote import HarmonyRemote
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Logitech Harmony Hub component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Logitech Harmony Hub from a config entry."""
# As there currently is no way to import options from yaml
    # when setting up a config entry, we fall back to adding
# the options to the config entry and pull them out here if
# they are missing from the options
_async_import_options_from_data_if_missing(hass, entry)
address = entry.data[CONF_HOST]
name = entry.data[CONF_NAME]
activity = entry.options.get(ATTR_ACTIVITY)
delay_secs = entry.options.get(ATTR_DELAY_SECS)
harmony_conf_file = hass.config.path(f"harmony_{entry.unique_id}.conf")
try:
device = HarmonyRemote(
name, entry.unique_id, address, activity, harmony_conf_file, delay_secs
)
connected_ok = await device.connect()
except (asyncio.TimeoutError, ValueError, AttributeError):
raise ConfigEntryNotReady
if not connected_ok:
raise ConfigEntryNotReady
hass.data[DOMAIN][entry.entry_id] = device
entry.add_update_listener(_update_listener)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
options = dict(entry.options)
modified = 0
for importable_option in [ATTR_ACTIVITY, ATTR_DELAY_SECS]:
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
modified = 1
if modified:
hass.config_entries.async_update_entry(entry, options=options)
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
async_dispatcher_send(
hass, f"{HARMONY_OPTIONS_UPDATE}-{entry.unique_id}", entry.options
)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
# Shutdown a harmony remote for removal
device = hass.data[DOMAIN][entry.entry_id]
await device.shutdown()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
import os
import yaml
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import namedtuple
class MyDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super(MyDumper, self).increase_indent(flow, False)
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
PRIMITIVES = [
'none',
'noise',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
class EVLocalAvg(object):
def __init__(self, window=5, ev_freq=2, total_epochs=50):
""" Keep track of the eigenvalues local average.
Args:
window (int): number of elements used to compute local average.
Default: 5
ev_freq (int): frequency used to compute eigenvalues. Default:
every 2 epochs
total_epochs (int): total number of epochs that DARTS runs.
Default: 50
"""
self.window = window
self.ev_freq = ev_freq
self.epochs = total_epochs
self.stop_search = False
self.stop_epoch = total_epochs - 1
self.stop_genotype = None
self.ev = []
self.ev_local_avg = []
self.genotypes = {}
self.la_epochs = {}
# start and end index of the local average window
self.la_start_idx = 0
self.la_end_idx = self.window
def reset(self):
self.ev = []
self.ev_local_avg = []
self.genotypes = {}
self.la_epochs = {}
def update(self, epoch, ev, genotype):
""" Method to update the local average list.
Args:
epoch (int): current epoch
ev (float): current dominant eigenvalue
genotype (namedtuple): current genotype
"""
self.ev.append(ev)
self.genotypes.update({epoch: genotype})
# set the stop_genotype to the current genotype in case the early stop
# procedure decides not to early stop
self.stop_genotype = genotype
# since the local average computation starts after the dominant
# eigenvalue in the first epoch is already computed we have to wait
# at least until we have 3 eigenvalues in the list.
if (len(self.ev) >= int(np.ceil(self.window/2))) and (epoch <
self.epochs - 1):
# start sliding the window as soon as the number of eigenvalues in
# the list becomes equal to the window size
if len(self.ev) < self.window:
self.ev_local_avg.append(np.mean(self.ev))
else:
assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window
self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:
self.la_end_idx]))
self.la_start_idx += 1
self.la_end_idx += 1
# keep track of the offset between the current epoch and the epoch
# corresponding to the local average. NOTE: in the end the size of
# self.ev and self.ev_local_avg should be equal
self.la_epochs.update({epoch: int(epoch -
int(self.ev_freq*np.floor(self.window/2)))})
elif len(self.ev) < int(np.ceil(self.window/2)):
self.la_epochs.update({epoch: -1})
# since there is an offset between the current epoch and the local
# average epoch, loop in the last epoch to compute the local average of
# these number of elements: window, window - 1, window - 2, ..., ceil(window/2)
elif epoch == self.epochs - 1:
for i in range(int(np.ceil(self.window/2))):
assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window - i
self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:
self.la_end_idx + 1]))
self.la_start_idx += 1
def early_stop(self, epoch, factor=1.3, es_start_epoch=10, delta=4):
""" Early stopping criterion
Args:
epoch (int): current epoch
            factor (float): threshold factor for the ratio between the current
                and previous local average eigenvalue. Default: 1.3
            es_start_epoch (int): do not consider early stopping before this
                epoch. Default: 10
            delta (int): factor influencing which previous local average we
                compare against. Default: 4
"""
if int(self.la_epochs[epoch] - self.ev_freq*delta) >= es_start_epoch:
# the current local average corresponds to
# epoch - int(self.ev_freq*np.floor(self.window/2))
current_la = self.ev_local_avg[-1]
# by default take the local average corresponding to epoch
# delta*self.ev_freq
previous_la = self.ev_local_avg[-1 - delta]
self.stop_search = current_la / previous_la > factor
if self.stop_search:
self.stop_epoch = int(self.la_epochs[epoch] - self.ev_freq*delta)
self.stop_genotype = self.genotypes[self.stop_epoch]
class AvgrageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
def write_yaml_results_eval(args, results_file, result_to_log):
setting = '_'.join([args.space, args.dataset])
regularization = '_'.join(
[str(args.search_dp), str(args.search_wd)]
)
results_file = os.path.join(args._save, results_file+'.yaml')
try:
with open(results_file, 'r') as f:
result = yaml.load(f, Loader=yaml.Loader)
if setting in result.keys():
if regularization in result[setting].keys():
if args.search_task_id in result[setting][regularization]:
result[setting][regularization][args.search_task_id].append(result_to_log)
else:
result[setting][regularization].update({args.search_task_id:
[result_to_log]})
else:
result[setting].update({regularization: {args.search_task_id:
[result_to_log]}})
else:
result.update({setting: {regularization: {args.search_task_id:
[result_to_log]}}})
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
except (AttributeError, FileNotFoundError) as e:
result = {
setting: {
regularization: {
args.search_task_id: [result_to_log]
}
}
}
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
def write_yaml_results(args, results_file, result_to_log):
setting = '_'.join([args.space, args.dataset])
regularization = '_'.join(
[str(args.drop_path_prob), str(args.weight_decay)]
)
results_file = os.path.join(args._save, results_file+'.yaml')
try:
with open(results_file, 'r') as f:
result = yaml.load(f, Loader=yaml.Loader)
if setting in result.keys():
if regularization in result[setting].keys():
result[setting][regularization].update({args.task_id: result_to_log})
else:
result[setting].update({regularization: {args.task_id: result_to_log}})
else:
result.update({setting: {regularization: {args.task_id: result_to_log}}})
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
except (AttributeError, FileNotFoundError) as e:
result = {
setting: {
regularization: {
args.task_id: result_to_log
}
}
}
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
class Cutout(object):
def __init__(self, length, prob=1.0):
self.length = length
self.prob = prob
def __call__(self, img):
if np.random.binomial(1, self.prob):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def _data_transforms_svhn(args):
SVHN_MEAN = [0.4377, 0.4438, 0.4728]
SVHN_STD = [0.1980, 0.2010, 0.1970]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(SVHN_MEAN, SVHN_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(SVHN_MEAN, SVHN_STD),
])
return train_transform, valid_transform
def _data_transforms_dr_detection(args):
DR_DETECTION_MEAN = [0.42, 0.22, 0.075]
DR_DETECTION_STD = [0.27, 0.15, 0.081]
if args.is_eval:
train_transform = transforms.Compose([
transforms.Resize(540), # 256
transforms.RandomRotation((-45.0, +45.0)),
transforms.RandomResizedCrop(512, scale=(0.9, 1.1), ratio=(0.9, 1.1)), # 224
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(brightness=0.1, contrast=[0.75,1.5],
saturation=[0.75,1.5], hue=0.15),
transforms.ToTensor(),
transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD)
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.Resize(540),
transforms.CenterCrop(512),
transforms.ToTensor(),
transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),
])
else:
train_transform = transforms.Compose([
transforms.Resize(256), # 256
transforms.RandomRotation((-45.0, +45.0)),
transforms.RandomResizedCrop(224, scale=(0.9, 1.1), ratio=(0.9, 1.1)), # 224
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(brightness=0.1, contrast=[0.75, 1.5],
saturation=[0.75, 1.5], hue=0.15),
transforms.ToTensor(),
transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),
# transforms.RandomErasing(),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),
])
return train_transform, valid_transform
def _data_transforms_malaria(args):
train_transform = transforms.Compose([
transforms.Resize(100),
transforms.RandomCrop(64), # 224
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.Resize(100),
transforms.RandomCrop(64), # 224
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
])
return train_transform, valid_transform
def _data_transforms_mnist(args):
MNIST_MEAN = [0.5, 0.5, 0.5]
MNIST_STD = [0.5, 0.5, 0.5]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(MNIST_MEAN, MNIST_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(MNIST_MEAN, MNIST_STD),
])
return train_transform, valid_transform
def _data_transforms_cifar100(args):
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.2673, 0.2564, 0.2762]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
def _data_transforms_cifar10(args):
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
def count_parameters_in_MB(model):
    return sum(np.prod(v.size()) for v in model.parameters()) / 1e6
def save(model, model_path):
torch.save(model.state_dict(), model_path)
def load(model, model_path):
model.load_state_dict(torch.load(model_path))
def save_checkpoint(state, is_best, save, epoch, task_id):
filename = "checkpoint_{}_{}.pth.tar".format(task_id, epoch)
filename = os.path.join(save, filename)
torch.save(state, filename)
if is_best:
best_filename = os.path.join(save, 'model_best.pth.tar')
shutil.copyfile(filename, best_filename)
def load_checkpoint(model, optimizer, scheduler, architect, save, la_tracker,
epoch, task_id):
filename = "checkpoint_{}_{}.pth.tar".format(task_id, epoch)
filename = os.path.join(save, filename)
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
model.alphas_normal.data = checkpoint['alphas_normal']
model.alphas_reduce.data = checkpoint['alphas_reduce']
optimizer.load_state_dict(checkpoint['optimizer'])
architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
la_tracker.ev = checkpoint['ev']
la_tracker.ev_local_avg = checkpoint['ev_local_avg']
la_tracker.genotypes = checkpoint['genotypes']
la_tracker.la_epochs = checkpoint['la_epochs']
la_tracker.la_start_idx = checkpoint['la_start_idx']
la_tracker.la_end_idx = checkpoint['la_end_idx']
lr = checkpoint['lr']
return lr
def drop_path(x, drop_prob):
if drop_prob > 0.:
keep_prob = 1.-drop_prob
mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
x.div_(keep_prob)
x.mul_(mask)
return x
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def print_args(args):
for arg, val in args.__dict__.items():
print(arg + '.' * (50 - len(arg) - len(str(val))) + str(val))
print()
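# --- Hedged usage sketch (not part of the original module) ---
# Minimal demonstration of AvgrageMeter and EVLocalAvg on made-up numbers; the
# genotype argument is just a placeholder string here. Running the file still
# requires the module's own imports (torch, torchvision, yaml) to be installed.
if __name__ == '__main__':
    meter = AvgrageMeter()
    for loss in [0.9, 0.7, 0.6]:
        meter.update(loss, n=1)
    print('running average loss: %.3f' % meter.avg)
    tracker = EVLocalAvg(window=5, ev_freq=2, total_epochs=10)
    fake_eigenvalues = [0.10, 0.12, 0.15, 0.40, 0.90, 1.60, 2.00, 2.10, 2.20, 2.30]
    for epoch, ev in enumerate(fake_eigenvalues):
        tracker.update(epoch, ev, genotype='placeholder-genotype')
        tracker.early_stop(epoch, factor=1.3, es_start_epoch=0, delta=2)
        if tracker.stop_search:
            print('early stopping suggested at epoch %d' % tracker.stop_epoch)
            break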
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-ip-domain-cfg.
usage: gn-create-xr-ip-domain-cfg-33-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
try:
    from urlparse import urlparse  # Python 2
except ImportError:
    from urllib.parse import urlparse  # Python 3
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ip_domain_cfg \
as xr_ip_domain_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
def config_ip_domain(ip_domain):
"""Add config data to ip_domain object."""
vrf = ip_domain.vrfs.Vrf()
vrf.vrf_name = "RED"
vrf.name = "red.example"
# first name server
server = vrf.servers.Server()
server.order = 0
server.server_address = "2001:db8:800a::1"
vrf.servers.server.append(server)
# second name server
server = vrf.servers.Server()
server.order = 1
server.server_address = "2001:db8:800a::2"
vrf.servers.server.append(server)
# third name server
server = vrf.servers.Server()
server.order = 2
server.server_address = "2001:db8:800a::3"
vrf.servers.server.append(server)
ip_domain.vrfs.vrf.append(vrf)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="gNMI device (http://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create gNMI provider
repository = Repository(YDK_REPO_DIR+device.hostname)
provider = gNMIServiceProvider(repo=repository,
address=device.hostname,
port=device.port,
username=device.username,
password=device.password)
# create CRUD service
crud = CRUDService()
ip_domain = xr_ip_domain_cfg.IpDomain() # create object
config_ip_domain(ip_domain) # add object configuration
# create configuration on gNMI device
crud.create(provider, ip_domain)
exit()
# End of script
|
#
# This file is part of pysmi software.
#
# Copyright (c) 2015-2020, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysmi/license.html
#
import sys
import os
import tempfile
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import StringIO
except ImportError:
from io import StringIO
from pysmi.reader.zipreader import ZipReader
class ZipReaderTestCase(unittest.TestCase):
zipArchive = [
80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 8, 135, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 0, 28, 0, 116, 101, 115, 116, 47, 85, 84, 9, 0, 3, 16, 211, 195, 89,
25, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80,
75, 3, 4, 10, 0, 0, 0, 0, 0, 230, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 12, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47,
85, 84, 9, 0, 3, 207, 210, 195, 89, 3, 211, 195, 89, 117, 120, 11, 0, 1, 4,
140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 230, 134,
53, 75, 102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 28, 0, 116, 101, 115,
116, 47, 115, 117, 98, 100, 105, 114, 47, 116, 101, 115, 116, 65, 85, 84, 9,
0, 3, 207, 210, 195, 89, 3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0,
0, 4, 140, 102, 0, 0, 66, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 2, 135, 53, 75,
162, 170, 2, 92, 138, 7, 0, 0, 138, 7, 0, 0, 13, 0, 28, 0, 116, 101, 115, 116,
47, 116, 101, 115, 116, 46, 122, 105, 112, 85, 84, 9, 0, 3, 3, 211, 195, 89,
3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80,
75, 3, 4, 10, 0, 0, 0, 0, 0, 253, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 0, 28, 0, 116, 101, 115, 116, 47, 85, 84, 9, 0, 3, 253, 210, 195, 89, 3,
211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75,
3, 4, 10, 0, 0, 0, 0, 0, 230, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 85, 84,
9, 0, 3, 207, 210, 195, 89, 3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102,
0, 0, 4, 140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75,
227, 250, 30, 37, 12, 0, 0, 0, 12, 0, 0, 0, 21, 0, 28, 0, 116, 101, 115, 116,
47, 115, 117, 98, 100, 105, 114, 47, 116, 101, 115, 116, 65, 46, 116, 120,
116, 85, 84, 9, 0, 3, 116, 204, 195, 89, 134, 204, 195, 89, 117, 120, 11, 0,
1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 115, 117, 98, 100, 105, 114, 116,
101, 115, 116, 65, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 109, 131, 53, 75, 237,
78, 102, 83, 6, 0, 0, 0, 6, 0, 0, 0, 14, 0, 28, 0, 116, 101, 115, 116, 47,
116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 9, 0, 3, 78, 204, 195, 89,
134, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0,
116, 101, 115, 116, 65, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 144, 131, 53,
75, 204, 176, 61, 249, 144, 2, 0, 0, 144, 2, 0, 0, 13, 0, 28, 0, 116, 101,
115, 116, 47, 116, 101, 115, 116, 46, 122, 105, 112, 85, 84, 9, 0, 3, 143,
204, 195, 89, 143, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4,
140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 117, 131, 53, 75, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 28, 0, 116, 101, 115, 116, 47, 85, 84, 9, 0,
3, 94, 204, 195, 89, 98, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0,
4, 140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117,
98, 100, 105, 114, 47, 85, 84, 9, 0, 3, 116, 204, 195, 89, 134, 204, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 3, 4,
10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 227, 250, 30, 37, 12, 0, 0, 0, 12, 0, 0,
0, 21, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 116,
101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 9, 0, 3, 116, 204, 195, 89, 116,
204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 115,
117, 98, 100, 105, 114, 116, 101, 115, 116, 65, 10, 80, 75, 3, 4, 10, 0, 0, 0,
0, 0, 109, 131, 53, 75, 237, 78, 102, 83, 6, 0, 0, 0, 6, 0, 0, 0, 14, 0, 28,
0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84,
9, 0, 3, 78, 204, 195, 89, 78, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102,
0, 0, 4, 140, 102, 0, 0, 116, 101, 115, 116, 65, 10, 80, 75, 1, 2, 30, 3, 10,
0, 0, 0, 0, 0, 117, 131, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 24,
0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 0, 0, 0, 0, 116, 101, 115, 116, 47, 85,
84, 5, 0, 3, 94, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140,
102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 63,
0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 85, 84, 5,
0, 3, 116, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102,
0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 227, 250, 30,
37, 12, 0, 0, 0, 12, 0, 0, 0, 21, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129,
133, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 116,
101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 5, 0, 3, 116, 204, 195, 89, 117,
120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10,
0, 0, 0, 0, 0, 109, 131, 53, 75, 237, 78, 102, 83, 6, 0, 0, 0, 6, 0, 0, 0, 14,
0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 224, 0, 0, 0, 116, 101, 115, 116,
47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 5, 0, 3, 78, 204, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 5, 6, 0,
0, 0, 0, 4, 0, 4, 0, 76, 1, 0, 0, 46, 1, 0, 0, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0,
0, 0, 230, 134, 53, 75, 102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 28, 0,
116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 116, 101, 115, 116,
65, 85, 84, 9, 0, 3, 207, 210, 195, 89, 207, 210, 195, 89, 117, 120, 11, 0, 1,
4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 66, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0,
0, 253, 134, 53, 75, 39, 231, 88, 122, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 28, 0,
116, 101, 115, 116, 47, 116, 101, 115, 116, 67, 85, 84, 9, 0, 3, 253, 210,
195, 89, 253, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140,
102, 0, 0, 67, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 211, 134, 53, 75, 165,
133, 110, 72, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 28, 0, 116, 101, 115, 116, 47,
116, 101, 115, 116, 65, 85, 84, 9, 0, 3, 173, 210, 195, 89, 173, 210, 195, 89,
117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 65, 10, 80, 75, 1,
2, 30, 3, 10, 0, 0, 0, 0, 0, 253, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 0, 0, 0, 0, 116, 101, 115,
116, 47, 85, 84, 5, 0, 3, 253, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102,
0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230, 134, 53,
75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16,
0, 253, 65, 63, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114,
47, 85, 84, 5, 0, 3, 207, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0,
0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 130, 131, 53,
75, 227, 250, 30, 37, 12, 0, 0, 0, 12, 0, 0, 0, 21, 0, 24, 0, 0, 0, 0, 0, 1,
0, 0, 0, 180, 129, 133, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100,
105, 114, 47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 5, 0, 3, 116,
204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80,
75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 109, 131, 53, 75, 237, 78, 102, 83, 6, 0,
0, 0, 6, 0, 0, 0, 14, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 224, 0, 0,
0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84,
5, 0, 3, 78, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102,
0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 144, 131, 53, 75, 204, 176, 61,
249, 144, 2, 0, 0, 144, 2, 0, 0, 13, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180,
129, 46, 1, 0, 0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 46, 122, 105,
112, 85, 84, 5, 0, 3, 143, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0,
0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230, 134, 53, 75,
102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0,
0, 180, 129, 5, 4, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114,
47, 116, 101, 115, 116, 65, 85, 84, 5, 0, 3, 207, 210, 195, 89, 117, 120, 11,
0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0,
0, 0, 253, 134, 53, 75, 39, 231, 88, 122, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 24,
0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 82, 4, 0, 0, 116, 101, 115, 116, 47, 116,
101, 115, 116, 67, 85, 84, 5, 0, 3, 253, 210, 195, 89, 117, 120, 11, 0, 1, 4,
140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0,
211, 134, 53, 75, 165, 133, 110, 72, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 24, 0, 0,
0, 0, 0, 1, 0, 0, 0, 180, 129, 152, 4, 0, 0, 116, 101, 115, 116, 47, 116, 101,
115, 116, 65, 85, 84, 5, 0, 3, 173, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140,
102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 5, 6, 0, 0, 0, 0, 8, 0, 8, 0, 150, 2,
0, 0, 222, 4, 0, 0, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 211, 134, 53, 75,
165, 133, 110, 72, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 28, 0, 116, 101, 115, 116,
47, 116, 101, 115, 116, 65, 85, 84, 9, 0, 3, 173, 210, 195, 89, 3, 211, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 65, 10, 80, 75,
1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 8, 135, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 0, 0, 0, 0, 116, 101,
115, 116, 47, 85, 84, 5, 0, 3, 16, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140,
102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230,
134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 0, 0, 0, 0, 0, 0,
0, 16, 0, 253, 65, 63, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100,
105, 114, 47, 85, 84, 5, 0, 3, 207, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140,
102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230,
134, 53, 75, 102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 24, 0, 0, 0, 0,
0, 1, 0, 0, 0, 180, 129, 133, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98,
100, 105, 114, 47, 116, 101, 115, 116, 65, 85, 84, 5, 0, 3, 207, 210, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2,
30, 3, 10, 0, 0, 0, 0, 0, 2, 135, 53, 75, 162, 170, 2, 92, 138, 7, 0, 0, 138,
7, 0, 0, 13, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 129, 210, 0, 0, 0, 116,
101, 115, 116, 47, 116, 101, 115, 116, 46, 122, 105, 112, 85, 84, 5, 0, 3,
3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0,
80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 211, 134, 53, 75, 165, 133, 110, 72,
2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 163,
8, 0, 0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 65, 85, 84, 5, 0, 3,
173, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0,
80, 75, 5, 6, 0, 0, 0, 0, 5, 0, 5, 0, 151, 1, 0, 0, 233, 8, 0, 0, 0, 0]
if sys.version_info[0] < 3:
zipContents = ''.join([chr(x) for x in zipArchive])
else:
zipContents = bytes(zipArchive)
    def testGetDataFromFile(self):
        fd, filename = tempfile.mkstemp()
        try:
            os.write(fd, self.zipContents)
            os.close(fd)
            zipReader = ZipReader(filename)
            mibinfo, data = zipReader.getData('testA')
            assert data == 'A\n'
        finally:
            try:
                os.remove(filename)
            except OSError:
                pass
    def testGetInnerZipData(self):
        fd, filename = tempfile.mkstemp()
        try:
            os.write(fd, self.zipContents)
            os.close(fd)
            zipReader = ZipReader(filename)
            mibinfo, data = zipReader.getData('testC')
            assert data == 'C\n'
        finally:
            try:
                os.remove(filename)
            except OSError:
                pass
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
# -*- coding: utf-8 -*-
import binascii
from copy import copy
from .codec import size_for_addr
from .codec import string_to_bytes
from .codec import bytes_to_string
from .codec import protocol_with_name
from .protocols import protocol_with_code
from .protocols import read_varint_code
class ProtocolNotFoundException(Exception):
pass
class Multiaddr(object):
"""Multiaddr is a representation of multiple nested internet addresses.
Multiaddr is a cross-protocol, cross-platform format for representing
internet addresses. It emphasizes explicitness and self-description.
Learn more here: https://github.com/jbenet/multiaddr
Multiaddrs have both a binary and string representation.
>>> from multiaddr import Multiaddr
>>> addr = Multiaddr("/ip4/1.2.3.4/tcp/80")
Multiaddr objects are immutable, so `encapsulate` and `decapsulate`
return new objects rather than modify internal state.
"""
def __init__(self, addr):
"""Instantiate a new Multiaddr.
Args:
addr : A string-encoded or a byte-encoded Multiaddr
"""
if isinstance(addr, str):
self._bytes = string_to_bytes(addr)
elif isinstance(addr, bytes):
self._bytes = addr
else:
raise ValueError("Invalid address type, must be bytes or str")
def __eq__(self, other):
"""Checks if two Multiaddr objects are exactly equal."""
return self._bytes == other._bytes
def __ne__(self, other):
return not (self == other)
def __str__(self):
"""Return the string representation of this Multiaddr.
May raise an exception if the internal state of the Multiaddr is
corrupted."""
try:
return bytes_to_string(self._bytes)
except Exception:
raise ValueError(
"multiaddr failed to convert back to string. corrupted?")
def __repr__(self):
return "<Multiaddr %s>" % str(self)
def to_bytes(self):
"""Returns the byte array representation of this Multiaddr."""
return self._bytes
def protocols(self):
"""Returns a list of Protocols this Multiaddr includes."""
buf = binascii.unhexlify(self.to_bytes())
protos = []
while buf:
code, num_bytes_read = read_varint_code(buf)
proto = protocol_with_code(code)
protos.append(proto)
buf = buf[num_bytes_read:]
size = size_for_addr(proto, buf)
buf = buf[size:]
return protos
def encapsulate(self, other):
"""Wrap this Multiaddr around another.
For example:
/ip4/1.2.3.4 encapsulate /tcp/80 = /ip4/1.2.3.4/tcp/80
"""
mb = self.to_bytes()
ob = other.to_bytes()
return Multiaddr(b''.join([mb, ob]))
def decapsulate(self, other):
"""Remove a Multiaddr wrapping.
For example:
            /ip4/1.2.3.4/tcp/80 decapsulate /tcp/80 = /ip4/1.2.3.4
"""
s1 = str(self)
s2 = str(other)
try:
idx = s1.rindex(s2)
except ValueError:
# if multiaddr not contained, returns a copy
return copy(self)
try:
return Multiaddr(s1[:idx])
except Exception as ex:
raise ValueError(
"Multiaddr.decapsulate incorrect byte boundaries: %s"
% str(ex))
def value_for_protocol(self, code):
"""Return the value (if any) following the specified protocol."""
from .util import split
if isinstance(code, str):
protocol = protocol_with_name(code)
code = protocol.code
for sub_addr in split(self):
if sub_addr.protocols()[0].code == code:
addr_parts = str(sub_addr).split("/")
if len(addr_parts) > 3:
raise ValueError("Unknown Protocol format")
elif len(addr_parts) == 3:
# If we have an address, return it
return addr_parts[2]
elif len(addr_parts) == 2:
# We were given something like '/utp', which doesn't have
# an address, so return ''
return ''
raise ProtocolNotFoundException()
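# --- Hedged usage sketch (not part of the original module) ---
# Mirrors the examples in the docstrings above and assumes the package's
# protocol table includes ip4 and tcp. Because this module uses relative
# imports, run it with package context (e.g. `python -m <package>.<module>`).
if __name__ == '__main__':
    base = Multiaddr("/ip4/1.2.3.4")
    full = base.encapsulate(Multiaddr("/tcp/80"))
    print(full)                                    # /ip4/1.2.3.4/tcp/80
    print(full.value_for_protocol('tcp'))          # 80
    print(full.decapsulate(Multiaddr("/tcp/80")))  # /ip4/1.2.3.4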
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.profile, name='profile'),
path(
'order_history/<order_number>',
views.order_history,
name='order_history'),
]
|
# -*- encoding: utf-8 -*-
import abc
import numpy as np
import scipy.sparse
from autosklearn.pipeline.implementations.OneHotEncoder import OneHotEncoder
from autosklearn.util import predict_RAM_usage
def perform_one_hot_encoding(sparse, categorical, data):
predicted_RAM_usage = float(
predict_RAM_usage(data[0], categorical)) / 1024 / 1024
if predicted_RAM_usage > 1000:
sparse = True
rvals = []
if any(categorical):
encoder = OneHotEncoder(categorical_features=categorical,
dtype=np.float32,
sparse=sparse)
rvals.append(encoder.fit_transform(data[0]))
for d in data[1:]:
rvals.append(encoder.transform(d))
if not sparse and scipy.sparse.issparse(rvals[0]):
for i in range(len(rvals)):
rvals[i] = rvals[i].todense()
else:
rvals = data
return rvals, sparse
class AbstractDataManager(metaclass=abc.ABCMeta):
def __init__(self, name):
self._data = dict()
self._info = dict()
self._name = name
@property
def name(self):
return self._name
@property
def data(self):
return self._data
@property
def info(self):
return self._info
@property
def feat_type(self):
return self._feat_type
@feat_type.setter
def feat_type(self, value):
self._feat_type = value
@property
def encoder(self):
return self._encoder
@encoder.setter
def encoder(self, value):
self._encoder = value
def perform1HotEncoding(self):
sparse = True if self.info['is_sparse'] == 1 else False
has_missing = True if self.info['has_missing'] else False
to_encode = ['categorical']
if has_missing:
to_encode += ['binary']
encoding_mask = [feat_type.lower() in to_encode
for feat_type in self.feat_type]
data = [self.data['X_train']]
if 'X_valid' in self.data:
data.append(self.data['X_valid'])
if 'X_test' in self.data:
data.append(self.data['X_test'])
data, sparse = perform_one_hot_encoding(
sparse=sparse, categorical=encoding_mask,
data=data)
self.info['is_sparse'] = 1 if sparse else 0
self.data['X_train'] = data[0]
if 'X_valid' in self.data and 'X_test' in self.data:
self.data['X_valid'] = data[1]
self.data['X_test'] = data[2]
elif 'X_valid' in self.data:
self.data['X_valid'] = data[1]
elif 'X_test' in self.data:
self.data['X_test'] = data[1]
def __repr__(self):
return 'DataManager : ' + self.name
def __str__(self):
val = 'DataManager : ' + self.name + '\ninfo:\n'
for item in self.info:
val = val + '\t' + item + ' = ' + str(self.info[item]) + '\n'
val = val + 'data:\n'
for subset in self.data:
val = val + '\t%s = %s %s %s\n' % (subset, type(self.data[subset]),
str(self.data[subset].shape),
str(self.data[subset].dtype))
if isinstance(self.data[subset], scipy.sparse.spmatrix):
val = val + '\tdensity: %f\n' % \
(float(len(self.data[subset].data)) /
self.data[subset].shape[0] /
self.data[subset].shape[1])
val = val + 'feat_type:\t' + str(self.feat_type) + '\n'
return val
|
# Generated by Django 3.0.5 on 2020-04-11 04:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
"""
Django settings for travel_blog project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd$^_aoggnuh-=s=kpxb*2qkr+%)^^0cnm8h32h@qq*&1k8*g^l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.gis',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'travel_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'travel_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.postgresql',
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'HOST': 'localhost',
'NAME': 'travel_blog_db',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'assets'),)
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
MEDIA_URL = '/uploads/'
|
# -*- coding: UTF-8 -*-
from django.test import Client, TestCase
from django.contrib.auth.models import Group
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.utils import simplejson
from privilege.core.config import GROUP_CACHE_KEY
class GroupTestCases(TestCase):
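# Tests for the privilege group views: listing, detail, permission changes, add, edit and delete.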
fixtures = ['privilege.json']
def setUp(self):
TestCase.setUp(self)
self.client = Client()
def tearDown(self):
self.client.logout()
TestCase.tearDown(self)
def test_group_list_not_login(self):
group_list_url = reverse("privilege.views.group.group_list", args=(1, ))
self.check_not_login(group_list_url)
def test_group_list_logged_in_but_not_superuser(self):
group_list_url = reverse("privilege.views.group.group_list", args=(1, ))
self.check_not_superuser(group_list_url)
def test_group_list_ok(self):
group_list_url = reverse("privilege.views.group.group_list", args=(1, ))
self.client.login(username="super", password="test")
response = self.client.get(group_list_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["page"].object_list)
def test_group_detail_not_login(self):
group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,))
self.check_not_login(group_detail_url)
def test_get_group_detail_logged_in_but_not_superuser(self):
group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,))
self.check_not_superuser(group_detail_url)
def test_get_group_detail_not_exist(self):
group_detail_url = reverse("privilege.views.group.group_detail", args=(0, 1,))
self.client.login(username="super", password="test")
response = self.client.get(group_detail_url)
self.assertEqual(response.status_code, 404)
def test_get_group_detail_ok(self):
group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,))
self.client.login(username="super", password="test")
response = self.client.get(group_detail_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["group"])
def test_change_group_permission_not_login(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
self.check_not_login(change_group_url)
def test_change_group_permission_not_super_user(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
self.check_not_superuser(change_group_url)
def test_change_group_permission_get_method(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
self.client.login(username="super", password="test")
response = self.client.get(change_group_url)
self.assertEqual(response.status_code, 200)
expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")})
self.assertEqual(response.content, expect_content)
def test_change_group_permission_not_exist(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
post_data = {"group_id": 0}
self.client.login(username="super", password="test")
response = self.client.post(change_group_url, post_data)
self.assertEqual(response.status_code, 200)
expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")})
self.assertEqual(response.content, expect_content)
def test_change_group_permission_post_bad_params(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
post_data = {"group_id": 1, "permission_id": ""}
self.client.login(username="super", password="test")
response = self.client.post(change_group_url, post_data)
self.assertEqual(response.status_code, 200)
expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")})
self.assertEqual(response.content, expect_content)
def test_change_group_permission_ok(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
post_data = {"group_id": 1, "permission_id": "1", "op_code": "add"}
self.client.login(username="super", password="test")
response = self.client.post(change_group_url, post_data)
self.assertEqual(response.status_code, 200)
expect_content = simplejson.dumps({"status": "ok", "msg": _("Success")})
self.assertEqual(response.content, expect_content)
cache.set(GROUP_CACHE_KEY, None)
def test_add_group_not_login(self):
add_group_url = reverse("privilege.views.group.add_group")
self.check_not_login(add_group_url)
def test_add_group_not_superuser(self):
add_group_url = reverse("privilege.views.group.add_group")
self.check_not_superuser(add_group_url)
def test_add_group_not_post(self):
add_group_url = reverse("privilege.views.group.add_group")
self.client.login(username="super", password="test")
response = self.client.get(add_group_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["form"])
def test_add_group_post_blank(self):
add_group_url = reverse("privilege.views.group.add_group")
self.client.login(username="super", password="test")
response = self.client.post(add_group_url, {"name": ""})
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["form"].errors)
def test_add_group_ok(self):
add_group_url = reverse("privilege.views.group.add_group")
self.client.login(username="super", password="test")
response = self.client.post(add_group_url, {"name": "add_success"})
self.assertEqual(response.status_code, 302)
self.assertTrue(Group.objects.filter(name="add_success").count())
Group.objects.filter(name="add_success").delete()
cache.set(GROUP_CACHE_KEY, None)
def test_edit_group_not_login(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
self.check_not_login(edit_group_url)
def test_edit_group_not_superuser(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
self.check_not_superuser(edit_group_url)
def test_edit_group_not_exist(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(0, ))
self.client.login(username="super", password="test")
response = self.client.get(edit_group_url)
self.assertEqual(response.status_code, 404)
def test_edit_group_not_post(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
self.client.login(username="super", password="test")
response = self.client.get(edit_group_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["form"])
def test_edit_group_post_blank(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
self.client.login(username="super", password="test")
response = self.client.post(edit_group_url, {"name": ""})
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["form"].errors)
def test_edit_group_ok(self):
group = Group.objects.create(name="to_delete")
edit_group_url = reverse("privilege.views.group.edit_group", args=(group.id, ))
self.client.login(username="super", password="test")
response = self.client.post(edit_group_url, {"name": "changed"})
self.assertEqual(response.status_code, 302)
group = Group.objects.get(id=group.id)
self.assertEqual(group.name, "changed")
group.delete()
cache.set(GROUP_CACHE_KEY, None)
def test_delete_group_not_login(self):
delete_group_url = reverse("privilege.views.group.delete_group", args=(1, ))
self.check_not_login(delete_group_url)
def test_delete_group_not_superuser(self):
delete_group_url = reverse("privilege.views.group.delete_group", args=(1, ))
self.check_not_superuser(delete_group_url)
def test_delete_group_ok(self):
delete_group_url = reverse("privilege.views.group.delete_group", args=(0, ))
response = self.client.post(delete_group_url)
self.assertEqual(response.status_code, 302)
cache.set(GROUP_CACHE_KEY, None)
def check_not_login(self, url):
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def check_not_superuser(self, url):
self.client.login(username="test", password="test")
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
|
import os
import time
from pathlib import Path # from path home
import schedule
print(Path.home()) # C:\Users\angel
old_files_folder_name = "old_files"
print("Hello ")
def clean_up_downloads():
print("Cleaning up Downloads")
# get all items from the Downloads folder
download_folder_path = os.path.join(Path.home(), "Downloads", "Downloads")
download_items = os.listdir(download_folder_path)
moved_items = 0
# create the old files folder if not present
old_files_folder_path = os.path.join(download_folder_path, old_files_folder_name)
if old_files_folder_name not in download_items:
print(f"No {old_files_folder_name} folder yet, creating folder")
os.mkdir(old_files_folder_path) # create folder "old_files"
# create a new folder named with today's timestamp
timestamp = time.strftime("%Y_%m_%d") # Year month and day
datetime_folder_path = os.path.join(old_files_folder_path, timestamp)
if not os.path.exists(datetime_folder_path):
print(f"No {datetime_folder_path} folder yet, creating folder")
os.mkdir(datetime_folder_path)
else:
print(f"{timestamp} folder already exists in {old_files_folder_name}")
# rename all items to move them into the current datetime folder
to_be_moved = [item for item in download_items if item != old_files_folder_name] # also moves folders
for item in to_be_moved:
print(f"Moving {item} to {datetime_folder_path} folder")
old_path = os.path.join(download_folder_path, item)
new_path = os.path.join(datetime_folder_path, item)
os.rename(old_path, new_path)
moved_items += 1
print(f"Moved {moved_items} of {len(to_be_moved)} items")
# schedule the Downloads clean-up once a week
# I start the script on Friday at 20:21, so the job scheduled for 20:22 below fires shortly after
schedule.every().friday.at("20:22").do(clean_up_downloads)
# keep the script running and sleep in between the checks
while True:
print("here")
schedule.run_pending()
# sleep briefly between checks (1 second here; use 60 * 60 * 24 to check once a day)
time.sleep(1) # 60 * 60 * 24
|
from nutils.testing import *
import nutils.types
import inspect, pickle, itertools, ctypes, stringly, tempfile, io, os, sys, operator, unittest
import numpy
class apply_annotations(TestCase):
def test_without_annotations(self):
@nutils.types.apply_annotations
def f(a, b):
return a, b
a, b = f(1, 2)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
def test_pos_or_kw(self):
@nutils.types.apply_annotations
def f(a:int, b, c:str):
return a, b, c
a, b, c = f(1, 2, 3)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
self.assertEqual(c, '3')
def test_with_signature(self):
def f(a):
return a
f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str)])
f = nutils.types.apply_annotations(f)
self.assertEqual(f(1), '1')
def test_posonly(self):
def f(a):
return a
f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str)])
f = nutils.types.apply_annotations(f)
self.assertEqual(f(1), '1')
def test_kwonly(self):
@nutils.types.apply_annotations
def f(a:str, *, b:int, c:bool):
return a, b, c
self.assertEqual(f(1, b='2', c=3), ('1', 2, True))
def test_varpos(self):
@nutils.types.apply_annotations
def f(a:str, *args):
return a, args
self.assertEqual(f(1, 2, 3), ('1', (2, 3)))
def test_varpos_annotated(self):
map_str = lambda args: map(str, args)
@nutils.types.apply_annotations
def f(a:str, *args:map_str):
return a, args
self.assertEqual(f(1, 2, 3), ('1', ('2', '3')))
def test_varkw(self):
@nutils.types.apply_annotations
def f(a:str, **kwargs):
return a, kwargs
self.assertEqual(f(1, b=2, c=3), ('1', dict(b=2, c=3)))
def test_varkw_annotated(self):
map_str = lambda kwargs: {k: str(v) for k, v in kwargs.items()}
@nutils.types.apply_annotations
def f(a:str, **kwargs:map_str):
return a, kwargs
self.assertEqual(f(1, b=2, c=3), ('1', dict(b='2', c='3')))
def test_posonly_varkw(self):
def f(a, b, **c):
return a, b, c
f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str),
inspect.Parameter('b', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str, default=None),
inspect.Parameter('c', inspect.Parameter.VAR_KEYWORD)])
f = nutils.types.apply_annotations(f)
self.assertEqual(f(1, c=2, d=3), ('1', None, dict(c=2, d=3)))
self.assertEqual(f(1, None, c=2, d=3), ('1', None, dict(c=2, d=3)))
self.assertEqual(f(1, b=None, c=2, d=3), ('1', None, dict(c=2, d=3)))
self.assertEqual(f(1, b=4, c=2, d=3), ('1', '4', dict(c=2, d=3)))
def test_default_none(self):
@nutils.types.apply_annotations
def f(a:str=None):
return a
self.assertEqual(f(), None)
self.assertEqual(f(None), None)
self.assertEqual(f(1), '1')
class nutils_hash(TestCase):
class custom:
@property
def __nutils_hash__(self):
return b'01234567890123456789'
def f(self):
pass
def test_ellipsis(self):
self.assertEqual(nutils.types.nutils_hash(...).hex(), '0c8bce06e451e4d5c49f60da0abf2ccbadf80600')
def test_None(self):
self.assertEqual(nutils.types.nutils_hash(None).hex(), 'bdfcbd663476b2db5b2b2e59a6d93882a908dc76')
def test_bool(self):
self.assertEqual(nutils.types.nutils_hash(False).hex(), '04a5e8f73dcea55dcd7482a476cf2e7b53d6dc50')
self.assertEqual(nutils.types.nutils_hash(True).hex(), '3fe990437e1624c831729f2866979254437bb7e9')
def test_int(self):
self.assertEqual(nutils.types.nutils_hash(1).hex(), '00ec7dea895ebd921e56bbc554688d8b3a1e4dfc')
self.assertEqual(nutils.types.nutils_hash(2).hex(), '8ae88fa39407cf75e46f9e0aba8c971de2256b14')
def test_float(self):
self.assertEqual(nutils.types.nutils_hash(1.).hex(), 'def4bae4f2a3e29f6ddac537d3fa7c72195e5d8b')
self.assertEqual(nutils.types.nutils_hash(2.5).hex(), '5216c2bf3c16d8b8ff4d9b79f482e5cea0a4cb95')
def test_complex(self):
self.assertEqual(nutils.types.nutils_hash(1+0j).hex(), 'cf7a0d933b7bb8d3ca252683b137534a1ecae073')
self.assertEqual(nutils.types.nutils_hash(2+1j).hex(), 'ee088890528f941a80aa842dad36591b05253e55')
def test_inequality_numbers(self):
self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1.).hex())
self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1+0j).hex())
self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(True).hex())
def test_str(self):
self.assertEqual(nutils.types.nutils_hash('spam').hex(), '3ca1023ab75a68dc7b0f83b43ec624704a7aef61')
self.assertEqual(nutils.types.nutils_hash('eggs').hex(), '124b0a7b3984e08125c380f7454896c1cad22e2c')
def test_bytes(self):
self.assertEqual(nutils.types.nutils_hash(b'spam').hex(), '5e717ec15aace7c25610c1dea340f2173f2df014')
self.assertEqual(nutils.types.nutils_hash(b'eggs').hex(), '98f2061978497751cac94f982fd96d9b015b74c3')
def test_tuple(self):
self.assertEqual(nutils.types.nutils_hash(()).hex(), '15d44755bf0731b2a3e9a5c5c8e0807b61881a1f')
self.assertEqual(nutils.types.nutils_hash((1,)).hex(), '328b16ebbc1815cf579ae038a35c4d68ebb022af')
self.assertNotEqual(nutils.types.nutils_hash((1,'spam')).hex(), nutils.types.nutils_hash(('spam',1)).hex())
def test_frozenset(self):
self.assertEqual(nutils.types.nutils_hash(frozenset([1,2])).hex(), '3862dc7e5321bc8a576c385ed2c12c71b96a375a')
self.assertEqual(nutils.types.nutils_hash(frozenset(['spam','eggs'])).hex(), '2c75fd3db57f5e505e1425ae9ff6dcbbc77fd123')
@unittest.skipIf(sys.version_info < (3,7), "not supported in this Python version")
def test_dataclass(self):
import dataclasses
A = dataclasses.make_dataclass('A', [('n', int), ('f', float)])
self.assertEqual(nutils.types.nutils_hash(A(n=1, f=2.5)).hex(), 'daf4235240e897beb9586db3c91663b24e229c52')
def test_type_bool(self):
self.assertEqual(nutils.types.nutils_hash(bool).hex(), 'feb912889d52d45fcd1e778c427b093a19a1ea78')
def test_type_int(self):
self.assertEqual(nutils.types.nutils_hash(int).hex(), 'aa8cb9975f7161b1f7ceb88b4b8585b49946b31e')
def test_type_float(self):
self.assertEqual(nutils.types.nutils_hash(float).hex(), '6d5079a53075f4b6f7710377838d8183730f1388')
def test_type_complex(self):
self.assertEqual(nutils.types.nutils_hash(complex).hex(), '6b00f6b9c6522742fd3f8054af6f10a24a671fff')
def test_type_str(self):
self.assertEqual(nutils.types.nutils_hash(str).hex(), '2349e11586163208d2581fe736630f4e4b680a7b')
def test_type_bytes(self):
self.assertEqual(nutils.types.nutils_hash(bytes).hex(), 'b0826ca666a48739e6f8b968d191adcefaa39670')
def test_type_tuple(self):
self.assertEqual(nutils.types.nutils_hash(tuple).hex(), '07cb4a24ca8ac53c820f20721432b4726e2ad1af')
def test_type_frozenset(self):
self.assertEqual(nutils.types.nutils_hash(frozenset).hex(), '48dc7cd0fbd54924498deb7c68dd363b4049f5e2')
def test_type_bufferedreader(self):
try:
fid, path = tempfile.mkstemp()
os.write(fid, b'test')
os.close(fid)
with open(path, 'rb') as f:
f.seek(2)
self.assertEqual(nutils.types.nutils_hash(f).hex(), '4edef1af3aa845b9e8bbde2d8265be5f30be4c2a')
self.assertEqual(f.tell(), 2)
with open(path, 'rb+') as f, self.assertRaises(TypeError):
nutils.types.nutils_hash(f).hex()
finally:
os.unlink(path)
def test_type_boundmethod(self):
self.assertEqual(nutils.types.nutils_hash(self.custom().f).hex(), 'ebf7084bb2504922235ab035a9197b9cb4cf47af')
def test_custom(self):
self.assertEqual(nutils.types.nutils_hash(self.custom()).hex(), b'01234567890123456789'.hex())
def test_unhashable(self):
with self.assertRaises(TypeError):
nutils.types.nutils_hash([])
class CacheMeta(TestCase):
def test_property(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
@property
def x(self):
nonlocal ncalls
ncalls += 1
return 1
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x, 1)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x, 1)
self.assertEqual(ncalls, 1)
def test_set_property(self):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
@property
def x(self):
return 1
t = T()
with self.assertRaises(AttributeError):
t.x = 1
def test_del_property(self):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
@property
def x(self):
return 1
t = T()
with self.assertRaises(AttributeError):
del t.x
def test_method_without_args(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
def x(self):
nonlocal ncalls
ncalls += 1
return 1
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(), 1)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(), 1)
self.assertEqual(ncalls, 1)
def test_method_with_args(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
def x(self, a, b):
nonlocal ncalls
ncalls += 1
return a + b
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(1, 2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(a=1, b=2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(2, 2), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(a=2, b=2), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(1, 2), 3)
self.assertEqual(ncalls, 3)
def test_method_with_args_and_preprocessors(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
@nutils.types.apply_annotations
def x(self, a:int, b:int):
nonlocal ncalls
ncalls += 1
return a + b
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(1, 2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(a='1', b='2'), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x('2', '2'), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(a=2, b=2), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x('1', 2), 3)
self.assertEqual(ncalls, 3)
def test_method_with_kwargs(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
def x(self, a, **kwargs):
nonlocal ncalls
ncalls += 1
return a + sum(kwargs.values())
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(1, b=2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(a=1, b=2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(1, b=2, c=3), 6)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(a=1, b=2, c=3), 6)
self.assertEqual(ncalls, 2)
def test_subclass_redefined_property(self):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
@property
def x(self):
return 1
class U(T):
__cache__ = 'x',
@property
def x(self):
return super().x + 1
@property
def y(self):
return super().x
u1 = U()
self.assertEqual(u1.x, 2)
self.assertEqual(u1.y, 1)
u2 = U()
self.assertEqual(u2.y, 1)
self.assertEqual(u2.x, 2)
def test_missing_attribute(self):
with self.assertRaisesRegex(TypeError, 'Attribute listed in __cache__ is undefined: x'):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
def test_invalid_attribute(self):
with self.assertRaisesRegex(TypeError, "Don't know how to cache attribute x: None"):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
x = None
def test_name_mangling(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = '__x',
@property
def __x(self):
nonlocal ncalls
ncalls += 1
return 1
@property
def y(self):
return self.__x
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.y, 1)
self.assertEqual(ncalls, 1)
self.assertEqual(t.y, 1)
self.assertEqual(ncalls, 1)
class strictint(TestCase):
def test_int(self):
value = nutils.types.strictint(1)
self.assertEqual(value, 1)
self.assertEqual(type(value), int)
def test_numpy_int(self):
value = nutils.types.strictint(numpy.int64(1))
self.assertEqual(value, 1)
self.assertEqual(type(value), int)
def test_float(self):
with self.assertRaises(ValueError):
nutils.types.strictint(1.)
def test_numpy_float(self):
with self.assertRaises(ValueError):
nutils.types.strictint(numpy.float64(1.))
def test_complex(self):
with self.assertRaises(ValueError):
nutils.types.strictint(1+0j)
def test_str(self):
with self.assertRaises(ValueError):
nutils.types.strictint('1')
class strictfloat(TestCase):
def test_int(self):
value = nutils.types.strictfloat(1)
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_numpy_int(self):
value = nutils.types.strictfloat(numpy.int64(1))
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_float(self):
value = nutils.types.strictfloat(1.)
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_numpy_float(self):
value = nutils.types.strictfloat(numpy.float64(1.))
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_complex(self):
with self.assertRaises(ValueError):
nutils.types.strictint(1+0j)
def test_str(self):
with self.assertRaises(ValueError):
nutils.types.strictfloat('1.')
class strictstr(TestCase):
def test_str(self):
value = nutils.types.strictstr('spam')
self.assertEqual(value, 'spam')
self.assertEqual(type(value), str)
def test_int(self):
with self.assertRaises(ValueError):
nutils.types.strictstr(1)
class strict(TestCase):
def test_valid(self):
self.assertEqual(nutils.types.strict[int](1), 1)
def test_invalid(self):
with self.assertRaises(ValueError):
nutils.types.strict[int]('1')
def test_call(self):
with self.assertRaises(TypeError):
nutils.types.strict()
class tupletype(TestCase):
def test_valid1(self):
value = nutils.types.tuple[nutils.types.strictint]([])
self.assertEqual(value, ())
self.assertEqual(type(value), tuple)
def test_valid2(self):
value = nutils.types.tuple[nutils.types.strictint]([1,2,3])
self.assertEqual(value, (1,2,3))
self.assertEqual(type(value), tuple)
def test_invalid(self):
with self.assertRaises(ValueError):
nutils.types.tuple[nutils.types.strictint]([1, 'spam','eggs'])
def test_without_item_constructor(self):
src = 1,2,3
self.assertEqual(nutils.types.tuple(src), tuple(src))
def test_name(self):
self.assertEqual(nutils.types.tuple[nutils.types.strictint].__name__, 'tuple[nutils.types.strictint]')
class frozendict(TestCase):
def test_constructor(self):
src = {'spam': 1, 'eggs': 2.3}
for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items())), ('frozendict', nutils.types.frozendict(src))]:
with self.subTest(name):
frozen = nutils.types.frozendict(value)
self.assertIsInstance(frozen, nutils.types.frozendict)
self.assertEqual(dict(frozen), src)
def test_constructor_invalid(self):
with self.assertRaises(ValueError):
nutils.types.frozendict(['spam', 'eggs', 1])
def test_clsgetitem(self):
T = nutils.types.frozendict[str, float]
src = {1: 2, 'spam': '2.3'}
for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items()))]:
with self.subTest(name):
frozen = T(value)
self.assertIsInstance(frozen, nutils.types.frozendict)
self.assertEqual(dict(frozen), {'1': 2., 'spam': 2.3})
def test_clsgetitem_invalid_types(self):
with self.assertRaises(RuntimeError):
nutils.types.frozendict[str, float, bool]
def test_clsgetitem_invalid_value(self):
T = nutils.types.frozendict[str, float]
with self.assertRaises(ValueError):
T(1)
def test_setitem(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
with self.assertRaises(TypeError):
frozen['eggs'] = 3
def test_delitem(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
with self.assertRaises(TypeError):
del frozen['eggs']
def test_getitem_existing(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
self.assertEqual(frozen['spam'], 1)
def test_getitem_nonexisting(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
with self.assertRaises(KeyError):
frozen['foo']
def test_contains(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
self.assertIn('spam', frozen)
self.assertNotIn('foo', frozen)
def test_iter(self):
src = {'spam': 1, 'eggs': 2.3}
frozen = nutils.types.frozendict(src)
self.assertEqual(frozenset(frozen), frozenset(src))
def test_len(self):
src = {'spam': 1, 'eggs': 2.3}
frozen = nutils.types.frozendict(src)
self.assertEqual(len(frozen), len(src))
def test_hash(self):
src = {'spam': 1, 'eggs': 2.3}
self.assertEqual(hash(nutils.types.frozendict(src)), hash(nutils.types.frozendict(src)))
def test_copy(self):
src = {'spam': 1, 'eggs': 2.3}
copy = nutils.types.frozendict(src).copy()
self.assertIsInstance(copy, dict)
self.assertEqual(copy, src)
def test_pickle(self):
src = {'spam': 1, 'eggs': 2.3}
frozen = pickle.loads(pickle.dumps(nutils.types.frozendict(src)))
self.assertIsInstance(frozen, nutils.types.frozendict)
self.assertEqual(dict(frozen), src)
def test_eq_same_id(self):
src = {'spam': 1, 'eggs': 2.3}
a = nutils.types.frozendict(src)
self.assertEqual(a, a)
def test_eq_other_id(self):
src = {'spam': 1, 'eggs': 2.3}
a = nutils.types.frozendict(src)
b = nutils.types.frozendict(src)
self.assertEqual(a, b)
def test_eq_deduplicated(self):
src = {'spam': 1, 'eggs': 2.3}
a = nutils.types.frozendict(src)
b = nutils.types.frozendict(src)
a == b # this replaces `a.__base` with `b.__base`
self.assertEqual(a, b)
def test_ineq_frozendict(self):
src = {'spam': 1, 'eggs': 2.3}
self.assertNotEqual(nutils.types.frozendict(src), nutils.types.frozendict({'spam': 1}))
def test_ineq_dict(self):
src = {'spam': 1, 'eggs': 2.3}
self.assertNotEqual(nutils.types.frozendict(src), src)
def test_nutils_hash(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
self.assertEqual(nutils.types.nutils_hash(frozen).hex(), '8cf14f109e54707af9c2e66d7d3cdb755cce8243')
class frozenmultiset(TestCase):
def test_constructor(self):
src = 'spam', 'bacon', 'sausage', 'spam'
for name, value in [('tuple', src), ('frozenmultiset', nutils.types.frozenmultiset(src))]:
with self.subTest(name=name):
frozen = nutils.types.frozenmultiset(value)
for item in 'spam', 'bacon', 'sausage':
self.assertEqual({k: tuple(frozen).count(k) for k in set(src)}, {'spam':2, 'bacon':1, 'sausage':1})
def test_clsgetitem(self):
src = False, 1, numpy.int64(2)
frozen = nutils.types.frozenmultiset[nutils.types.strictint](src)
self.assertEqual(set(frozen), {0, 1, 2})
def test_preserve_order(self):
for src in [('spam', 'bacon', 'sausage', 'spam'), ('spam', 'egg', 'spam', 'spam', 'bacon', 'spam')]:
with self.subTest(src=src):
self.assertEqual(tuple(nutils.types.frozenmultiset(src)), src)
def test_and(self):
for l, r, lar in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], ['spam', 'eggs']],
[['spam'], ['eggs'], []],
[['spam','spam']]*3]:
with self.subTest(l=l, r=r, lar=lar):
self.assertEqual(nutils.types.frozenmultiset(l)&nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lar))
with self.subTest(l=r, r=l, lar=lar):
self.assertEqual(nutils.types.frozenmultiset(r)&nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(lar))
def test_sub(self):
for l, r, lmr, rml in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], [], ['spam']],
[['spam'], ['eggs'], ['spam'], ['eggs']],
[['spam'], ['spam'], [], []]]:
with self.subTest(l=l, r=r, lmr=lmr):
self.assertEqual(nutils.types.frozenmultiset(l)-nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lmr))
with self.subTest(l=r, r=l, lmr=rml):
self.assertEqual(nutils.types.frozenmultiset(r)-nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(rml))
def test_pickle(self):
src = 'spam', 'bacon', 'sausage', 'spam'
frozen = pickle.loads(pickle.dumps(nutils.types.frozenmultiset(src)))
self.assertIsInstance(frozen, nutils.types.frozenmultiset)
self.assertEqual(frozen, nutils.types.frozenmultiset(src))
def test_hash(self):
src = 'spam', 'bacon', 'sausage', 'spam'
ref = nutils.types.frozenmultiset(src)
for perm in itertools.permutations(src):
with self.subTest(perm=perm):
self.assertEqual(hash(nutils.types.frozenmultiset(src)), hash(ref))
def test_nutils_hash(self):
for perm in itertools.permutations(('spam', 'bacon', 'sausage', 'spam')):
with self.subTest(perm=perm):
frozen = nutils.types.frozenmultiset(perm)
self.assertEqual(nutils.types.nutils_hash(frozen).hex(), 'f3fd9c6d4741af2e67973457ee6308deddcb714c')
def test_eq(self):
src = 'spam', 'bacon', 'sausage', 'spam'
ref = nutils.types.frozenmultiset(src)
for perm in itertools.permutations(src):
with self.subTest(perm=perm):
self.assertEqual(nutils.types.frozenmultiset(src), ref)
def test_contains(self):
src = 'spam', 'bacon', 'sausage', 'spam'
frozen = nutils.types.frozenmultiset(src)
for item in 'spam', 'bacon', 'eggs':
with self.subTest(item=item):
if item in src:
self.assertIn(item, frozen)
else:
self.assertNotIn(item, frozen)
def test_len(self):
src = 'spam', 'bacon', 'sausage', 'spam'
frozen = nutils.types.frozenmultiset(src)
self.assertEqual(len(frozen), len(src))
def test_nonzero(self):
self.assertTrue(nutils.types.frozenmultiset(['spam', 'eggs']))
self.assertFalse(nutils.types.frozenmultiset([]))
def test_add(self):
l = nutils.types.frozenmultiset(['spam', 'bacon'])
r = nutils.types.frozenmultiset(['sausage', 'spam'])
lpr = nutils.types.frozenmultiset(['spam', 'bacon', 'sausage', 'spam'])
self.assertEqual(l+r, lpr)
def test_isdisjoint(self):
for l, r, disjoint in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], False],
[['spam'], ['eggs'], True],
[['spam'], ['spam'], False]]:
with self.subTest(l=l, r=r, disjoint=disjoint):
self.assertEqual(nutils.types.frozenmultiset(l).isdisjoint(nutils.types.frozenmultiset(r)), disjoint)
class frozenarray(TestCase):
def _test_constructor(self, src, frozen_dtype, src_types=(list,numpy.array,nutils.types.frozenarray)):
src = list(src)
for copy in True, False:
for src_type in src_types:
with self.subTest(copy=copy, src_type=src_type):
frozen = nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)
self.assertIsInstance(frozen, nutils.types.frozenarray)
self.assertEqual(frozen.tolist(), src)
def _test_constructor_raises(self, src, frozen_dtype, exc_type, exc_regex):
src = list(src)
for copy in True, False:
for src_type in list, numpy.array, nutils.types.frozenarray:
with self.subTest(copy=copy, src_type=src_type), self.assertRaisesRegex(exc_type, exc_regex):
nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)
def test_constructor_bool(self):
self._test_constructor((False, True), bool)
def test_constructor_bool_emptyarray(self):
self._test_constructor((), bool, src_types=[list])
def test_constructor_int(self):
self._test_constructor((0,1), int)
def test_constructor_int_upcast(self):
self._test_constructor((False,True), int)
def test_constructor_int_downcast(self):
self._test_constructor((0.,1.), int)
def test_constructor_int_emptyarray(self):
self._test_constructor((), int, src_types=[list])
def test_constructor_float(self):
self._test_constructor((0.,1.), float)
def test_constructor_float_upcast(self):
self._test_constructor((0,1), float)
def test_constructor_float_downcast(self):
src = [0.+0j,1.+0j]
for copy in True, False:
with self.subTest(copy=copy, src_type=list), self.assertRaises(TypeError):
nutils.types.frozenarray(src, copy=copy, dtype=float)
for src_type in numpy.array, nutils.types.frozenarray:
with self.subTest(copy=copy, src_type=src_type), self.assertWarns(numpy.ComplexWarning):
nutils.types.frozenarray(src_type(src), copy=copy, dtype=float)
def test_constructor_complex(self):
self._test_constructor((0+0j,1+1j), complex)
def test_constructor_strictint(self):
self._test_constructor((0,1), nutils.types.strictint)
def test_constructor_strictint_upcast(self):
self._test_constructor((False,True), nutils.types.strictint)
def test_constructor_strictint_downcast(self):
self._test_constructor_raises((0.,1.), nutils.types.strictint, ValueError, '^downcasting .* is forbidden$')
def test_constructor_strictint_emptyarray(self):
self._test_constructor((), nutils.types.strictint, src_types=[list])
def test_constructor_strictfloat(self):
self._test_constructor((0.,1.), nutils.types.strictfloat)
def test_constructor_strictfloat_upcast(self):
self._test_constructor((0,1), nutils.types.strictfloat)
def test_constructor_strictfloat_downcast(self):
self._test_constructor_raises((0.+0j,1.+0j), nutils.types.strictfloat, ValueError, '^downcasting .* is forbidden$')
def test_constructor_invalid_dtype(self):
self._test_constructor_raises((0,1), list, ValueError, '^unsupported dtype:')
def test_clsgetitem(self):
src = [0.,1.]
frozen = nutils.types.frozenarray[nutils.types.strictfloat](src)
self.assertIsInstance(frozen, nutils.types.frozenarray)
self.assertEqual(frozen.tolist(), src)
def test_clsgetitem_invalid(self):
src = [0.,1.]
with self.assertRaises(ValueError):
nutils.types.frozenarray[nutils.types.strictint](src)
def test_nutils_hash(self):
a = nutils.types.frozenarray(numpy.array([[1,2],[3,4]], numpy.int64))
b = nutils.types.frozenarray(numpy.array([[1,3],[2,4]], numpy.int64))
self.assertNotEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b).hex())
self.assertEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b.T).hex())
self.assertEqual(nutils.types.nutils_hash(a).hex(), '42cc3a5e1216c1f0a9921a61a3a2c67025c98d69')
self.assertEqual(nutils.types.nutils_hash(b).hex(), '8f0c9f9a118c42c258f1e69e374aadda99b4be97')
def test_pickle(self):
src = [[1,2],[3,4]]
value = pickle.loads(pickle.dumps(nutils.types.frozenarray(src)))
self.assertIsInstance(value, nutils.types.frozenarray)
self.assertEqual(value, nutils.types.frozenarray(src))
def test_eq_same_instance(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
self.assertEqual(a, a)
def test_eq_not_frozenarray(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
self.assertNotEqual(a, [[1,2],[3,4]])
def test_eq_same_base(self):
base = numpy.array([[1,2],[3,4]], int)
a = nutils.types.frozenarray(base, copy=False)
b = nutils.types.frozenarray(base, copy=False)
self.assertEqual(a, b)
def test_eq_different_array(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
b = nutils.types.frozenarray([[1,3],[2,4]], int)
self.assertNotEqual(a, b)
def test_eq_different_dtype(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
b = nutils.types.frozenarray([[1,2],[3,4]], float)
self.assertNotEqual(a, b)
def test_eq_different_base(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
b = nutils.types.frozenarray([[1,2],[3,4]], int)
self.assertEqual(a, b)
def test_ineq_equal(self):
l = nutils.types.frozenarray([1,2], int)
r = nutils.types.frozenarray([1,2], int)
self.assertFalse(l < r)
self.assertTrue(l <= r)
self.assertFalse(l > r)
self.assertTrue(l >= r)
def test_ineq_smaller(self):
l = nutils.types.frozenarray([1,2], int)
r = nutils.types.frozenarray([2,1], int)
self.assertTrue(l < r)
self.assertTrue(l <= r)
self.assertFalse(l > r)
self.assertFalse(l >= r)
def test_ineq_larger(self):
l = nutils.types.frozenarray([2,1], int)
r = nutils.types.frozenarray([1,2], int)
self.assertFalse(l < r)
self.assertFalse(l <= r)
self.assertTrue(l > r)
self.assertTrue(l >= r)
def test_ineq_incomparable(self):
array = nutils.types.frozenarray([1,2], int)
for op in operator.lt, operator.le, operator.gt, operator.ge:
with self.subTest(op=op), self.assertRaises(TypeError):
op(array, 1)
def test_full(self):
self.assertEqual(nutils.types.frozenarray.full([2,3], 1.5), nutils.types.frozenarray([[1.5]*3]*2, float))
def test_as_numpy_array(self):
a = numpy.array(nutils.types.frozenarray([1,2]))
self.assertIsInstance(a, numpy.ndarray)
class c_array(TestCase):
def test_idempotence(self):
a = numpy.array([1,2,3], dtype=numpy.int64)
P = nutils.types.c_array[numpy.int64]
a_ct = P(a)
self.assertEqual(P(a_ct), a_ct)
def test_list(self):
a = [1,2,3]
a_ct = nutils.types.c_array[numpy.int64](a)
self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)
def test_array(self):
a = numpy.array([1,2,3], dtype=numpy.int64)
a_ct = nutils.types.c_array[numpy.int64](a)
self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)
def test_array_invalid_dtype(self):
a = numpy.array([1,2,3], dtype=numpy.int32)
with self.assertRaisesRegex(ValueError, '^Expected dtype .* but array has dtype .*\\.$'):
a_ct = nutils.types.c_array[numpy.int64](a)
def test_array_noncontiguous(self):
a = numpy.array([[1,2],[3,4]], dtype=numpy.int32).T
with self.assertRaisesRegex(ValueError, '^Array is not contiguous\\.$'):
a_ct = nutils.types.c_array[numpy.int64](a)
def test_wo_getitem(self):
with self.assertRaises(TypeError):
nutils.types.c_array()
class T_Immutable(nutils.types.Immutable):
def __init__(self, x, y, *, z):
pass
class T_Singleton(nutils.types.Singleton):
def __init__(self, x, y, *, z):
pass
@parametrize
class ImmutableFamily(TestCase):
def test_pickle(self):
T = {nutils.types.Immutable: T_Immutable, nutils.types.Singleton: T_Singleton}[self.cls]
a = T(1, 2, z=3)
b = pickle.loads(pickle.dumps(a))
self.assertEqual(a, b)
def test_eq(self):
class T(self.cls):
def __init__(self, x, y):
pass
class U(self.cls):
def __init__(self, x, y):
pass
self.assertEqual(T(1, 2), T(1, 2))
self.assertNotEqual(T(1, 2), T(2, 1))
self.assertNotEqual(T(1, 2), U(1, 2))
def test_canonical_args(self):
class T(self.cls):
def __init__(self, x, y, z=3):
pass
self.assertEqual(T(x=1, y=2), T(1, 2, 3))
def test_keyword_args(self):
class T(self.cls):
def __init__(self, x, y, **kwargs):
pass
a = T(x=1, y=2, z=3)
b = T(1, 2, z=3)
self.assertEqual(a, b)
def test_preprocessors(self):
class T(self.cls):
@nutils.types.apply_annotations
def __init__(self, x: int):
pass
self.assertEqual(T(1), T('1'))
self.assertEqual(T(1), T(x='1'))
def test_nutils_hash(self):
class T(self.cls):
def __init__(self, x, y):
pass
class T1(self.cls, version=1):
def __init__(self, x, y):
pass
class U(self.cls):
def __init__(self, x, y):
pass
self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(1, 2)).hex())
self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(2, 1)).hex())
self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(U(1, 2)).hex())
# Since the hash does not include base classes, the hashes of Immutable and Singleton are the same.
self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), '8c3ba8f0d9eb054ab192f4e4e2ba7442564bdf85')
self.assertEqual(nutils.types.nutils_hash(T1(1, 2)).hex(), 'bab4ee65b5189f544a4242f0e386af76cfa6e31d')
@parametrize.enable_if(lambda cls: cls is nutils.types.Singleton)
def test_deduplication(self):
class T(self.cls):
def __init__(self, x, y):
pass
class U(self.cls):
def __init__(self, x, y):
pass
a = T(1, 2)
b = T(1, 2)
c = T(2, 1)
d = U(1, 2)
self.assertIs(a, b)
self.assertEqual(a, b)
self.assertIsNot(a, c)
self.assertNotEqual(a, c)
self.assertIsNot(a, d)
self.assertNotEqual(a, d)
ImmutableFamily(cls=nutils.types.Immutable)
ImmutableFamily(cls=nutils.types.Singleton)
# vim:sw=2:sts=2:et
|
import importlib
import xarray as xr
import numpy as np
import pandas as pd
import sys
from CASutils import filter_utils as filt
from CASutils import readdata_utils as read
from CASutils import calendar_utils as cal
importlib.reload(filt)
importlib.reload(read)
importlib.reload(cal)
expname=['SASK_CLM5_CLM5F_01.001.FSCAM.sask_1979_2014',
'TOR_CLM5_CLM5F_01.001.FSCAM.tor_1979_2014',
'SID_SNOWD_SNOWDF_01.001.FSCAM.sidsnowd1']
outname='SCAM_CLM5_CLM5F_001'
cityname=['Saskatoon','Toronto','Siderovsk']
citylon=[253.330, 280.617, 82.3139]
citylat=[52.1579, 43.6532, 66.5973]
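# Loop over the three single-column (SCAM) runs, read TREFHT for each city at its single
# column (lon=0, lat=0), and collect everything into one (time, city) DataArray that is
# written out to netCDF.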
for icity in np.arange(0,3,1):
basedir="/project/cas02/islas/CLM5_CLM4/raw/SCAM_new_lowrelax/"
pathout="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/"
fpath=basedir+expname[icity]+"/atm/hist/h0concat.nc"
print(fpath)
dat = read.read_sfc_cesm(fpath,"1979-01-01T12:00:00","2014-12-31T12:00:00")
if (icity == 0):
trefht = xr.DataArray(np.zeros([dat.time.size, 3]), coords=[dat.time, cityname],
dims=['time','city'], name='trefht')
trefht[:,icity] = dat.TREFHT.isel(lon=0,lat=0)
trefht.to_netcdf(path=pathout+"TREFHT_"+outname+".nc")
|
#!/usr/bin/env python
"""
Import experiments into the database
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
"""
from wok.task import Task
from wok.element import DataElementList
from intogen.data.entity import types
from intogen.data.entity.server import EntityServer
from intogen.biomart import biomart_db_connect, DEFAULT_INSERT_SIZE, DEFAULT_DB_ENGINE
from intogen.sql import BatchInsert
from pubmed import Pubmed
task = Task()
@task.main()
def main():
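# Creates the ent_experiment table in the BioMart database and inserts one row per
# (study, platform) pair read from the 'experiment' port, filling in publication details
# from PubMed or, failing that, from the study annotations.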
task.check_conf(["entities", "repositories", "biomart.db"])
conf = task.conf
insert_size = conf.get("biomart.insert_size", DEFAULT_INSERT_SIZE, dtype=int)
if "biomart.study_source" in conf:
study_source_map = conf["biomart.study_source"]
else:
study_source_map = conf.create_element()
log = task.logger()
exp_port = task.ports("experiment")
es = EntityServer(conf["entities"])
em = es.manager()
conn = biomart_db_connect(conf["biomart.db"], log)
db_engine = conf.get("biomart.db.engine", DEFAULT_DB_ENGINE)
cursor = conn.cursor()
cursor.execute("""
CREATE TABLE ent_experiment (
id int(11) NOT NULL,
exp_name varchar(64) NOT NULL,
study_id varchar(32) NOT NULL,
study_source varchar(32) DEFAULT NULL,
study_source_url varchar(512) DEFAULT NULL,
study_link varchar(512) DEFAULT NULL,
pub_pubmed varchar(32) DEFAULT NULL,
pub_title varchar(300) DEFAULT NULL,
pub_authors varchar(300) DEFAULT NULL,
pub_year varchar(16) DEFAULT NULL,
pub_journal varchar(200) DEFAULT NULL,
platf_id varchar(32) NOT NULL,
platf_title varchar(250) DEFAULT NULL,
platf_technology varchar(96) DEFAULT NULL,
PRIMARY KEY (id),
KEY exp_name (exp_name),
KEY pub_pubmed (pub_pubmed),
KEY pub_title (pub_title),
KEY pub_authors (pub_authors),
KEY pub_year (pub_year),
KEY pub_journal (pub_journal),
KEY platf_title (platf_title),
KEY platf_technology (platf_technology)
) ENGINE={} CHARACTER SET utf8 COLLATE utf8_general_ci""".format(db_engine))
ib = BatchInsert(cursor, "ent_experiment",
["id", "exp_name", "study_id", "study_source", "study_source_url", "study_link",
"pub_title", "pub_authors", "pub_year", "pub_pubmed", "pub_journal",
"platf_id", "platf_title", "platf_technology"], insert_size)
pubmed = Pubmed()
for i, exp in enumerate(exp_port, 1):
study_id = exp[0]
platform_id = exp[1]
study = em.find(study_id, types.SOURCE_STUDY)
if study is None:
log.error("{} not found: {}".format(types.SOURCE_STUDY, study_id))
continue
platf = em.find(platform_id, types.SOURCE_PLATFORM)
if platf is None:
log.error("{} not found: {}".format(types.SOURCE_PLATFORM, platform_id))
continue
log.info("Experiment for study {} and platform {} ...".format(study_id, platform_id))
pub = {}
for k in ["title", "short_authors", "date", "journal"]:
pub[k] = None
if "pubmed" in study:
pmid = study["pubmed"]
if isinstance(pmid, (DataElementList, list)):
pmid = pmid[0]
log.warn("Study {} with many pubmed_id's, only the first {} will be considered".format(study_id, pmid))
log.debug("Retrieving information for pubmed_id '{}' ...".format(pmid))
try:
pub = pubmed.find(pmid)
if len(pub) == 0:
log.error("No publication information found for pubmed_id '{}' in experiment ({}, {})".format(pmid, study_id, platform_id))
else:
pub = pub[0]
except Exception as ex:
log.error("Error retrieving pubmed information for experiment ({}, {}) with pubmed_id '{}'".format(study_id, platform_id, pmid))
log.exception(ex)
else:
pmid = None
log.warn("Study {} has no 'pubmed_id' annotation".format(study_id))
if "title" not in study:
log.error("Study {} doesn't have annotation for 'pubmed_id' nor 'title'".format(study_id))
elif "SO/contact_details[0]/contact_name" not in study \
and "SO/contact_details/contact_name" not in study:
log.error("Study {} doesn't have annotation for 'pubmed_id' nor 'SO.contact_details[0].contact_name'".format(study_id))
else:
try:
pub["title"] = study["title"]
if "SO/contact_details[0]/contact_name" in study:
pub["short_authors"] = study["SO/contact_details[0]/contact_name"]
else:
pub["short_authors"] = study["SO/contact_details/contact_name"]
if "SO/submission/pub_date" in study:
pub["date"] = study["SO/submission/pub_date"]
else:
pub["date"] = ""
except Exception as ex:
log.debug(study)
log.exception(ex)
for k, v in pub.items():
if v is not None and isinstance(v, basestring):
pub[k] = v.replace("'", r"\'")
exp_name = "{}; {}".format(study_id, platform_id)
study_source = None
study_source_url = None
study_link = None
parts = study_id.split("-")
if len(parts) >= 2 and parts[0] in study_source_map:
ss = study_source_map[parts[0]]
study_source = ss.get("name")
study_source_url = ss.get("home_url")
try:
study_link = ss.get("link", "").format(parts[1])
except:
pass
ib.insert(i, exp_name, study_id, study_source, study_source_url, study_link,
pub["title"], pub["short_authors"], pub["date"], pmid, pub["journal"],
platform_id, platf["SO/platform_title"], "")
log.debug("{} experiments inserted".format(ib.count))
ib.close()
cursor.close()
conn.close()
em.close()
es.close()
task.start()
|
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with email """
email = "test@aqurds.com"
password = "aqurds123"
user = get_user_model().objects.create_user(email, password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_email_normalize(self):
"""Test if the email is normalized or not"""
email = "test@AQURDS.COM"
password = "aqurds123"
user = get_user_model().objects.create_user(email, password)
self.assertEqual(user.email, email.lower())
def test_email_validation_for_user(self):
"""Test will validate user email.
None is not allowed and will raise ValueError"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, "aqurds123")
def test_create_super_user(self):
"""Test creating a new super user with email"""
email = "super_user@aqurds.com"
password = "super_user_123"
super_user = get_user_model().objects.create_superuser(email, password)
self.assertTrue(super_user.is_superuser)
self.assertTrue(super_user.is_staff)
|
# -*- coding: utf-8 -*-
# NOTES:
# - this file is all about the trust model for the HODL contracts. TRUST NO ONE. VALIDATE ALL.
from __future__ import annotations
import dataclasses
import decimal
import re
import time
import typing as th
import hddcoin.hodl
from clvm_tools.binutils import disassemble, int_to_bytes #type:ignore
from hddcoin.hodl import exc as exc
from hddcoin.hodl.ContractDetails import ContractDetails
from hddcoin.hodl.util import vlog, puzhash2addr
from hddcoin.types.blockchain_format.program import Program, SerializedProgram
from hddcoin.types.blockchain_format.sized_bytes import bytes32
from hddcoin.util.byte_types import hexstr_to_bytes
SECONDS_PER_MONTH = int(86400 * 365 / 12)
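# Pattern for the disassembled HODL contract puzzle. The named groups capture the terms that
# are baked into the reveal and are pulled out by _extractBakedInTerms below.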
conPat = (
'\(a\ \(q\ 4\ \(c\ 44\ \(c\ 11\ \(\)\)\)\ \(c\ \(c\ 92\ \(c\ 23\ \(\)\)\)\ \(c\ \(c\ 52\ \('
'q\ 1\)\)\ \(a\ \(i\ \(=\ 5\ 32\)\ \(q\ 4\ \(c\ 36\ \(c\ 34\ \(c\ 50\ \(\)\)\)\)\ \(a\ \(i'
'\ \(>\ 11\ 38\)\ \(q\ 4\ \(c\ 90\ \(c\ 46\ \(c\ 38\ \(\)\)\)\)\ \(c\ \(c\ 90\ \(c\ 54\ \(c'
'\ \(\-\ 11\ 38\)\ \(\)\)\)\)\ \(\)\)\)\ \(q\ 4\ \(c\ 90\ \(c\ 46\ \(c\ 11\ \(\)\)\)\)\ \(\)'
'\)\)\ 1\)\)\ \(q\ 2\ \(i\ \(=\ 5\ 48\)\ \(q\ 2\ \(i\ \(any\ \(>\ \(/\ \(\*\ \(q\ \.\ 1000'
'\)\ 94\)\ 38\)\ \(q\ \.\ 350\)\)\ \(>\ \(q\ \.\ 0x00e8d4a51000\)\ 38\)\ \(>\ 38\ \(q\ \.\ 0'
'x0d8d726b7177a80000\)\)\)\ \(q\ 8\)\ \(q\ 4\ \(c\ 44\ \(c\ 38\ \(\)\)\)\ \(c\ \(c\ 90\ \(c'
'\ 23\ \(c\ \(\+\ 38\ 94\)\ \(\)\)\)\)\ \(c\ \(c\ 122\ \(c\ 50\ \(\)\)\)\ \(\)\)\)\)\)\ 1\)'
'\ \(q\ 2\ \(i\ \(=\ 5\ 56\)\ \(q\ 4\ \(c\ 44\ \(c\ \(\+\ 38\ 94\)\ \(\)\)\)\ \(c\ \(c\ 124'
'\ \(c\ 126\ \(\)\)\)\ \(c\ \(c\ 90\ \(c\ 46\ \(c\ \(\+\ 38\ 94\)\ \(\)\)\)\)\ \(\)\)\)\)\ '
'\(q\ 2\ \(i\ \(=\ 5\ 40\)\ \(q\ 8\ 42\ 50\ 38\ 94\ 126\ 46\)\ \(q\ 8\)\)\ 1\)\)\ 1\)\)\ 1'
'\)\)\ 1\)\)\)\)\ \(c\ \(q\ \(\(\(q\ \.\ 2\)\ 4\ \.\ 3\)\ \(50\ \.\ 82\)\ 73\ 72\ \.\ 81\)\ '
'\(\((?P<v7>.*)\ \.\ (?P<v5>.*)\)\ (?P<v6>.*)\ 51\ \.\ 62\)\ \((?P<v1>.*)\ \.\ (?P<v8>.*)\)'
'\ (?P<v2>.*)\ (?P<v4>.*)\ \.\ (?P<v3>.*)\)\ 1\)\)'
)
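# Contract terms recovered from the puzzle reveal (see _extractBakedInTerms).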
@dataclasses.dataclass
class BakedInTerms:
deposit_bytes: int
payout_puzhash: str
payout_tstamp: int
reward_bytes: int
contract_id: str
program_name: str
client_pubkey: str
def _cmpRct(tok: str, expected: th.Any, received: th.Any) -> None:
if expected != received:
raise exc.ContractValidationError(f"Unexpected receipt value for {tok}: {received}")
def _cmpCon(tok: str, expected: th.Any, received: th.Any) -> None:
if expected != received:
raise exc.ContractValidationError(
f"Unexpected contract value for {tok}. Expected: {expected}; Received: {received}")
def _atomReprAsInt(s: str) -> int:
"""Translate CLVM atom repr to int."""
if s.startswith("0x"):
return int(s, base=16)
elif s.startswith('"'):
return int.from_bytes(s[1:-1].encode("ascii"), "big")
return int(s)
def _atomReprAsStr(s: str) -> str:
"""Translate CLVM atom repr to str."""
if s.startswith("0x"):
return bytes.fromhex(s[2:]).decode("ascii")
elif s.startswith('"'):
return s[1:-1]
return int_to_bytes(int(s)).decode("ascii")
def _atomReprAsHex(s: str) -> str:
"""Translate CLVM integer atom repr to a 0x-prefixed hex string."""
if s.startswith("0x"):
return s
elif s.startswith('"'):
return "0x" + s[1:-1].encode("ascii").hex()
return hex(int(s))
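# Parse the disassembled reveal with conPat and pull out the baked-in contract terms;
# raises ContractValidationError if the reveal does not match the expected HODL puzzle.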
def _extractBakedInTerms(reveal: str) -> BakedInTerms:
try:
m = th.cast(re.Match, re.search(conPat,
disassemble(Program.from_bytes(hexstr_to_bytes(reveal)))))
yum = BakedInTerms(
deposit_bytes = _atomReprAsInt(m.group("v1")),
payout_puzhash = _atomReprAsHex(m.group("v2")),
payout_tstamp = _atomReprAsInt(m.group("v3")),
reward_bytes = _atomReprAsInt(m.group("v4")),
contract_id = _atomReprAsHex(m.group("v5")),
program_name = _atomReprAsStr(m.group("v6")),
client_pubkey = _atomReprAsHex(m.group("v7")),
)
except Exception:
raise exc.ContractValidationError("Contract reveal is not valid.")
return yum
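# Check that hashing the supplied reveal reproduces the puzzle hash encoded in the
# contract address; returns that hash as bytes32.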
def _validatePuzzleHash(addr: str, reveal: str) -> bytes32:
sp: SerializedProgram = SerializedProgram.fromhex(reveal)
ph = hddcoin.hodl.util.addr2puzhash(addr)
ph_b32 = sp.get_tree_hash()
if ph != ph_b32.hex():
raise exc.ContractValidationError(f"Reveal does not match address")
return ph_b32
def validateContract(# Given to server...
ex_program_name: str,
ex_deposit_bytes: int,
ex_payout_address: str,
ex_client_pubkey: str,
# Expected from server based on program details we had...
ex_term_in_months: decimal.Decimal,
ex_reward_percent: decimal.Decimal,
receipt: th.Dict[str, th.Any],
) -> None: # raises exc.ContractValidationError on issues
"""Make sure that the receipt, and instructions therein, are what we expect.
Raises exc.ContractValidationError if any issues are found.
"""
# The overall trust model here is: TRUST NO ONE. THESE ARE MY PRECIOUS HDDs!!
#
# In the comments below, there are two parties:
#
# 1. The "client" --> This hddcoin application (i.e. this code) or the person running it
# 2. The "server" --> The HODL server that has been contacted to provide contract terms,
# which include a specific contract/puzzle to send an amount to.
#
# Although the HDDcoin team are certainly a trustable bunch and can be expected to provide the
# correct/expected contract terms to the client to follow, if the client is concerned about
# overall security and precious HDD funds (which the client obviously should be!!), the client
# should ABSOLUTELY ASSUME THAT THE SERVER IS NOT TRUSTABLE, UNTIL VERIFIED. More specifically,
# the client should assume that whoever/whatever provided the client the contract terms to
# follow could definitely have been compromised by EVIL HACKERS AFTER THE CLIENT'S PRECIOUS HDD.
#
# Nasty scenarios we should be concerned about include (with overlapping concerns):
#
# 1. the HODL API server could have been hacked
# 2. there could be a man-in-the-middle attack happening, making data untrustworthy
# 3. the contract terms provided could have been falsified in some/any way
# 4. the on-chain contract (smart coin via puzzlehash/reveal) could be bogus
# 5. sneaky hacker farmers could mess with how pushed coins/puzzles are processed on-chain
# 6. and more!
#
# With these concerns in mind, the client needs to be sure that everything is secure before
# committing funds on-chain. The smart contract itself provides excellent on-chain security to
# make sure that no adverse shenanigans can happen once funds are on chain. The purpose in this
# `validateContract` function is to make sure that there are no other surprises in store (as
# listed above).
#
# As stated in the docstring: this function makes sure that the provided contract is
# what the client expects.
#
# What the HODL contract is all about is providing a secure conditional lockbox where:
#
# A) the client can stash a deposit into the box that ONLY THE CLIENT CAN EVER ACCESS
# B) a secure way is provided for the server (i.e. the HDDcoin team) to add the guaranteed
# reward to the lockbox for later payout (at end of contract)
# - IMPORTANT NOTE: the server can never access the deposit in any way whatsoever
# - the HDDcoin team gets reward funds from a HODL reserve in the pre-farm funds
# C) if the client meets the contract terms (i.e. the HODL deposit sits in the box for the
# length of the term), both the deposit and the reward pay out to the client's wallet
# D) if the client decides to cancel the contract, the deposit is returned to the client, and
# the guaranteed reward is returned to the HDDcoin HODL reserve
# - ONLY THE CLIENT CAN EVER CANCEL THE CONTRACT. NOBODY ELSE.
# - once the reward is added, it is GUARANTEED for the client (unless canceled). Sweet!
# E) there are other various bits involved... but they mostly revolve around ensuring that
# the mechanics of the contract are secure against nefarious hackers... I see you there
# reading this... SHOO!! Go away!! ¬_¬
#
# All of those listed things are *if all is as expected*. Again, this is what this validation
# function is about. Even if the server is compromised (which it should not be, but... TRUST
# NOBODY!), the client's HDD must NEVER be placed at risk here. This is fundamental to the HODL
# program, and is supported through all supporting client code, server code, and on-chain code.
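# For reference, the receipt handled below is expected to carry (at least) these keys;
# this is a sketch inferred from the look-ups that follow, not an exhaustive schema:
#   receipt["requested"]    -> program_name, deposit_bytes, payout_address, client_pubkey
#   receipt["receipt_info"] -> contract_id
#   receipt["coin_details"] -> contract_address, reveal, solution_cancel_deposited,
#                              solution_cancel_guaranteed, solution_payout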
vlog(1, "Extracting receipt fields for validation")
try:
rx_program_name = receipt["requested"]["program_name"]
rx_deposit_bytes = receipt["requested"]["deposit_bytes"]
rx_payout_address = receipt["requested"]["payout_address"]
rx_client_pubkey = receipt["requested"]["client_pubkey"]
rx_contract_id = receipt["receipt_info"]["contract_id"]
rx_contract_address = receipt["coin_details"]["contract_address"]
rx_reveal = receipt["coin_details"]["reveal"]
rx_solCancelDep = receipt["coin_details"]["solution_cancel_deposited"]
rx_solCancelGuar = receipt["coin_details"]["solution_cancel_guaranteed"]
rx_solPayout = receipt["coin_details"]["solution_payout"]
except KeyError as e:
raise exc.ContractValidationError(f"Missing receipt key: {e.args[0]}")
# Check the receipt fields (which don't matter that much, but still...)
vlog(1, "Validating requested vs received")
_cmpRct("program_name", ex_program_name, rx_program_name)
_cmpRct("deposit_bytes", ex_deposit_bytes, rx_deposit_bytes)
_cmpRct("payout_address", ex_payout_address, rx_payout_address)
_cmpRct("client_pubkey", ex_client_pubkey, rx_client_pubkey)
# Contract address and reveal must match...
vlog(1, "Validating puzzle hash")
ph_b32 = _validatePuzzleHash(rx_contract_address, rx_reveal)
# Reveal must be the contract we expect...
vlog(1, "Validating puzzle reveal")
ex_payout_ph = f"0x{hddcoin.hodl.util.addr2puzhash(ex_payout_address)}"
ex_reward_bytes = int(ex_deposit_bytes * (ex_reward_percent / 100))
epoch_s = int(time.time())
ex_payout_tstamp = int(epoch_s + (ex_term_in_months * SECONDS_PER_MONTH))
try:
terms = _extractBakedInTerms(rx_reveal)
_cmpCon("deposit_bytes", ex_deposit_bytes, terms.deposit_bytes)
_cmpCon("payout_address", ex_payout_ph, terms.payout_puzhash)
_cmpCon("reward_bytes", ex_reward_bytes, terms.reward_bytes)
_cmpCon("contract_id", f"0x{rx_contract_id}", terms.contract_id)
_cmpCon("program_name", ex_program_name, terms.program_name)
_cmpCon("client_pubkey", f"0x{ex_client_pubkey}", terms.client_pubkey)
except Exception as e:
raise exc.ContractValidationError(f"Error validating contract terms: {e!r}")
if abs(ex_payout_tstamp - terms.payout_tstamp) > 3600: # 1h good enough for validation
msg = f"Unexpected contract value for payout_timestamp: {terms.payout_tstamp}"
raise exc.ContractValidationError(msg)
# Solutions must match...
vlog(1, "Validating solutions")
ex_solCancelDep = str(Program.to([1, ex_deposit_bytes, ph_b32]))
ex_solCancelGuar = str(Program.to([1, ex_deposit_bytes + ex_reward_bytes, ph_b32]))
ex_solPayout = str(Program.to([3, ex_deposit_bytes + ex_reward_bytes, ph_b32]))
_cmpRct("solution_cancel_deposited", ex_solCancelDep, rx_solCancelDep)
_cmpRct("solution_cancel_guaranteed", ex_solCancelGuar, rx_solCancelGuar)
_cmpRct("solution_payout", ex_solPayout, rx_solPayout)
# ALL IS WELL IF WE GOT HERE!
vlog(1, "Contract provided by server is as expected!")
def validateCancellation(ex_contract_id: str,
contractDetails: ContractDetails,
) -> None:
"""Makes sure that the contract details fetched from the HODL server by the cancel request are a
match to what the user expects."""
# This is essentially just cross-checking the contract dict details with what is actually in the
# reveal. We don't need to validate the cancellation solutions since we don't use/need them.
# Those are only for users who want to do it on their own without HODL tooling.
rx_contract_id = contractDetails.contract_id
rx_contract_address = contractDetails.contract_address
rx_reveal = contractDetails.puzzle_reveal
if rx_contract_id != ex_contract_id:
raise exc.CancelValidationError("contract_id mismatch")
vlog(1, "Validating puzzle hash")
_validatePuzzleHash(rx_contract_address, rx_reveal)
vlog(1, "Validating puzzle reveal")
# Not much to validate here. If it is the right contract form, it can only be a HODL contract.
# Even so, to be ABSOLUTELY sure, we'll validate that the baked-in terms match the contract
# details displayed to the user.
terms = _extractBakedInTerms(rx_reveal)
_cmpCon("deposit_bytes", contractDetails.deposit_bytes, terms.deposit_bytes)
_cmpCon("payout_address", contractDetails.payout_address, puzhash2addr(terms.payout_puzhash))
_cmpCon("reward_bytes", contractDetails.reward_bytes, terms.reward_bytes)
_cmpCon("contract_id", f"0x{contractDetails.contract_id}", terms.contract_id)
_cmpCon("program_name", f"{contractDetails.program_name}", terms.program_name)
_cmpCon("client_pubkey", f"0x{contractDetails.client_pubkey}", terms.client_pubkey)
|
from django.apps import AppConfig
class DjangoFiltersMergerConfig(AppConfig):
name = 'django_filtersmerger'
|
from django.apps import AppConfig
class KullisharifappConfig(AppConfig):
name = 'KulliSharifapp'
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2021 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
"""
API for OGC Filter Encoding (FE) constructs and metadata.
Filter Encoding: http://www.opengeospatial.org/standards/filter
Supports version 2.0.2 (09-026r2).
"""
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["dif", "fes", "gml", "ogc", "ows110", "xs", "xsi"])
ns[None] = n.get_namespace("fes")
return ns
namespaces = get_namespaces()
schema = 'http://schemas.opengis.net/filter/2.0/filterAll.xsd'
schema_location = '%s %s' % (namespaces['fes'], schema)
class FilterRequest(object):
""" filter class """
def __init__(self, parent=None, version='2.0.0'):
"""
filter Constructor
Parameters
----------
- parent: parent etree.Element object (default is None)
- version: version (default is '2.0.0')
"""
self.version = version
self._root = etree.Element(util.nspath_eval('fes:Filter', namespaces))
if parent is not None:
self._root.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
def set(self, parent=False, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None,
identifier=None):
"""
Construct and process a GetRecords request
Parameters
----------
- parent: the parent Element object. If this is not provided, then generate a standalone request
- qtype: type of resource to query (e.g. service, dataset)
- keywords: list of keywords
- propertyname: the ValueReference to Filter against
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- identifier: the dc:identifier to query against with a PropertyIsEqualTo. Ignores all other inputs.
"""
# Set the identifier if passed. Ignore other parameters
dc_identifier_equals_filter = None
if identifier is not None:
dc_identifier_equals_filter = PropertyIsEqualTo('dc:identifier', identifier)
self._root.append(dc_identifier_equals_filter.toXML())
return self._root
# Set the query type if passed
dc_type_equals_filter = None
if qtype is not None:
dc_type_equals_filter = PropertyIsEqualTo('dc:type', qtype)
# Set a bbox query if passed
bbox_filter = None
if bbox is not None:
bbox_filter = BBox(bbox)
# Set a keyword query if passed
keyword_filter = None
if len(keywords) > 0:
if len(keywords) > 1: # loop multiple keywords into an Or
ks = []
for i in keywords:
ks.append(PropertyIsLike(propertyname, "*%s*" % i, wildCard="*"))
keyword_filter = Or(operations=ks)
elif len(keywords) == 1: # one keyword
keyword_filter = PropertyIsLike(propertyname, "*%s*" % keywords[0], wildCard="*")
# And together filters if more than one exists
filters = [_f for _f in [keyword_filter, bbox_filter, dc_type_equals_filter] if _f]
if len(filters) == 1:
self._root.append(filters[0].toXML())
elif len(filters) > 1:
self._root.append(And(operations=filters).toXML())
return self._root
def setConstraint(self, constraint, tostring=False):
"""
Construct and process a GetRecords request
Parameters
----------
- constraint: An OgcExpression object
- tostring (optional): return as string
"""
self._root.append(constraint.toXML())
if tostring:
return util.element_to_string(self._root, xml_declaration=False)
return self._root
def setConstraintList(self, constraints, tostring=False):
"""
Construct and process a GetRecords request
Parameters
----------
- constraints: A list of OgcExpression objects
The list is interpreted like so:
[a,b,c]                              -> a || b || c
[[a,b,c]]                            -> a && b && c
[[a,b],[c],[d],[e]] or [[a,b],c,d,e] -> (a && b) || c || d || e
- tostring (optional): return as string
"""
ors = []
if len(constraints) == 1:
if isinstance(constraints[0], OgcExpression):
flt = self.setConstraint(constraints[0])
else:
self._root.append(And(operations=constraints[0]).toXML())
flt = self._root
if tostring:
return util.element_to_string(flt, xml_declaration=False)
else:
return flt
for c in constraints:
if isinstance(c, OgcExpression):
ors.append(c)
elif isinstance(c, list) or isinstance(c, tuple):
if len(c) == 1:
ors.append(c[0])
elif len(c) >= 2:
ands = []
for sub in c:
if isinstance(sub, OgcExpression):
ands.append(sub)
ors.append(And(operations=ands))
self._root.append(Or(operations=ors).toXML())
if tostring:
return util.element_to_string(self._root, xml_declaration=False)
return self._root
class FilterCapabilities(object):
"""Abstraction for Filter_Capabilities 2.0"""
def __init__(self, elem):
if elem is None:
self.spatial_operands = []
self.spatial_operators = []
self.temporal_operators = []
self.temporal_operands = []
self.scalar_comparison_operators = []
self.conformance = {}
return
# Spatial_Capabilities
self.spatial_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval(
'fes:Spatial_Capabilities/fes:GeometryOperands/fes:GeometryOperand', namespaces))]
self.spatial_operators = []
for f in elem.findall(util.nspath_eval(
'fes:Spatial_Capabilities/fes:SpatialOperators/fes:SpatialOperator', namespaces)):
self.spatial_operators.append(f.attrib['name'])
# Temporal_Capabilities
self.temporal_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval(
'fes:Temporal_Capabilities/fes:TemporalOperands/fes:TemporalOperand', namespaces))]
self.temporal_operators = []
for f in elem.findall(util.nspath_eval(
'fes:Temporal_Capabilities/fes:TemporalOperators/fes:TemporalOperator', namespaces)):
self.temporal_operators.append(f.attrib['name'])
# Scalar_Capabilities
self.scalar_comparison_operators = [f.text for f in elem.findall(util.nspath_eval(
'fes:Scalar_Capabilities/fes:ComparisonOperators/fes:ComparisonOperator', namespaces))]
# Conformance
self.conformance = {}
for f in elem.findall(util.nspath_eval('fes:Conformance/fes:Constraint', namespaces)):
self.conformance[f.attrib.get('name')] = f.find(util.nspath_eval('ows110:DefaultValue', namespaces)).text
def setsortby(parent, propertyname, order='ASC'):
"""
constructs a SortBy element
Parameters
----------
- parent: parent etree.Element object
- propertyname: the ValueReference
- order: the SortOrder (default is 'ASC')
"""
tmp = etree.SubElement(parent, util.nspath_eval('fes:SortBy', namespaces))
tmp2 = etree.SubElement(tmp, util.nspath_eval('fes:SortProperty', namespaces))
etree.SubElement(tmp2, util.nspath_eval('fes:ValueReference', namespaces)).text = propertyname
etree.SubElement(tmp2, util.nspath_eval('fes:SortOrder', namespaces)).text = order
class SortProperty(object):
def __init__(self, propertyname, order='ASC'):
self.propertyname = propertyname
self.order = order.upper()
if self.order not in ['DESC', 'ASC']:
raise ValueError("SortOrder can only be 'ASC' or 'DESC'")
def toXML(self):
node0 = etree.Element(util.nspath_eval("fes:SortProperty", namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:SortOrder', namespaces)).text = self.order
return node0
class SortBy(object):
def __init__(self, properties):
self.properties = properties
def toXML(self):
node0 = etree.Element(util.nspath_eval("fes:SortBy", namespaces))
for prop in self.properties:
node0.append(prop.toXML())
return node0
class OgcExpression(object):
def __init__(self):
pass
class BinaryComparisonOpType(OgcExpression):
""" Super class of all the property operation classes"""
def __init__(self, propertyoperator, propertyname, literal, matchcase=True):
self.propertyoperator = propertyoperator
self.propertyname = propertyname
self.literal = literal
self.matchcase = matchcase
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.propertyoperator, namespaces))
if not self.matchcase:
node0.set('matchCase', 'false')
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:Literal', namespaces)).text = self.literal
return node0
class PropertyIsEqualTo(BinaryComparisonOpType):
""" PropertyIsEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsEqualTo', propertyname, literal, matchcase)
class PropertyIsNotEqualTo(BinaryComparisonOpType):
""" PropertyIsNotEqualTo class """
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsNotEqualTo', propertyname, literal, matchcase)
class PropertyIsLessThan(BinaryComparisonOpType):
"""PropertyIsLessThan class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsLessThan', propertyname, literal, matchcase)
class PropertyIsGreaterThan(BinaryComparisonOpType):
"""PropertyIsGreaterThan class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsGreaterThan', propertyname, literal, matchcase)
class PropertyIsLessThanOrEqualTo(BinaryComparisonOpType):
"""PropertyIsLessThanOrEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsLessThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsGreaterThanOrEqualTo(BinaryComparisonOpType):
"""PropertyIsGreaterThanOrEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsGreaterThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsLike(OgcExpression):
"""PropertyIsLike class"""
def __init__(self, propertyname, literal, escapeChar='\\', singleChar='_', wildCard='%', matchCase=True):
self.propertyname = propertyname
self.literal = literal
self.escapeChar = escapeChar
self.singleChar = singleChar
self.wildCard = wildCard
self.matchCase = matchCase
def toXML(self):
node0 = etree.Element(util.nspath_eval('fes:PropertyIsLike', namespaces))
node0.set('wildCard', self.wildCard)
node0.set('singleChar', self.singleChar)
node0.set('escapeChar', self.escapeChar)
if not self.matchCase:
node0.set('matchCase', 'false')
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:Literal', namespaces)).text = self.literal
return node0
class PropertyIsNull(OgcExpression):
"""PropertyIsNull class"""
def __init__(self, propertyname):
self.propertyname = propertyname
def toXML(self):
node0 = etree.Element(util.nspath_eval('fes:PropertyIsNull', namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
return node0
class PropertyIsBetween(OgcExpression):
"""PropertyIsBetween class"""
def __init__(self, propertyname, lower, upper):
self.propertyname = propertyname
self.lower = lower
self.upper = upper
def toXML(self):
node0 = etree.Element(util.nspath_eval('fes:PropertyIsBetween', namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
node1 = etree.SubElement(node0, util.nspath_eval('fes:LowerBoundary', namespaces))
etree.SubElement(node1, util.nspath_eval('fes:Literal', namespaces)).text = '%s' % self.lower
node2 = etree.SubElement(node0, util.nspath_eval('fes:UpperBoundary', namespaces))
etree.SubElement(node2, util.nspath_eval('fes:Literal', namespaces)).text = '%s' % self.upper
return node0
class BBox(OgcExpression):
"""Construct a BBox, two pairs of coordinates (west-south and east-north)"""
def __init__(self, bbox, crs=None):
self.bbox = bbox
self.crs = crs
def toXML(self):
tmp = etree.Element(util.nspath_eval('fes:BBOX', namespaces))
etree.SubElement(tmp, util.nspath_eval('fes:ValueReference', namespaces)).text = 'ows:BoundingBox'
tmp2 = etree.SubElement(tmp, util.nspath_eval('gml:Envelope', namespaces))
if self.crs is not None:
tmp2.set('srsName', self.crs)
etree.SubElement(tmp2, util.nspath_eval('gml:lowerCorner', namespaces)).text = '{} {}'.format(
self.bbox[0], self.bbox[1])
etree.SubElement(tmp2, util.nspath_eval('gml:upperCorner', namespaces)).text = '{} {}'.format(
self.bbox[2], self.bbox[3])
return tmp
# BINARY
class BinaryLogicOpType(OgcExpression):
""" Binary Operators: And / Or """
def __init__(self, binary_operator, operations):
self.binary_operator = binary_operator
if operations is None or len(operations) < 2:
raise ValueError("Binary operations (And / Or) require a minimum of two operations to operate against")
self.operations = operations
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.binary_operator, namespaces))
for op in self.operations:
node0.append(op.toXML())
return node0
class And(BinaryLogicOpType):
def __init__(self, operations):
super(And, self).__init__('fes:And', operations)
class Or(BinaryLogicOpType):
def __init__(self, operations):
super(Or, self).__init__('fes:Or', operations)
# UNARY
class UnaryLogicOpType(OgcExpression):
""" Unary Operator: Not """
def __init__(self, unary_operator, operations):
self.unary_operator = unary_operator
self.operations = operations
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.unary_operator, namespaces))
for op in self.operations:
node0.append(op.toXML())
return node0
class Not(UnaryLogicOpType):
def __init__(self, operations):
super(Not, self).__init__('fes:Not', operations)
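# Illustrative usage sketch: build a filter that matches records whose dc:type equals 'dataset'
# AND that fall inside a bounding box, OR whose csw:AnyText contains 'ocean'. The queryable
# names are common CSW examples used for demonstration, not values mandated by this module.
if __name__ == '__main__':
    typed = PropertyIsEqualTo('dc:type', 'dataset')
    box = BBox([-10.0, 40.0, 10.0, 60.0])
    keyword = PropertyIsLike('csw:AnyText', '%ocean%')
    fr = FilterRequest()
    # [[typed, box], keyword] reads as (typed AND box) OR keyword, per setConstraintList above
    print(fr.setConstraintList([[typed, box], keyword], tostring=True))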
|
# Generated by Django 3.2 on 2021-05-05 06:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'verbose_name': '用户管理', 'verbose_name_plural': '用户管理'},
),
]
|
import unittest
import random
import math
import io
import struct
from mitmproxy.io import tnetstring
MAXINT = 2 ** (struct.Struct('i').size * 8 - 1) - 1
FORMAT_EXAMPLES = {
b'0:}': {},
b'0:]': [],
b'51:5:hello,39:11:12345678901#4:this,4:true!0:~4:\x00\x00\x00\x00,]}':
{b'hello': [12345678901, b'this', True, None, b'\x00\x00\x00\x00']},
b'5:12345#': 12345,
b'12:this is cool,': b'this is cool',
b'19:this is unicode \xe2\x98\x85;': u'this is unicode \u2605',
b'0:,': b'',
b'0:;': u'',
b'0:~': None,
b'4:true!': True,
b'5:false!': False,
b'10:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00,': b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'24:5:12345#5:67890#5:xxxxx,]': [12345, 67890, b'xxxxx'],
b'18:3:0.1^3:0.2^3:0.3^]': [0.1, 0.2, 0.3],
b'243:238:233:228:223:218:213:208:203:198:193:188:183:178:173:168:163:158:153:148:143:138:133:128:123:118:113:108:103:99:95:91:87:83:79:75:71:67:63:59:55:51:47:43:39:35:31:27:23:19:15:11:hello-there,]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]': [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[b'hello-there']]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] # noqa
}
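# A rough key to the trailing type markers in the examples above (inferred from the samples,
# not from a spec included in this file): ',' bytes, ';' unicode text, '#' integer, '^' float,
# '!' boolean, '~' null, ']' list and '}' dict; every payload is prefixed by its byte length
# and a colon.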
def get_random_object(random=random, depth=0):
"""Generate a random serializable object."""
# The probability of generating a scalar value increases as the depth increases.
# This ensures that we bottom out eventually.
if random.randint(depth, 10) <= 4:
what = random.randint(0, 1)
if what == 0:
n = random.randint(0, 10)
l = []
for _ in range(n):
l.append(get_random_object(random, depth + 1))
return l
if what == 1:
n = random.randint(0, 10)
d = {}
for _ in range(n):
n = random.randint(0, 100)
k = str([random.randint(32, 126) for _ in range(n)])
d[k] = get_random_object(random, depth + 1)
return d
else:
what = random.randint(0, 4)
if what == 0:
return None
if what == 1:
return True
if what == 2:
return False
if what == 3:
if random.randint(0, 1) == 0:
return random.randint(0, MAXINT)
else:
return -1 * random.randint(0, MAXINT)
n = random.randint(0, 100)
return bytes([random.randint(32, 126) for _ in range(n)])
class Test_Format(unittest.TestCase):
def test_roundtrip_format_examples(self):
for data, expect in FORMAT_EXAMPLES.items():
self.assertEqual(expect, tnetstring.loads(data))
self.assertEqual(
expect, tnetstring.loads(tnetstring.dumps(expect)))
self.assertEqual((expect, b''), tnetstring.pop(data))
def test_roundtrip_format_random(self):
for _ in range(500):
v = get_random_object()
self.assertEqual(v, tnetstring.loads(tnetstring.dumps(v)))
self.assertEqual((v, b""), tnetstring.pop(tnetstring.dumps(v)))
def test_roundtrip_format_unicode(self):
for _ in range(500):
v = get_random_object()
self.assertEqual(v, tnetstring.loads(tnetstring.dumps(v)))
self.assertEqual((v, b''), tnetstring.pop(tnetstring.dumps(v)))
def test_roundtrip_big_integer(self):
i1 = math.factorial(30000)
s = tnetstring.dumps(i1)
i2 = tnetstring.loads(s)
self.assertEqual(i1, i2)
class Test_FileLoading(unittest.TestCase):
def test_roundtrip_file_examples(self):
for data, expect in FORMAT_EXAMPLES.items():
s = io.BytesIO()
s.write(data)
s.write(b'OK')
s.seek(0)
self.assertEqual(expect, tnetstring.load(s))
self.assertEqual(b'OK', s.read())
s = io.BytesIO()
tnetstring.dump(expect, s)
s.write(b'OK')
s.seek(0)
self.assertEqual(expect, tnetstring.load(s))
self.assertEqual(b'OK', s.read())
def test_roundtrip_file_random(self):
for _ in range(500):
v = get_random_object()
s = io.BytesIO()
tnetstring.dump(v, s)
s.write(b'OK')
s.seek(0)
self.assertEqual(v, tnetstring.load(s))
self.assertEqual(b'OK', s.read())
def test_error_on_absurd_lengths(self):
s = io.BytesIO()
s.write(b'1000000000:pwned!,')
s.seek(0)
with self.assertRaises(ValueError):
tnetstring.load(s)
self.assertEqual(s.read(1), b':')
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(Test_Format))
suite.addTest(loader.loadTestsFromTestCase(Test_FileLoading))
return suite
|
import os
import sys
base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(base_path)
from torch import optim
from metallic.data.benchmarks import get_benchmarks
from metallic.data.dataloader import MetaDataLoader
from metallic.models import OmniglotCNN
from metallic.metalearners import FOMAML, MAML, Reptile, MinibatchProx, ANIL
from metallic.trainer import Trainer
from metallic.utils import Logger
# ---- hyperparameters ----
ALGO = 'maml'
BATCH_SIZE = 16
N_WAY = 5
K_SHOT = 1
OUTER_LR = 0.001
INNER_LR = 0.4
INNER_STEPS = 1
N_EPOCHES = 100
N_ITERS_PER_EPOCH = 500
N_ITERS_TEST = 600
N_WORKERS = 5
# -------------------------
ALGO_LIST = {
'maml': MAML,
'fomaml': FOMAML,
'reptile': Reptile,
'minibatchprox': MinibatchProx,
'anil': ANIL
}
def set_trainer():
train_dataset, val_dataset, _ = get_benchmarks(
name = 'omniglot',
root = os.path.join(base_path, 'data'),
n_way = N_WAY,
k_shot = K_SHOT,
)
train_loader = MetaDataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False)
val_loader = MetaDataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False)
model = OmniglotCNN(N_WAY)
if ALGO == 'anil':
in_optim = optim.SGD(model.classifier.parameters(), lr=INNER_LR)
else:
in_optim = optim.SGD(model.parameters(), lr=INNER_LR)
out_optim = optim.Adam(model.parameters(), lr=OUTER_LR)
metalearner = ALGO_LIST[ALGO](
model = model,
in_optim = in_optim,
out_optim = out_optim,
root = os.path.join(base_path, 'checkpoints'),
inner_steps = INNER_STEPS
)
logger = Logger(
root = os.path.join(base_path, 'logs'),
n_iters_per_epoch = N_ITERS_PER_EPOCH,
log_basename = metalearner.alg_name,
verbose = True
)
trainer = Trainer(
metalearner = metalearner,
train_loader = train_loader,
val_loader = val_loader,
n_epoches = N_EPOCHES,
n_iters_per_epoch = N_ITERS_PER_EPOCH,
n_iters_test = N_ITERS_TEST,
logger = logger
)
return trainer
if __name__ == '__main__':
trainer = set_trainer()
trainer.run_train()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('workshops', '0048_auto_20150916_0441'),
]
operations = [
migrations.AlterField(
model_name='person',
name='gender',
field=models.CharField(max_length=1, default='U', choices=[('U', 'Prefer not to say (undisclosed)'), ('M', 'Male'), ('F', 'Female'), ('O', 'Other')]),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='airport_iata',
field=models.CharField(help_text='Please use its 3-letter IATA code (<a href="http://www.airportcodes.aero/" target="_blank">http://www.airportcodes.aero/</a>) to tell us where you\'re located.', max_length=3, verbose_name='Nearest major airport'),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='email',
field=models.EmailField(max_length=254, verbose_name='Email address'),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='gender',
field=models.CharField(max_length=1, default='U', choices=[('U', 'Prefer not to say'), ('F', 'Female'), ('M', 'Male'), ('O', 'Other (enter below)')]),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='lessons',
field=models.ManyToManyField(help_text='Please mark ALL that apply.', to='workshops.Lesson', verbose_name="Topic and lessons you're comfortable teaching"),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='occupation',
field=models.CharField(blank=True, help_text='Please choose the one that best describes you.', choices=[('undisclosed', 'Prefer not to say'), ('undergrad', 'Undergraduate student'), ('grad', 'Graduate student'), ('postdoc', 'Post-doctoral researcher'), ('faculty', 'Faculty'), ('research', 'Research staff (including research programmer)'), ('support', 'Support staff (including technical support)'), ('librarian', 'Librarian/archivist'), ('commerce', 'Commercial software developer '), ('', 'Other (enter below)')], max_length=40, default='undisclosed', verbose_name='What is your current occupation/career stage?'),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='twitter',
field=models.CharField(blank=True, max_length=100, default='', verbose_name='Twitter username'),
),
]
|
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for path.bzl"""
load("@bazel_skylib//lib:unittest.bzl", "analysistest", "asserts", "unittest")
load("//:mappings.bzl", "pkg_mkdirs")
load("//:path.bzl", "compute_data_path")
##########
# Test compute_data_path
##########
def _compute_data_path_test_impl(ctx):
env = analysistest.begin(ctx)
target_under_test = analysistest.target_under_test(env)
# Subtle: This allows you to vendor the library into your own repo at some
# arbitrary path.
expect = ctx.attr.expected_path
if expect.startswith('tests'):
expect = ctx.label.package + expect[5:]
asserts.equals(
env,
expect,
compute_data_path(ctx, ctx.attr.in_path),
)
return analysistest.end(env)
compute_data_path_test = analysistest.make(
_compute_data_path_test_impl,
attrs = {
"in_path": attr.string(mandatory = True),
"expected_path": attr.string(mandatory = True),
},
)
def _test_compute_data_path(name):
pkg_mkdirs(
name = "dummy",
dirs = [],
tags = ["manual"],
)
compute_data_path_test(
name = name + "_normal_test",
target_under_test = ":dummy",
in_path = "a/b/c",
expected_path = "tests/a/b/c",
)
compute_data_path_test(
name = name + "_absolute_test",
target_under_test = ":dummy",
in_path = "/a/b/c",
expected_path = "a/b/c",
)
compute_data_path_test(
name = name + "_relative_test",
target_under_test = ":dummy",
in_path = "./a/b/c",
expected_path = "tests/a/b/c",
)
compute_data_path_test(
name = name + "_empty_test",
target_under_test = ":dummy",
in_path = "./",
expected_path = "tests",
)
compute_data_path_test(
name = name + "_empty2_test",
target_under_test = ":dummy",
in_path = "./.",
expected_path = "tests",
)
def path_tests(name):
"""Declare path.bzl analysis tests."""
_test_compute_data_path(name=name + "_compute_data_path")
|
from __future__ import print_function, absolute_import
import unittest
from conda.version import ver_eval, VersionSpec, VersionOrder, normalized_version
class TestVersionSpec(unittest.TestCase):
def test_version_order(self):
versions = [
(VersionOrder("0.4"), [[0], [0], [4]]),
(VersionOrder("0.4.0"), [[0], [0], [4], [0]]),
(VersionOrder("0.4.1a.vc11"),[[0], [0], [4], [1, 'a'],[0, 'vc', 11]]),
(VersionOrder("0.4.1.rc"), [[0], [0], [4], [1], [0, 'rc']]),
(VersionOrder("0.4.1.vc11"), [[0], [0], [4], [1],[0, 'vc', 11]]),
(VersionOrder("0.4.1"), [[0], [0], [4], [1]]),
(VersionOrder("0.5*"), [[0], [0], [5, '*']]),
(VersionOrder("0.5a1"), [[0], [0], [5, 'a', 1]]),
(VersionOrder("0.5b3"), [[0], [0], [5, 'b', 3]]),
(VersionOrder("0.5C1"), [[0], [0], [5, 'c', 1]]),
(VersionOrder("0.5z"), [[0], [0], [5, 'z']]),
(VersionOrder("0.5za"), [[0], [0], [5, 'za']]),
(VersionOrder("0.5"), [[0], [0], [5]]),
(VersionOrder("0.9.6"), [[0], [0], [9], [6]]),
(VersionOrder("0.960923"), [[0], [0], [960923]]),
(VersionOrder("1.0"), [[0], [1], [0]]),
(VersionOrder("1.0.4a3"), [[0], [1], [0], [4, 'a', 3]]),
(VersionOrder("1.0.4b1"), [[0], [1], [0], [4, 'b', 1]]),
(VersionOrder("1.0.4"), [[0], [1], [0], [4]]),
(VersionOrder("1.1dev1"), [[0], [1], [1, 'DEV', 1]]),
(VersionOrder("1.1a1"), [[0], [1], [1, 'a', 1]]),
(VersionOrder("1.1.dev1"), [[0], [1], [1], [0, 'DEV', 1]]),
(VersionOrder("1.1.a1"), [[0], [1], [1], [0, 'a', 1]]),
(VersionOrder("1.1"), [[0], [1], [1]]),
(VersionOrder("1.1.post1"), [[0], [1], [1], [0, float('inf'), 1]]),
(VersionOrder("1.1.1dev1"), [[0], [1], [1], [1, 'DEV', 1]]),
(VersionOrder("1.1.1rc1"), [[0], [1], [1], [1, 'rc', 1]]),
(VersionOrder("1.1.1"), [[0], [1], [1], [1]]),
(VersionOrder("1.1.1post1"), [[0], [1], [1], [1, float('inf'), 1]]),
(VersionOrder("1.1post1"), [[0], [1], [1, float('inf'), 1]]),
(VersionOrder("2g6"), [[0], [2, 'g', 6]]),
(VersionOrder("2.0b1pr0"), [[0], [2], [0, 'b', 1, 'pr', 0]]),
(VersionOrder("2.2be.ta29"), [[0], [2], [2, 'be'], [0, 'ta', 29]]),
(VersionOrder("2.2be5ta29"), [[0], [2], [2, 'be', 5, 'ta', 29]]),
(VersionOrder("2.2beta29"), [[0], [2], [2, 'beta', 29]]),
(VersionOrder("2.2.0.1"), [[0], [2], [2],[0],[1]]),
(VersionOrder("3.1.1.6"), [[0], [3], [1], [1], [6]]),
(VersionOrder("3.2.p.r0"), [[0], [3], [2], [0, 'p'], [0, 'r', 0]]),
(VersionOrder("3.2.pr0"), [[0], [3], [2], [0, 'pr', 0]]),
(VersionOrder("3.2.pr.1"), [[0], [3], [2], [0, 'pr'], [1]]),
(VersionOrder("5.5.kw"), [[0], [5], [5], [0, 'kw']]),
(VersionOrder("11g"), [[0], [11, 'g']]),
(VersionOrder("14.3.1"), [[0], [14], [3], [1]]),
(VersionOrder("14.3.1.post26.g9d75ca2"),
[[0],[14],[3],[1],[0,float('inf'),26],[0,'g',9,'d',75,'ca',2]]),
(VersionOrder("1996.07.12"), [[0], [1996], [7], [12]]),
(VersionOrder("1!0.4.1"), [[1], [0], [4], [1]]),
(VersionOrder("1!3.1.1.6"), [[1], [3], [1], [1], [6]]),
(VersionOrder("2!0.4.1"), [[2], [0], [4], [1]]),
]
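# A reading of the expected parses above: each version string splits on '.', every component
# splits further into alternating numeric and alphabetic parts, and the leading sub-list is the
# epoch (0 unless given explicitly with '!'). 'post' maps to float('inf') so it sorts after
# everything else, while 'dev' (normalised to 'DEV') sorts before other alphabetic tags, as the
# relative order of 1.1dev1 and 1.1a1 above shows.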
# check parser
for v, l in versions:
self.assertEqual(v.version, l)
self.assertEqual(VersionOrder("0.4.1.rc"), VersionOrder(" 0.4.1.RC "))
self.assertEqual(normalized_version(" 0.4.1.RC "), VersionOrder("0.4.1.rc"))
with self.assertRaises(ValueError):
VersionOrder("")
with self.assertRaises(ValueError):
VersionOrder(" ")
with self.assertRaises(ValueError):
VersionOrder("5.5++")
with self.assertRaises(ValueError):
VersionOrder("5.5..mw")
with self.assertRaises(ValueError):
VersionOrder("5.5.mw.")
with self.assertRaises(ValueError):
VersionOrder("!")
with self.assertRaises(ValueError):
VersionOrder("a!1.0")
# check __eq__
self.assertEqual(VersionOrder(" 0.4.rc "), VersionOrder("0.4.RC"))
self.assertEqual(VersionOrder("0.4"), VersionOrder("0.4.0"))
self.assertNotEqual(VersionOrder("0.4"), VersionOrder("0.4.1"))
self.assertEqual(VersionOrder("0.4.a1"), VersionOrder("0.4.0a1"))
self.assertNotEqual(VersionOrder("0.4.a1"), VersionOrder("0.4.1a1"))
# check __lt__
self.assertEqual(sorted(versions, key=lambda x: x[0]), versions)
# test openssl convention
openssl = [VersionOrder(k) for k in ['1.0.1', '1.0.1post.a', '1.0.1post.b',
'1.0.1post.z', '1.0.1post.za', '1.0.2']]
self.assertEqual(sorted(openssl), openssl)
def test_pep440(self):
# this list must be in sorted order (slightly modified from the PEP 440 test suite
# https://github.com/pypa/packaging/blob/master/tests/test_version.py)
VERSIONS = [
# Implicit epoch of 0
"1.0a1", "1.0a2.dev456", "1.0a12.dev456", "1.0a12",
"1.0b1.dev456", "1.0b2", "1.0b2.post345.dev456", "1.0b2.post345",
"1.0c1.dev456", "1.0c1", "1.0c3", "1.0rc2", "1.0.dev456", "1.0",
"1.0.post456.dev34", "1.0.post456", "1.1.dev1",
"1.2.r32+123456", "1.2.rev33+123456",
"1.2+abc", "1.2+abc123def", "1.2+abc123",
"1.2+123abc", "1.2+123abc456", "1.2+1234.abc", "1.2+123456",
# Explicit epoch of 1
"1!1.0a1", "1!1.0a2.dev456", "1!1.0a12.dev456", "1!1.0a12",
"1!1.0b1.dev456", "1!1.0b2", "1!1.0b2.post345.dev456", "1!1.0b2.post345",
"1!1.0c1.dev456", "1!1.0c1", "1!1.0c3", "1!1.0rc2", "1!1.0.dev456", "1!1.0",
"1!1.0.post456.dev34", "1!1.0.post456", "1!1.1.dev1",
"1!1.2.r32+123456", "1!1.2.rev33+123456",
"1!1.2+abc", "1!1.2+abc123def", "1!1.2+abc123",
"1!1.2+123abc", "1!1.2+123abc456", "1!1.2+1234.abc", "1!1.2+123456",
]
version = [VersionOrder(v) for v in VERSIONS]
self.assertEqual(version, sorted(version))
def test_hexrd(self):
VERSIONS = ['0.3.0.dev', '0.3.3']
vos = [VersionOrder(v) for v in VERSIONS]
self.assertEqual(sorted(vos), vos)
def test_ver_eval(self):
self.assertEqual(ver_eval('1.7.0', '==1.7'), True)
self.assertEqual(ver_eval('1.7.0', '<=1.7'), True)
self.assertEqual(ver_eval('1.7.0', '<1.7'), False)
self.assertEqual(ver_eval('1.7.0', '>=1.7'), True)
self.assertEqual(ver_eval('1.7.0', '>1.7'), False)
self.assertEqual(ver_eval('1.6.7', '>=1.7'), False)
self.assertEqual(ver_eval('2013a', '>2013b'), False)
self.assertEqual(ver_eval('2013k', '>2013b'), True)
self.assertEqual(ver_eval('3.0.0', '>2013b'), False)
self.assertEqual(ver_eval('1.0.0', '>1.0.0a'), True)
self.assertEqual(ver_eval('1.0.0', '>1.0.0*'), True)
def test_ver_eval_errors(self):
self.assertRaises(RuntimeError, ver_eval, '3.0.0', '><2.4.5')
self.assertRaises(RuntimeError, ver_eval, '3.0.0', '!!2.4.5')
self.assertRaises(RuntimeError, ver_eval, '3.0.0', '!')
def test_match(self):
for vspec, res in [
('1.7*', True), ('1.7.1', True), ('1.7.0', False),
('1.7', False), ('1.5*', False), ('>=1.5', True),
('!=1.5', True), ('!=1.7.1', False), ('==1.7.1', True),
('==1.7', False), ('==1.7.2', False), ('==1.7.1.0', True),
]:
m = VersionSpec(vspec)
self.assertEqual(m.match('1.7.1'), res)
def test_local_identifier(self):
"""The separator for the local identifier should be either `.` or `+`"""
# a valid versionstr should match itself
versions = (
'1.7.0',
'1.7.0.post123',
'1.7.0.post123.gabcdef9',
'1.7.0.post123+gabcdef9',
)
for version in versions:
m = VersionSpec(version)
self.assertTrue(m.match(version))
|
import logging
def say(n):
logging.basicConfig(level=logging.DEBUG)
for i in range(n):
logging.info(str(i) + ": Hello world")
say(1)
if __name__ == "__main__":
say(3)
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Bundle class with support for npm dependencies."""
from __future__ import absolute_import, print_function
from collections import defaultdict
import semver
from flask_assets import Bundle as BundleBase
from pkg_resources import parse_version
from speaklater import is_lazy_string
__all__ = ('LazyNpmBundle', 'NpmBundle', 'extract_deps', 'make_semver', )
class NpmBundle(BundleBase):
"""Bundle extension with a name and npm dependencies.
The npm dependencies are used to generate a package.json file.
"""
def __init__(self, *contents, **options):
"""Initialize the named bundle.
:param name: name of the bundle
:type name: str
:param npm: npm dependencies
:type npm: dict
"""
self.npm = options.pop('npm', {})
super(NpmBundle, self).__init__(*contents, **options)
class LazyNpmBundle(NpmBundle):
"""Magically evaluate lazy strings as file names."""
def _get_contents(self):
"""Create strings from lazy strings."""
return [
str(value) if is_lazy_string(value) else value
for value in super(LazyNpmBundle, self)._get_contents()
]
contents = property(_get_contents, NpmBundle._set_contents)
def extract_deps(bundles, log=None):
"""Extract the dependencies from the bundle and its sub-bundles."""
def _flatten(bundle):
deps = []
if hasattr(bundle, 'npm'):
deps.append(bundle.npm)
for content in bundle.contents:
if isinstance(content, BundleBase):
deps.extend(_flatten(content))
return deps
flatten_deps = []
for bundle in bundles:
flatten_deps.extend(_flatten(bundle))
packages = defaultdict(list)
for dep in flatten_deps:
for pkg, version in dep.items():
packages[pkg].append(version)
deps = {}
for package, versions in packages.items():
deps[package] = semver.max_satisfying(versions, '*', True)
if log and len(versions) > 1:
log('Warn: {0} version {1} resolved to: {2}'.format(
repr(package), versions, repr(deps[package])
))
return deps
def make_semver(version_str):
"""Make a semantic version from Python PEP440 version.
Semantic versions does not handle post-releases.
"""
v = parse_version(version_str)
major = v._version.release[0]
try:
minor = v._version.release[1]
except IndexError:
minor = 0
try:
patch = v._version.release[2]
except IndexError:
patch = 0
prerelease = []
if v._version.pre:
prerelease.append(''.join(str(x) for x in v._version.pre))
if v._version.dev:
prerelease.append(''.join(str(x) for x in v._version.dev))
prerelease = '.'.join(prerelease)
# Create semver
version = '{0}.{1}.{2}'.format(major, minor, patch)
if prerelease:
version += '-{0}'.format(prerelease)
if v.local:
version += '+{0}'.format(v.local)
return version
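# Illustrative sketch of how a few PEP 440 strings map onto semver-style strings via
# make_semver(). The expected outputs in the trailing comment follow from the logic above and
# are assumptions, not documented behaviour.
if __name__ == '__main__':
    for ver in ('2.4', '1.0.0.dev1', '2.0.0rc1', '1.2.3.post4'):
        print(ver, '->', make_semver(ver))
    # e.g. '2.4' -> '2.4.0', '1.0.0.dev1' -> '1.0.0-dev1', '2.0.0rc1' -> '2.0.0-rc1';
    # the post-release suffix of '1.2.3.post4' is dropped because semver has no equivalent.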
|
from typing import Tuple
import torch
from torch import Tensor
def homogeneous(A: Tensor, b: Tensor) -> Tensor:
"""
Converts heterogeneous matrix into homogeneous matrix.
:param A: Heterogeneous matrix of shape [*, N, N].
:param b: Heterogeneous vector of shape [*, N, 1].
:return: Homogeneous matrix of shape [*, N + 1, N + 1].
"""
assert A.shape[:-2] == b.shape[:-2]
assert A.shape[-2] == A.shape[-1] == b.shape[-2]
assert b.shape[-1] == 1
s, n = A.shape[:-2], A.shape[-2]
c = torch.zeros(s + (1, n), dtype=A.dtype, device=A.device)
d = torch.ones(s + (1, 1), dtype=A.dtype, device=A.device)
M = torch.cat(
[
torch.cat([A, b], dim=-1),
torch.cat([c, d], dim=-1),
],
dim=-2,
)
return M
def heterogeneous(M: Tensor) -> Tuple[Tensor, Tensor]:
"""
Converts homogeneous matrix into heterogeneous matrix.
:param M: Homogeneous matrix of shape [*, N + 1, N + 1].
:return: Heterogeneous matrix and vector of shapes [*, N, N] and [*, N, 1] respectively.
"""
assert M.shape[-2] == M.shape[-1]
n = M.shape[-2] - 1
Ab, cd = M.split([n, 1], dim=-2)
A, b = Ab.split([n, 1], dim=-1)
c, d = cd.split([n, 1], dim=-1)
A, b = A / d, b / d
return A, b
def affine(x: Tensor, A: Tensor, b: Tensor) -> Tensor:
"""
Applies an affine transformation to x given A and b.
:param x: Vector of shape [*, N, 1].
:param A: Matrix of shape [*, N, N].
:param b: Vector of shape [*, N, 1].
:return: Vector of shape [*, N, 1].
"""
assert x.ndim == A.ndim == b.ndim
assert x.shape[-2] == A.shape[-2] == A.shape[-1] == b.shape[-2]
assert x.shape[-1] == b.shape[-1] == 1
y = A @ x + b
return y
def eye_like(x: Tensor) -> Tensor:
"""
Return an identity matrix of the same shape as x.
:param x: Matrix of shape [*, M, N].
:return: Identity matrix of shape [*, M, N].
"""
m, n = x.shape[-2], x.shape[-1]
return torch.eye(m, n, dtype=x.dtype, device=x.device).expand_as(x)
def diag(x: Tensor) -> Tensor:
"""
Returns a diagonal matrix given a vector.
:param x: Vector of shape [*, M, 1].
:return: Diagonal matrix of shape [*, M, M].
"""
assert x.shape[-1] == 1
m = x.shape[-2]
return torch.eye(m, dtype=x.dtype, device=x.device) * x
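# Illustrative sketch: an affine map y = A @ x + b applied directly and through its homogeneous
# form should agree, and heterogeneous() should invert homogeneous() whenever the last row of M
# is [0, ..., 0, 1]. The shapes below are arbitrary example values.
if __name__ == '__main__':
    A = torch.randn(3, 3)
    b = torch.randn(3, 1)
    x = torch.randn(3, 1)
    M = homogeneous(A, b)      # [[A, b], [0, 1]] with shape [4, 4]
    A2, b2 = heterogeneous(M)  # recovers A and b, since d == 1 here
    assert torch.allclose(A, A2) and torch.allclose(b, b2)
    assert torch.allclose(affine(x, A, b), A2 @ x + b2)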
|
#!/usr/bin/env python
# Run the various build scripts
import sys
import os
from parse import parse_machines
from machines import machines
from assemblies import assemblies
from vitamins import vitamins
from printed import printed
from guides import guides
from publish import publish
def build(do_publish=0):
print("Build")
print("-----")
outfile = 'hardware.json'
oldfile = 'backup.json'
print("Backup current json...")
if os.path.isfile(outfile) and not os.path.isfile(oldfile):
os.rename(outfile, oldfile)
errorlevel = 0
errorlevel += parse_machines()
if errorlevel == 0:
errorlevel += vitamins()
if errorlevel == 0:
errorlevel += printed()
if errorlevel == 0:
errorlevel += assemblies()
if errorlevel == 0:
errorlevel += machines()
if errorlevel == 0:
errorlevel += guides()
if errorlevel == 0 and do_publish > 0:
publish()
# if everything is ok then delete the backup - it is no longer required
if errorlevel == 0 and os.path.isfile(oldfile):
os.remove(oldfile)
return errorlevel
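# Command-line entry point: an optional positional argument greater than zero (e.g. running
# the script as `python build.py 1`; the file name is only illustrative) enables publishing
# after a successful build, while no argument builds without publishing.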
if __name__ == '__main__':
if len(sys.argv) == 2:
sys.exit(build(int(sys.argv[1])))
else:
sys.exit(build(0))
|
"""CouchDB Models"""
from kai.model.blog import Article
from kai.model.documentation import Documentation
from kai.model.generics import Comment, Rating
from kai.model.human import Human
from kai.model.paste import Paste
from kai.model.snippet import Snippet
from kai.model.traceback import Traceback
|
from flask import render_template
from flask import request
from flask import send_file
from flask import make_response
import cv2
import urllib
import numpy as np
# Add the pytorch folder to our script path
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '/Users/danielblackburn/space-apps-2019/beautiful_earth/pytorch-CycleGAN-and-pix2pix')
import inference
from inference import infer
from app import app
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html', title='Beautiful Earth')
@app.route('/image', methods = ['POST'])
def image():
json = request.json
imageUrl = json['imageUrl']
quadKey = json['quadKey']
temp = inference.infer(imageUrl)
filename = "/Users/danielblackburn/space-apps-2019/beautiful_earth/app/static/"+quadKey+".png"
cv2.imwrite(filename, temp)
response = make_response(send_file(filename, mimetype='image/jpeg', as_attachment=True, attachment_filename=quadKey))
response.headers['X-quadKey'] = quadKey
return response
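# Example request shape (illustrative only): POST /image with a JSON body such as
# {"imageUrl": "<source tile url>", "quadKey": "<tile quadkey>"}; the response streams the
# generated image back with the quadkey echoed in the X-quadKey header.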
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for user data."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import hashlib
import imghdr
import logging
import re
from constants import constants
from core.domain import role_services
from core.domain import user_domain
from core.platform import models
import feconf
import python_utils
import utils
from google.appengine.api import urlfetch
current_user_services = models.Registry.import_current_user_services()
(user_models, audit_models) = models.Registry.import_models(
[models.NAMES.user, models.NAMES.audit])
# Size (in px) of the gravatar being retrieved.
GRAVATAR_SIZE_PX = 150
# Data url for images/avatar/user_blue_72px.png.
# Generated using utils.convert_png_to_data_url.
DEFAULT_IDENTICON_DATA_URL = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEwAAABMCAYAAADHl1ErAAAAAXNSR0IArs4c6QAADhtJREFUeAHtXHlwVdUZ/859jyxmIQESyCaglC0iAgkJIntrIpvKphSwY2ttxbFOp9R/cGGqdhykLaMVO2OtoyRSCEKNEpYKyBIVQ1iNkBhNMCtb8shiQpJ3b7/fTW7m5uUlecu9L4nTM5Pce8895zvf93vnnPud833fEdQLKXb5jsC6%2BuZERZbHKaSMYRbGKERxgpQQUkSIIigEbAmFavlfrUKiVhCVcFa%2BIJEvJOlCcNCAnNKMFQ0o58vEfPgmhS5Mn0ot8n2KIs8lIZJJUfy8almIJqbxhRDSIbJKe2s%2BXvWlV/RcrGwqYGGp20bI1LyaeVmjKMrodp4EycGBAy6MjgsrSxozqG7O5GgxcVREeEigNDAwwBpmsUiRKGu3y1caGltstQ3yjbOFV6sPnypXTuRXBReU2GLqGprHkUKSRlMIUcD3WyUakGbbt7JYyzf6agpgYfe9O8kui/U8nB7UhJIkUTljwrBTTz449mZKUlyCEBTnjTCKQiX7T5ScfGP3Rf9j5ysny7IyTKXHPwYP690WSXnZtvcXp71pw1ldQwELm59%2BlyzbX%2BbeNL%2Btscb4EYOyNz2ZWD99wtAFnGdxxoQBefbs85f3rHsjJyivuGo60wsATe51WZJkWW/LWnXGgDZUEoYAFr58x0B7beOLPHGv5XnFIpGoS0mKOfze%2Bpmj/f2smNR9lm42teQ/8vLRgv0nyuZwVwtm1Ows5BZLSMBz1RkrbnjLiNeAhaWmPWgn%2BxYeejwkRMu9idH7tm%2BYE8/z0EhvmfOmPs9/RQ9tOJx3IKc8lUixkqBKC1nW2vat3u0NXY8Bi1%2B%2Bw6%2BktnETD7%2BnwEB4iP/pL/5xf03U4IBZ3jBkdN2K641Hkn/7YWh17c1JoM3D9PW4kIB1eRkrmjxpyyPAeK4aLttbPuAhOIU5aHpm1cTMZ1ffuRT8eMKED%2BooL6Wd%2B2Bj%2BtnFUGeYyVzJYl3Kc9sld9t2W8Dw%2BWkTWuz2fdxQ9ACr9P3Jfy7%2BZuSw0HnuNtwb5Ysqaw4mPJb5k%2BYW%2BVZuv9xqsaRWZ60%2B7w4vbgEWnrJ1hp3kTO5ZYUPCAnK%2B3bYiitWDWHca7O2yrI6U3r5yR8U1W2MiC2%2BzkLS4ev%2BaY67y1a749VQBYLUIZT/AGhUTduS7f68Y39/AgozgGbxDBsgCmSBbT/Jr710CDMMQPYvHf2DC2Mj9p95efA8TCNKI9MNrEGSALJAJskFGV%2BTocUhigrfbWz5jYtH4VdrAMksBdYVnI8vYJ/8q83hhmW0WEy23WKx39/Qh6LaHQXXA1xBgYc5isBL4/scCFoC3QCbIBhkhK2TGi65St4CpeharDvgaYoJnIv15GHaFQRBkg4w8p02BzF0VRH6XgEGDV5VS1rOgOvTHCb47wfXvIBtkhE4JmSG7/r3%2B3ilg6toQyx1OUEr7i56lF8zde8gIWVEPSz1g4IyGU8CwkMbaEMudNg3eWd0fXR5khcyQXcXAiYSdAMMWDY/ltVhIY23IdXr8kjqh21%2BzRKvMogUYAAtHQToBhv0sbNFg16GvLaQdmTfjGTJDdmCgYuHQSIfe07pTSqewn3V9z6qrvb1F48Crzx6xNTR4QXoE9tN4c2%2ByfufWqudC3VbmAYzNPwZrkf6dL%2B4LSm5Q9vkrVH79B6qs%2BoH8B1goatAtNCIqmOZOiabw4G5VJMNYREdhDD7ae6J0USsmtEwj3t7DYLCwK83f8WbbzauZP7/kq53SxiY7vfmfC5R24Fv6prTrDVEWgqbfEUlPLY2nlKkxGv%2BmXbFzG7H4/eE8g/tZyO92zbDSPoe1WncUgT14X4G189NimvjobnrhX6e6BQuo8DCho2crafnzB2n%2BMwe4PL5H5iVgACx4wEltli%2B1sXbA%2BGkNcmCwUN%2BY%2BI%2B3WOjZt3Lpl68cpQoefu6m4%2Bcqae7TWfTfk%2BXuVnWrvA4LFRtUVockjKxKc8sJmMJsWWsiON/U9eJvNmXTtk%2B%2BdYt5Z4WZX0p/bjYtmBbn7LURefaw%2BVuvwoQnBliTYCxu7WFskQb1WROjcvliKlibM/IMAQv8siD0643H6etiGx7NSBbYUlXCbRipgKnme859Ysl4jwwDrnKaV2SjDe%2B0tu9qnZ7KsQWch/YxVpt6KunZexieUVPDSIJjCC86k3lwyikJ0di%2BMS09/3au2iuMbuDr4mpKN2CIO%2BMLVnpgA4yAlVRX1ziV4fODrwOv2k2bDM4UVvEkXeaMJ0PyXn3/nCF0HIkAE2ADjICVpChiLArBMcSxsJHPmdmXjCTXiVZRRS19VVTdKd%2BIDA0bYCW1%2BWcRvGiMIN4Vjb1flHb1yrD8rM9LDKOlJ6RhA6ww6au%2BD3A50hcy%2Bt5sRRP8FpSYo8zqsBnDPax13oJ/ltEgafSqam5SU7NdezTtWsHrTzOShg2wYtWP3SQ5wZnNjMZA80Z9s1mkO9CtMakdDRtgJcGnFK3C869D6wY%2BRISp7loGUnROKtKkdtqxYawkzQGXdwNUN0nnrHiXGxxoJf40e0fEhdpRg29xoZT7RTRsgJV%2B8e0%2BJTdqJIwd4kZpz4pOGWN%2BG5Lq2s38wQHXMzZdq2XiAlllgP2%2BaH6yOX4xGjbAinejlVq0CG9l10T3rNT99wwnf96KMyvNuHMoDR0UaAr5dmwYK1YrhAoYXLtNaa2N6DAW5vFF6qLClGZeeHSyKXRBVMMGWLFaoUZYEPzgTWuxjfC6lROI/RgMb2bZ7JGUaOIcqWEDrDDp50MCBA0YLokDQRgx0p%2BdTezH4PDG88dxI8LotaeneU7AhZo6bPK5hwkVMERYuFDX6yLT2JDx99/fTVY2anibYiOCaPuGuayydDB%2BeUu2U30NG2AlCaFcRAmEo3QqaVLGynm30a6X5sHz2uMWksZH0pHXF9CIYeb/zho2CAqTgoMDvoTXCmJ3EI7isQRuVpw9KYqytyykhxk8qASuJoD84mNTKGvjveSLFQQwUeOaGCNE0Flqvs5o8b/9gZ8xwyMmj404NComZJyrzHtbLjTIjxZNv1X9C/S30pXqRrLVdd4lh7EjOX4oPfHAOHrzD9Np9l1RZMHnygeJ45kOZXxaPJ6byr6WueotdfAjhI73rGdu2ZXnn5oY7QM2OjZxx8hw%2BvPjCepf2bUfqJz/Llc1qHpb1OBAiosMpoFB5i%2BtOnLV%2BoTgL9ypYYZ8bZ0tOd6QmuUNbCiFMoN9GPM0TCbeXYoZcgvhr48kOyLlVF6AESf1UwV7G88jBbC/ISqsjzDb62wAC9UmydhoAaz6b/tWcIgQul7ntI8woMNCxQZstQOGSFYeqQriDeGI0Ud47jU2gIEae8kmtlZsWllpB6zNO2UXZw
cg3rDXOO0jDbdhEIDoXs1zB6y1A4YHhP3iiuBMOJXh3tfJzuZ/qBbfX65nR5UGqmto8TUL2OoqAgZoWMNEY6KTMhOa%2Bt4ehCDfmxjz8c4X5y3UChp5hVk/j63Vpwuu0zdlNVTIrkuFfC1hkOobO%2B//Qw8LD/an26JDaFRsKI2KCWU76kCaOi6CoHYYnZY9d/DjAzllC/lDmFWz75EFevqdFmGIkbbL9hREsiI40yg/11wGhxex9PlXV%2BjEhatUU99ZQdUzpr%2BH08n1mkb1L%2BfiVf0rGs5Lo2nxkXT3HUPZ0S7WawAhsxrFy6HPwKJDY/zQqYehAPey1%2BDgDxfsSxkPwZPYaTmU7S7BPWDXkWLafayYLlWaaidW2cASK5nBWzJzOD3AG5YebCgqw5dvP4PoXab1Oveu3znK5xQIOPW31DZchL/6M6vv2sn%2B68scK3b1jDlo%2B6Hv6G878ij/e1M3cbtiQc3HML4vKZbWrbyTpowe3G1Z7SVH7e7cmHZmGXePSmtI4FhnQfVOAQMBNfhdse/CwvzsO/cf6ykapKlZpq0HCmlzxlc%2B6U2akK5c2XJNf3x4At3D29hdJUTrTnz0wxlwOrEIy5Kugum7BAyEtaGJwKVrH63mrSDn0besEdNTmz9XJ%2B6uGOoL%2BbAr/OXJJIoM77jryx%2Bh0iGL0mSENnc1FDX%2BO6gVWqZ2RfQ9I5oLQgj75fxO/q%2BvpJ9TnXTxlevr6cPjlyj5iUx2bb%2BsZ7UesqlgsayQWf/S8b7bHobC3QWYrv3rZ%2BwuXuhIs88/Y4v8vfWz4BvrdoBpj4BBejWE2W4/yupTGMJ%2BD21O/emf3j1t2bTNrYD8PgWkv7/FflvUwE8uFFelMAg2i8Uy05UTBlwCTAWtLUieJ8XA2MiQIxXX6xNYI%2B6XC3Wep%2Br5xz/Jsszij1qDVREprp4s4DJgGmjaMQzcUA5bgaNkRTbH3GxSf5SEVMoxRBUMlrnHMIB//ArounxbjgZZuWWtSzlokmyGkwWv4Bm8QwZ1GLpxZgUYcquHaRLgQ6A/SobJ4IiGpeyc7RE9ja55V/aKEOID5s/3R8loQjkeVsTzwmmeF2oYuFlamT5xFeII/4qh3LMmgR/oWT4/rEgPhONxWEKifUJW4mWikfpyvr5nBbNIkUQeD8BU7lm9fxyWHgDHA9fYQlzHg/0w/6qjuZzqdKwvb/J9PveiAl4Hz%2BE5q%2B8duKYXHjHSjkf6sXkqWyEZK4QFLIQ51iihWrr2CJKCeE6fzm2pax8Grm8e6acHDffth0YSLdF9CCoZvFye55okRU7gIetV1AkPuRJZSCfZUdefezJMYf3v0MhOwHVzLKlQxAWSRJlQlDr%2BzrPcUjjbGwbyBB2mCKH62/K7KwywjWM8b5CQq%2BH9x%2B%2BCSVZiFKH8eI4ldQQOz4jJ/P/Bt86QcSFPPVqZA50Qu4NwFK7i3tHK7HEEJ5reOFr5fwkK97jkk8ywAAAAAElFTkSuQmCC') # pylint: disable=line-too-long
class UserSettings(python_utils.OBJECT):
"""Value object representing a user's settings.
Attributes:
user_id: str. The unique ID of the user.
gae_id: str. The ID of the user retrieved from GAE.
email: str. The user email.
role: str. Role of the user. This is used in conjunction with
PARENT_ROLES to determine which actions the user can perform.
username: str or None. Identifiable username to display in the UI.
last_agreed_to_terms: datetime.datetime or None. When the user last
agreed to the terms of the site.
last_started_state_editor_tutorial: datetime.datetime or None. When
the user last started the state editor tutorial.
last_started_state_translation_tutorial: datetime.datetime or None. When
the user last started the state translation tutorial.
last_logged_in: datetime.datetime or None. When the user last logged in.
last_created_an_exploration: datetime.datetime or None. When the user
last created an exploration.
last_edited_an_exploration: datetime.datetime or None. When the user
last edited an exploration.
profile_picture_data_url: str or None. User uploaded profile picture as
a dataURI string.
default_dashboard: str or None. The default dashboard of the user.
user_bio: str. User-specified biography.
subject_interests: list(str) or None. Subject interests specified by
the user.
first_contribution_msec: float or None. The time in milliseconds when
the user first contributed to Oppia.
preferred_language_codes: list(str) or None. Exploration language
preferences specified by the user.
preferred_site_language_code: str or None. System language preference.
preferred_audio_language_code: str or None. Audio language preference.
"""
def __init__(
self, user_id, gae_id, email, role, username=None,
last_agreed_to_terms=None, last_started_state_editor_tutorial=None,
last_started_state_translation_tutorial=None, last_logged_in=None,
last_created_an_exploration=None, last_edited_an_exploration=None,
profile_picture_data_url=None, default_dashboard=None,
creator_dashboard_display_pref=(
constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['CARD']),
user_bio='', subject_interests=None, first_contribution_msec=None,
preferred_language_codes=None, preferred_site_language_code=None,
preferred_audio_language_code=None, deleted=False):
"""Constructs a UserSettings domain object.
Args:
user_id: str. The unique ID of the user.
gae_id: str. The ID of the user retrieved from GAE.
email: str. The user email.
role: str. Role of the user. This is used in conjunction with
PARENT_ROLES to determine which actions the user can perform.
username: str or None. Identifiable username to display in the UI.
last_agreed_to_terms: datetime.datetime or None. When the user
last agreed to the terms of the site.
last_started_state_editor_tutorial: datetime.datetime or None. When
the user last started the state editor tutorial.
last_started_state_translation_tutorial: datetime.datetime or None.
When the user last started the state translation tutorial.
last_logged_in: datetime.datetime or None. When the user last
logged in.
last_created_an_exploration: datetime.datetime or None. When the
user last created an exploration.
last_edited_an_exploration: datetime.datetime or None. When the
user last edited an exploration.
profile_picture_data_url: str or None. User uploaded profile
picture as a dataURI string.
default_dashboard: str|None. The default dashboard of the user.
creator_dashboard_display_pref: str. The creator dashboard of the
user.
user_bio: str. User-specified biography.
subject_interests: list(str) or None. Subject interests specified by
the user.
first_contribution_msec: float or None. The time in milliseconds
when the user first contributed to Oppia.
preferred_language_codes: list(str) or None. Exploration language
preferences specified by the user.
preferred_site_language_code: str or None. System language
preference.
preferred_audio_language_code: str or None. Default language used
for audio translations preference.
deleted: bool. Whether the user has requested removal of their
account.
"""
self.user_id = user_id
self.gae_id = gae_id
self.email = email
self.role = role
self.username = username
self.last_agreed_to_terms = last_agreed_to_terms
self.last_started_state_editor_tutorial = (
last_started_state_editor_tutorial)
self.last_started_state_translation_tutorial = (
last_started_state_translation_tutorial)
self.last_logged_in = last_logged_in
self.last_edited_an_exploration = last_edited_an_exploration
self.last_created_an_exploration = last_created_an_exploration
self.profile_picture_data_url = profile_picture_data_url
self.default_dashboard = default_dashboard
self.creator_dashboard_display_pref = creator_dashboard_display_pref
self.user_bio = user_bio
self.subject_interests = (
subject_interests if subject_interests else [])
self.first_contribution_msec = first_contribution_msec
self.preferred_language_codes = (
preferred_language_codes if preferred_language_codes else [])
self.preferred_site_language_code = preferred_site_language_code
self.preferred_audio_language_code = preferred_audio_language_code
self.deleted = deleted
def validate(self):
"""Checks that user_id and email fields of this UserSettings domain
object are valid.
Raises:
ValidationError: user_id is not str.
ValidationError: gae_id is not str.
ValidationError: email is not str.
ValidationError: email is invalid.
ValidationError: role is not str.
ValidationError: Given role does not exist.
"""
if not isinstance(self.user_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected user_id to be a string, received %s' % self.user_id)
if not self.user_id:
raise utils.ValidationError('No user id specified.')
if (self.gae_id is not None and
not isinstance(self.gae_id, python_utils.BASESTRING)):
raise utils.ValidationError(
'Expected gae_id to be a string, received %s' %
self.gae_id
)
if not isinstance(self.email, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected email to be a string, received %s' % self.email)
if not self.email:
raise utils.ValidationError('No user email specified.')
if ('@' not in self.email or self.email.startswith('@')
or self.email.endswith('@')):
raise utils.ValidationError(
'Invalid email address: %s' % self.email)
if not isinstance(self.role, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected role to be a string, received %s' % self.role)
if self.role not in role_services.PARENT_ROLES:
raise utils.ValidationError('Role %s does not exist.' % self.role)
if not isinstance(
self.creator_dashboard_display_pref, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected dashboard display preference to be a string, '
'received %s' % self.creator_dashboard_display_pref)
if (self.creator_dashboard_display_pref not in
list(constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS.values(
))):
raise utils.ValidationError(
'%s is not a valid value for the dashboard display '
'preferences.' % (self.creator_dashboard_display_pref))
@property
def truncated_email(self):
"""Returns truncated email by replacing last two characters before @
with period.
Returns:
str. The truncated email address of this UserSettings
domain object.
"""
first_part = self.email[: self.email.find('@')]
last_part = self.email[self.email.find('@'):]
if len(first_part) <= 1:
first_part = '..'
elif len(first_part) <= 3:
first_part = '%s..' % first_part[0]
else:
first_part = first_part[:-3] + '..'
return '%s%s' % (first_part, last_part)
@property
def normalized_username(self):
"""Returns username in lowercase or None if it does not exist.
Returns:
str or None. If this object has a 'username' property, returns
the normalized version of the username. Otherwise, returns None.
"""
return self.normalize_username(self.username)
@classmethod
def normalize_username(cls, username):
"""Returns the normalized version of the given username,
or None if the passed-in 'username' is None.
Args:
username: str. Identifiable username to display in the UI.
Returns:
str or None. The normalized version of the given username,
or None if the passed-in username is None.
"""
return username.lower() if username else None
@classmethod
def require_valid_username(cls, username):
"""Checks if the given username is valid or not.
Args:
username: str. The username to validate.
Raises:
ValidationError: An empty username is supplied.
ValidationError: The given username exceeds the maximum allowed
number of characters.
ValidationError: The given username contains non-alphanumeric
characters.
ValidationError: The given username contains reserved substrings.
"""
if not username:
raise utils.ValidationError('Empty username supplied.')
elif len(username) > constants.MAX_USERNAME_LENGTH:
raise utils.ValidationError(
'A username can have at most %s characters.'
% constants.MAX_USERNAME_LENGTH)
elif not re.match(feconf.ALPHANUMERIC_REGEX, username):
raise utils.ValidationError(
'Usernames can only have alphanumeric characters.')
else:
# Disallow usernames that contain the system usernames or the
# strings "admin" or "oppia".
reserved_usernames = set(feconf.SYSTEM_USERS.values()) | set([
'admin', 'oppia'])
for reserved_username in reserved_usernames:
if reserved_username in username.lower().strip():
raise utils.ValidationError(
'This username is not available.')
def is_user_id_correct(user_id):
"""Verify that the user ID is in a correct format.
Args:
user_id: str. The user ID to be checked.
Returns:
bool. True when the ID is in a correct format, False otherwise.
"""
return all((
user_id.islower(),
user_id.startswith('uid_'),
len(user_id) == user_models.USER_ID_LENGTH))
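# Illustrative (hypothetical) example: assuming user_models.USER_ID_LENGTH
# were 36, an ID of the form 'uid_' followed by 32 lowercase characters would
# pass this check, while an uppercase ID or one of the wrong length would not.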
def is_username_taken(username):
""""Returns whether the given username has already been taken.
Args:
username: str. Identifiable username to display in the UI.
Returns:
bool. Whether the given username is taken.
"""
return user_models.UserSettingsModel.is_normalized_username_taken(
UserSettings.normalize_username(username))
def get_email_from_user_id(user_id):
"""Gets the email from a given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
str. user_email corresponding to the given user_id.
Raises:
Exception: The user is not found.
"""
user_settings = get_user_settings(user_id)
return user_settings.email
def get_email_from_username(username):
"""Gets the email for a given username.
Args:
username: str. Identifiable username to display in the UI.
Returns:
str or None. If the user with given username does not exist,
return None. Otherwise return the corresponding user_email.
"""
user_model = user_models.UserSettingsModel.get_by_normalized_username(
UserSettings.normalize_username(username))
if user_model is None:
return None
else:
return user_model.email
def get_user_id_from_username(username):
"""Gets the user_id for a given username.
Args:
username: str. Identifiable username to display in the UI.
Returns:
str or None. If the user with given username does not exist, return
None. Otherwise return the user_id corresponding to given username.
"""
user_model = user_models.UserSettingsModel.get_by_normalized_username(
UserSettings.normalize_username(username))
if user_model is None:
return None
else:
return user_model.id
def get_user_settings_from_username(username):
"""Gets the user settings for a given username.
Args:
username: str. Identifiable username to display in the UI.
Returns:
UserSettingsModel or None. The UserSettingsModel instance corresponding
to the given username, or None if no such model was found.
"""
user_model = user_models.UserSettingsModel.get_by_normalized_username(
UserSettings.normalize_username(username))
if user_model is None:
return None
else:
return get_user_settings(user_model.id)
def get_users_settings(user_ids):
"""Gets domain objects representing the settings for the given user_ids.
Args:
user_ids: list(str). The list of user_ids to get UserSettings
domain objects for.
Returns:
list(UserSettings|None). The UserSettings domain objects corresponding
to the given user ids. If the given user_id does not exist, the
corresponding entry in the returned list is None.
"""
user_settings_models = user_models.UserSettingsModel.get_multi(user_ids)
result = []
for i, model in enumerate(user_settings_models):
if user_ids[i] == feconf.SYSTEM_COMMITTER_ID:
result.append(UserSettings(
user_id=feconf.SYSTEM_COMMITTER_ID,
gae_id=feconf.SYSTEM_COMMITTER_ID,
email=feconf.SYSTEM_EMAIL_ADDRESS,
role=feconf.ROLE_ID_ADMIN,
username='admin',
last_agreed_to_terms=datetime.datetime.utcnow()
))
else:
result.append(_transform_user_settings(model))
return result
def generate_initial_profile_picture(user_id):
"""Generates a profile picture for a new user and
updates the user's settings in the datastore.
Args:
user_id: str. The unique ID of the user.
"""
user_email = get_email_from_user_id(user_id)
user_gravatar = fetch_gravatar(user_email)
update_profile_picture_data_url(user_id, user_gravatar)
def get_gravatar_url(email):
"""Returns the gravatar url for the specified email.
Args:
email: str. The user email.
Returns:
str. The gravatar url for the specified email.
"""
return (
'https://www.gravatar.com/avatar/%s?d=identicon&s=%s' %
(hashlib.md5(email).hexdigest(), GRAVATAR_SIZE_PX))
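# Illustrative shape of the returned URL (the exact size comes from the
# GRAVATAR_SIZE_PX constant defined elsewhere in this module):
#   'https://www.gravatar.com/avatar/<md5-of-email>?d=identicon&s=<size>'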
def fetch_gravatar(email):
"""Returns the gravatar corresponding to the user's email, or an
identicon generated from the email if the gravatar doesn't exist.
Args:
email: str. The user email.
Returns:
        str. A data URL for the fetched gravatar image corresponding to the
        given user email. If the fetch fails, or the response is not a valid
        PNG, this returns DEFAULT_IDENTICON_DATA_URL and logs an error.
"""
gravatar_url = get_gravatar_url(email)
try:
result = urlfetch.fetch(
gravatar_url,
headers={'Content-Type': 'image/png'},
follow_redirects=False)
except (urlfetch.InvalidURLError, urlfetch.DownloadError):
logging.error('Failed to fetch Gravatar from %s' % gravatar_url)
else:
if result.status_code == 200:
if imghdr.what(None, h=result.content) == 'png':
return utils.convert_png_binary_to_data_url(result.content)
else:
logging.error(
'[Status %s] Failed to fetch Gravatar from %s' %
(result.status_code, gravatar_url))
return DEFAULT_IDENTICON_DATA_URL
def get_user_settings(user_id, strict=False):
"""Return the user settings for a single user.
Args:
user_id: str. The unique ID of the user.
strict: bool. Whether to fail noisily if no user with the given
id exists in the datastore. Defaults to False.
Returns:
UserSettings or None. If the given user_id does not exist and strict
is False, returns None. Otherwise, returns the corresponding
UserSettings domain object.
Raises:
Exception: strict is True and given user_id does not exist.
"""
user_settings = get_users_settings([user_id])[0]
if strict and user_settings is None:
logging.error('Could not find user with id %s' % user_id)
raise Exception('User not found.')
return user_settings
def get_user_settings_by_gae_id(gae_id, strict=False):
"""Return the user settings for a single user.
Args:
gae_id: str. The GAE user ID of the user.
strict: bool. Whether to fail noisily if no user with the given
id exists in the datastore. Defaults to False.
Returns:
UserSettings or None. If the given gae_id does not exist and strict
is False, returns None. Otherwise, returns the corresponding
UserSettings domain object.
Raises:
Exception: strict is True and given gae_id does not exist.
"""
user_settings = _transform_user_settings(
user_models.UserSettingsModel.get_by_gae_id(gae_id))
if strict and user_settings is None:
logging.error('Could not find user with id %s' % gae_id)
raise Exception('User not found.')
return user_settings
def get_user_role_from_id(user_id):
"""Returns role of the user with given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
str. Role of the user with given id.
"""
user_settings = get_user_settings(user_id, strict=False)
if user_settings is None:
return feconf.ROLE_ID_GUEST
return user_settings.role
def get_user_community_rights(user_id):
"""Returns the UserCommunityRights domain object for the given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
UserCommunityRights. The UserCommunityRights domain object for the
corresponding user.
"""
user_model = (
user_models.UserCommunityRightsModel.get_by_id(user_id))
if user_model is not None:
return user_domain.UserCommunityRights(
user_id,
user_model.can_review_translation_for_language_codes,
user_model.can_review_voiceover_for_language_codes,
user_model.can_review_questions)
else:
return user_domain.UserCommunityRights(user_id, [], [], False)
def get_all_community_reviewers():
"""Returns a list of UserCommunityRights objects corresponding to each
UserCommunityRightsModel.
Returns:
list(UserCommunityRights). A list of UserCommunityRights objects.
"""
reviewer_models = user_models.UserCommunityRightsModel.get_all()
return [user_domain.UserCommunityRights(
model.id, model.can_review_translation_for_language_codes,
model.can_review_voiceover_for_language_codes,
model.can_review_questions) for model in reviewer_models]
def _save_user_community_rights(user_community_rights):
"""Saves the UserCommunityRights object into the datastore.
Args:
user_community_rights: UserCommunityRights. The UserCommunityRights
object of the user.
"""
# TODO(#8794): Add limitation on number of reviewers allowed in any
# category.
user_community_rights.validate()
user_models.UserCommunityRightsModel(
id=user_community_rights.id,
can_review_translation_for_language_codes=(
user_community_rights.can_review_translation_for_language_codes),
can_review_voiceover_for_language_codes=(
user_community_rights.can_review_voiceover_for_language_codes),
can_review_questions=user_community_rights.can_review_questions).put()
def _update_user_community_rights(user_community_rights):
"""Updates the users rights model if the updated object has review rights in
at least one item else delete the existing model.
Args:
user_community_rights: UserCommunityRights. The updated
UserCommunityRights object of the user.
"""
if user_community_rights.can_review_at_least_one_item():
_save_user_community_rights(user_community_rights)
else:
remove_community_reviewer(user_community_rights.id)
def get_usernames_by_role(role):
"""Get usernames of all the users with given role ID.
Args:
role: str. The role ID of users requested.
Returns:
list(str). List of usernames of users with given role ID.
"""
user_settings = user_models.UserSettingsModel.get_by_role(role)
return [user.username for user in user_settings]
def get_user_ids_by_role(role):
"""Get user ids of all the users with given role ID.
Args:
role: str. The role ID of users requested.
Returns:
list(str). List of user ids of users with given role ID.
"""
user_settings = user_models.UserSettingsModel.get_by_role(role)
return [user.id for user in user_settings]
class UserActionsInfo(python_utils.OBJECT):
"""A class representing information of user actions.
Attributes:
user_id: str. The unique ID of the user.
role: str. The role ID of the user.
actions: list(str). A list of actions accessible to the role.
"""
def __init__(self, user_id=None):
self._user_id = user_id
self._role = get_user_role_from_id(user_id)
self._actions = role_services.get_all_actions(self._role)
@property
def user_id(self):
"""Returns the unique ID of the user.
Returns:
user_id: str. The unique ID of the user.
"""
return self._user_id
@property
def role(self):
"""Returns the role ID of user.
Returns:
role: str. The role ID of the user.
"""
return self._role
@property
def actions(self):
"""Returns list of actions accessible to a user.
Returns:
actions: list(str). List of actions accessible to a user ID.
"""
return self._actions
def get_system_user():
"""Returns user object with system committer user id.
Returns:
        UserActionsInfo. A user object with the system committer user id.
"""
system_user = UserActionsInfo(feconf.SYSTEM_COMMITTER_ID)
return system_user
def _save_user_settings(user_settings):
"""Commits a user settings object to the datastore.
Args:
user_settings: UserSettings domain object.
"""
user_settings.validate()
user_settings_dict = {
'gae_id': user_settings.gae_id,
'email': user_settings.email,
'role': user_settings.role,
'username': user_settings.username,
'normalized_username': user_settings.normalized_username,
'last_agreed_to_terms': user_settings.last_agreed_to_terms,
'last_started_state_editor_tutorial': (
user_settings.last_started_state_editor_tutorial),
'last_started_state_translation_tutorial': (
user_settings.last_started_state_translation_tutorial),
'last_logged_in': user_settings.last_logged_in,
'last_edited_an_exploration': user_settings.last_edited_an_exploration,
'last_created_an_exploration': (
user_settings.last_created_an_exploration),
'profile_picture_data_url': user_settings.profile_picture_data_url,
'default_dashboard': user_settings.default_dashboard,
'creator_dashboard_display_pref': (
user_settings.creator_dashboard_display_pref),
'user_bio': user_settings.user_bio,
'subject_interests': user_settings.subject_interests,
'first_contribution_msec': user_settings.first_contribution_msec,
'preferred_language_codes': user_settings.preferred_language_codes,
'preferred_site_language_code': (
user_settings.preferred_site_language_code),
'preferred_audio_language_code': (
user_settings.preferred_audio_language_code),
'deleted': user_settings.deleted
}
# If user with the given user_id already exists, update that model
# with the given user settings, otherwise, create a new one.
user_model = user_models.UserSettingsModel.get_by_id(user_settings.user_id)
if user_model is not None:
user_model.populate(**user_settings_dict)
user_model.put()
else:
user_settings_dict['id'] = user_settings.user_id
user_models.UserSettingsModel(**user_settings_dict).put()
def _transform_user_settings(user_settings_model):
"""Transform user settings storage model to domain object.
Args:
user_settings_model: UserSettingsModel.
Returns:
UserSettings. Domain object for user settings.
"""
if user_settings_model:
return UserSettings(
user_id=user_settings_model.id,
gae_id=user_settings_model.gae_id,
email=user_settings_model.email,
role=user_settings_model.role,
username=user_settings_model.username,
last_agreed_to_terms=user_settings_model.last_agreed_to_terms,
last_started_state_editor_tutorial=(
user_settings_model.last_started_state_editor_tutorial),
last_started_state_translation_tutorial=(
user_settings_model.last_started_state_translation_tutorial),
last_logged_in=user_settings_model.last_logged_in,
last_edited_an_exploration=(
user_settings_model.last_edited_an_exploration),
last_created_an_exploration=(
user_settings_model.last_created_an_exploration),
profile_picture_data_url=(
user_settings_model.profile_picture_data_url),
default_dashboard=user_settings_model.default_dashboard,
creator_dashboard_display_pref=(
user_settings_model.creator_dashboard_display_pref),
user_bio=user_settings_model.user_bio,
subject_interests=user_settings_model.subject_interests,
first_contribution_msec=(
user_settings_model.first_contribution_msec),
preferred_language_codes=(
user_settings_model.preferred_language_codes),
preferred_site_language_code=(
user_settings_model.preferred_site_language_code),
preferred_audio_language_code=(
user_settings_model.preferred_audio_language_code),
deleted=user_settings_model.deleted
)
else:
return None
def is_user_registered(user_id):
"""Checks if a user is registered with the given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
bool. Whether a user with the given user_id is registered.
"""
if user_id is None:
return False
user_settings = user_models.UserSettingsModel.get(user_id, strict=False)
return bool(user_settings)
def has_ever_registered(user_id):
"""Checks if a user has ever been registered with given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
bool. Whether a user with the given user_id has ever been registered.
"""
user_settings = get_user_settings(user_id, strict=True)
return bool(user_settings.username and user_settings.last_agreed_to_terms)
def has_fully_registered(user_id):
"""Checks if a user has fully registered.
Args:
user_id: str. The unique ID of the user.
Returns:
bool. Whether a user with the given user_id has fully registered.
"""
if user_id is None:
return False
user_settings = get_user_settings(user_id, strict=True)
return user_settings.username and user_settings.last_agreed_to_terms and (
user_settings.last_agreed_to_terms >=
feconf.REGISTRATION_PAGE_LAST_UPDATED_UTC)
def create_new_user(gae_id, email):
"""Creates a new user.
Args:
gae_id: str. The unique GAE user ID of the user.
email: str. The user email.
Returns:
UserSettings. The newly-created user settings domain object.
Raises:
Exception: If a user with the given gae_id already exists.
"""
user_settings = get_user_settings(gae_id, strict=False)
if user_settings is not None:
raise Exception('User %s already exists.' % gae_id)
user_id = user_models.UserSettingsModel.get_new_id('')
user_settings = UserSettings(
user_id, gae_id, email, feconf.ROLE_ID_EXPLORATION_EDITOR,
preferred_language_codes=[constants.DEFAULT_LANGUAGE_CODE])
_save_user_settings(user_settings)
create_user_contributions(user_id, [], [])
return user_settings
def get_username(user_id):
"""Gets username corresponding to the given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
str. Username corresponding to the given user_id.
"""
if user_id in feconf.SYSTEM_USERS:
return feconf.SYSTEM_USERS[user_id]
return get_user_settings(user_id, strict=True).username
def get_usernames(user_ids):
"""Gets usernames corresponding to the given user_ids.
Args:
user_ids: list(str). The list of user_ids to get usernames for.
Returns:
list(str|None). Containing usernames based on given user_ids.
If a user_id does not exist, the corresponding entry in the
returned list is None.
"""
usernames = [None] * len(user_ids)
non_system_user_indices = []
non_system_user_ids = []
for index, user_id in enumerate(user_ids):
if user_id in feconf.SYSTEM_USERS:
usernames[index] = feconf.SYSTEM_USERS[user_id]
else:
non_system_user_indices.append(index)
non_system_user_ids.append(user_id)
non_system_users_settings = get_users_settings(non_system_user_ids)
for index, user_settings in enumerate(non_system_users_settings):
if user_settings:
usernames[non_system_user_indices[index]] = user_settings.username
return usernames
def set_username(user_id, new_username):
"""Updates the username of the user with the given user_id.
Args:
user_id: str. The unique ID of the user.
new_username: str. The new username to set.
Raises:
ValidationError: The new_username supplied is already taken.
"""
user_settings = get_user_settings(user_id, strict=True)
UserSettings.require_valid_username(new_username)
if is_username_taken(new_username):
raise utils.ValidationError(
'Sorry, the username \"%s\" is already taken! Please pick '
'a different one.' % new_username)
user_settings.username = new_username
_save_user_settings(user_settings)
def record_agreement_to_terms(user_id):
"""Records that the user with given user_id has agreed to the license terms.
Args:
user_id: str. The unique ID of the user.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.last_agreed_to_terms = datetime.datetime.utcnow()
_save_user_settings(user_settings)
def update_profile_picture_data_url(user_id, profile_picture_data_url):
"""Updates profile_picture_data_url of user with given user_id.
Args:
user_id: str. The unique ID of the user.
profile_picture_data_url: str. New profile picture url to be set.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.profile_picture_data_url = profile_picture_data_url
_save_user_settings(user_settings)
def update_user_bio(user_id, user_bio):
"""Updates user_bio of user with given user_id.
Args:
user_id: str. The unique ID of the user.
user_bio: str. New user biography to be set.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.user_bio = user_bio
_save_user_settings(user_settings)
def update_user_default_dashboard(user_id, default_dashboard):
"""Updates the default dashboard of user with given user id.
Args:
user_id: str. The unique ID of the user.
default_dashboard: str. The dashboard the user wants.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.default_dashboard = default_dashboard
_save_user_settings(user_settings)
def update_user_creator_dashboard_display(
user_id, creator_dashboard_display_pref):
"""Updates the creator dashboard preference of user with given user id.
Args:
user_id: str. The unique ID of the user.
creator_dashboard_display_pref: str. The creator dashboard preference
the user wants.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.creator_dashboard_display_pref = (
creator_dashboard_display_pref)
_save_user_settings(user_settings)
def update_subject_interests(user_id, subject_interests):
"""Updates subject_interests of user with given user_id.
Args:
user_id: str. The unique ID of the user.
subject_interests: list(str). New subject interests to be set.
"""
if not isinstance(subject_interests, list):
raise utils.ValidationError('Expected subject_interests to be a list.')
else:
for interest in subject_interests:
if not isinstance(interest, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected each subject interest to be a string.')
elif not interest:
raise utils.ValidationError(
'Expected each subject interest to be non-empty.')
elif not re.match(constants.TAG_REGEX, interest):
raise utils.ValidationError(
'Expected each subject interest to consist only of '
'lowercase alphabetic characters and spaces.')
if len(set(subject_interests)) != len(subject_interests):
raise utils.ValidationError(
'Expected each subject interest to be distinct.')
user_settings = get_user_settings(user_id, strict=True)
user_settings.subject_interests = subject_interests
_save_user_settings(user_settings)
def _update_first_contribution_msec(user_id, first_contribution_msec):
"""Updates first_contribution_msec of user with given user_id.
Args:
user_id: str. The unique ID of the user.
first_contribution_msec: float. New time to set in milliseconds
representing user's first contribution to Oppia.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.first_contribution_msec = first_contribution_msec
_save_user_settings(user_settings)
def update_first_contribution_msec_if_not_set(user_id, first_contribution_msec):
"""Updates first_contribution_msec of user with given user_id
if it is set to None.
Args:
user_id: str. The unique ID of the user.
first_contribution_msec: float. New time to set in milliseconds
representing user's first contribution to Oppia.
"""
user_settings = get_user_settings(user_id, strict=True)
if user_settings.first_contribution_msec is None:
_update_first_contribution_msec(
user_id, first_contribution_msec)
def update_preferred_language_codes(user_id, preferred_language_codes):
"""Updates preferred_language_codes of user with given user_id.
Args:
user_id: str. The unique ID of the user.
preferred_language_codes: list(str). New exploration language
preferences to set.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.preferred_language_codes = preferred_language_codes
_save_user_settings(user_settings)
def update_preferred_site_language_code(user_id, preferred_site_language_code):
"""Updates preferred_site_language_code of user with given user_id.
Args:
user_id: str. The unique ID of the user.
preferred_site_language_code: str. New system language preference
to set.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.preferred_site_language_code = (
preferred_site_language_code)
_save_user_settings(user_settings)
def update_preferred_audio_language_code(
user_id, preferred_audio_language_code):
"""Updates preferred_audio_language_code of user with given user_id.
Args:
user_id: str. The unique ID of the user.
preferred_audio_language_code: str. New audio language preference
to set.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.preferred_audio_language_code = (
preferred_audio_language_code)
_save_user_settings(user_settings)
def update_user_role(user_id, role):
"""Updates the role of the user with given user_id.
Args:
user_id: str. The unique ID of the user whose role is to be updated.
role: str. The role to be assigned to user with given id.
Raises:
Exception: The given role does not exist.
"""
if role not in role_services.PARENT_ROLES:
raise Exception('Role %s does not exist.' % role)
user_settings = get_user_settings(user_id, strict=True)
user_settings.role = role
_save_user_settings(user_settings)
def mark_user_for_deletion(
user_id, exploration_ids, collection_ids):
"""Set deleted of the user with given user_id to True and create
PendingDeletionRequestModel for that user.
Args:
user_id: str. The unique ID of the user who should be deleted.
exploration_ids: list(str). List of exploration ids that were soft
deleted and should be hard deleted later.
collection_ids: list(str). List of collection ids that were soft
deleted and should be hard deleted later.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.deleted = True
_save_user_settings(user_settings)
user_models.PendingDeletionRequestModel(
id=user_id,
email=user_settings.email,
exploration_ids=exploration_ids,
collection_ids=collection_ids,
).put()
def get_human_readable_user_ids(user_ids):
"""Converts the given ids to usernames, or truncated email addresses.
Requires all users to be known.
Args:
user_ids: list(str). The list of user_ids to get UserSettings domain
objects for.
Returns:
list(str). List of usernames corresponding to given user_ids. If
username does not exist, the corresponding entry in the returned
list is the user's truncated email address.
Raises:
Exception: At least one of the user_ids does not correspond to a valid
UserSettingsModel.
"""
users_settings = get_users_settings(user_ids)
usernames = []
for ind, user_settings in enumerate(users_settings):
if user_settings is None:
logging.error('User id %s not known in list of user_ids %s' % (
user_ids[ind], user_ids))
raise Exception('User not found.')
elif user_settings.user_id == feconf.SYSTEM_COMMITTER_ID:
usernames.append('admin')
elif user_settings.username:
usernames.append(user_settings.username)
else:
usernames.append(
'[Awaiting user registration: %s]' %
user_settings.truncated_email)
return usernames
def record_user_started_state_editor_tutorial(user_id):
"""Updates last_started_state_editor_tutorial to the current datetime
for the user with given user_id.
Args:
user_id: str. The unique ID of the user.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.last_started_state_editor_tutorial = (
datetime.datetime.utcnow())
_save_user_settings(user_settings)
def record_user_started_state_translation_tutorial(user_id):
"""Updates last_started_state_translation_tutorial to the current datetime
for the user with given user_id.
Args:
user_id: str. The unique ID of the user.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.last_started_state_translation_tutorial = (
datetime.datetime.utcnow())
_save_user_settings(user_settings)
def record_user_logged_in(user_id):
"""Updates last_logged_in to the current datetime for the user with
given user_id.
Args:
user_id: str. The unique ID of the user.
"""
user_settings = get_user_settings(user_id, strict=True)
user_settings.last_logged_in = datetime.datetime.utcnow()
_save_user_settings(user_settings)
def update_last_logged_in(user_settings, new_last_logged_in):
"""Updates last_logged_in to the new given datetime for the user with
given user_settings. Should only be used by tests.
Args:
user_settings: UserSettings. The UserSettings domain object.
new_last_logged_in: datetime or None. The new datetime of the last
logged in session.
"""
user_settings.last_logged_in = new_last_logged_in
_save_user_settings(user_settings)
def record_user_edited_an_exploration(user_id):
"""Updates last_edited_an_exploration to the current datetime for
the user with given user_id.
Args:
user_id: str. The unique ID of the user.
"""
user_settings = get_user_settings(user_id)
if user_settings:
user_settings.last_edited_an_exploration = datetime.datetime.utcnow()
_save_user_settings(user_settings)
def record_user_created_an_exploration(user_id):
"""Updates last_created_an_exploration to the current datetime for
the user with given user_id.
Args:
user_id: str. The unique ID of the user.
"""
user_settings = get_user_settings(user_id)
if user_settings:
user_settings.last_created_an_exploration = datetime.datetime.utcnow()
_save_user_settings(user_settings)
def update_email_preferences(
user_id, can_receive_email_updates, can_receive_editor_role_email,
can_receive_feedback_email, can_receive_subscription_email):
"""Updates whether the user has chosen to receive email updates.
If no UserEmailPreferencesModel exists for this user, a new one will
be created.
Args:
user_id: str. The unique ID of the user.
can_receive_email_updates: bool. Whether the given user can receive
email updates.
can_receive_editor_role_email: bool. Whether the given user can receive
emails notifying them of role changes.
can_receive_feedback_email: bool. Whether the given user can receive
emails when users submit feedback to their explorations.
can_receive_subscription_email: bool. Whether the given user can receive
emails related to his/her creator subscriptions.
"""
email_preferences_model = user_models.UserEmailPreferencesModel.get(
user_id, strict=False)
if email_preferences_model is None:
email_preferences_model = user_models.UserEmailPreferencesModel(
id=user_id)
email_preferences_model.site_updates = can_receive_email_updates
email_preferences_model.editor_role_notifications = (
can_receive_editor_role_email)
email_preferences_model.feedback_message_notifications = (
can_receive_feedback_email)
email_preferences_model.subscription_notifications = (
can_receive_subscription_email)
email_preferences_model.put()
def get_email_preferences(user_id):
"""Gives email preferences of user with given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
UserGlobalPrefs. Representing whether the user has chosen to receive
email updates.
"""
email_preferences_model = user_models.UserEmailPreferencesModel.get(
user_id, strict=False)
if email_preferences_model is None:
return user_domain.UserGlobalPrefs.create_default_prefs()
else:
return user_domain.UserGlobalPrefs(
email_preferences_model.site_updates,
email_preferences_model.editor_role_notifications,
email_preferences_model.feedback_message_notifications,
email_preferences_model.subscription_notifications)
def flush_migration_bot_contributions_model():
"""Cleans migration bot contributions model."""
user_contributions = get_user_contributions(
feconf.MIGRATION_BOT_USER_ID, strict=False)
if user_contributions is not None:
user_contributions.edited_exploration_ids = []
user_contributions.created_exploration_ids = []
_save_user_contributions(user_contributions)
def get_users_email_preferences(user_ids):
"""Get email preferences for the list of users.
Args:
user_ids: list(str). A list of user IDs for whom we want to get email
preferences.
Returns:
list(UserGlobalPrefs). Representing whether the users had chosen to
receive email updates.
"""
user_email_preferences_models = (
user_models.UserEmailPreferencesModel.get_multi(user_ids))
result = []
for email_preferences_model in user_email_preferences_models:
if email_preferences_model is None:
result.append(
user_domain.UserGlobalPrefs.create_default_prefs())
else:
result.append(user_domain.UserGlobalPrefs(
email_preferences_model.site_updates,
email_preferences_model.editor_role_notifications,
email_preferences_model.feedback_message_notifications,
email_preferences_model.subscription_notifications))
return result
def set_email_preferences_for_exploration(
user_id, exploration_id, mute_feedback_notifications=None,
mute_suggestion_notifications=None):
"""Sets mute preferences for exploration with given exploration_id of user
with given user_id.
If no ExplorationUserDataModel exists for this user and exploration,
a new one will be created.
Args:
user_id: str. The unique ID of the user.
exploration_id: str. The exploration id.
mute_feedback_notifications: bool. Whether the given user has muted
feedback emails. Defaults to None.
mute_suggestion_notifications: bool. Whether the given user has muted
suggestion emails. Defaults to None.
"""
exploration_user_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
if exploration_user_model is None:
exploration_user_model = user_models.ExplorationUserDataModel.create(
user_id, exploration_id)
if mute_feedback_notifications is not None:
exploration_user_model.mute_feedback_notifications = (
mute_feedback_notifications)
if mute_suggestion_notifications is not None:
exploration_user_model.mute_suggestion_notifications = (
mute_suggestion_notifications)
exploration_user_model.put()
def get_email_preferences_for_exploration(user_id, exploration_id):
"""Gives mute preferences for exploration with given exploration_id of user
with given user_id.
Args:
user_id: str. The unique ID of the user.
exploration_id: str. The exploration id.
Returns:
UserExplorationPrefs. Representing whether the user has chosen to
receive email updates for particular exploration.
"""
exploration_user_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
if exploration_user_model is None:
return user_domain.UserExplorationPrefs.create_default_prefs()
else:
return user_domain.UserExplorationPrefs(
exploration_user_model.mute_feedback_notifications,
exploration_user_model.mute_suggestion_notifications)
def get_users_email_preferences_for_exploration(user_ids, exploration_id):
"""Gives mute preferences for exploration with given exploration_id of user
with given user_id.
Args:
user_ids: list(str). A list of user IDs for whom we want to get email
preferences.
exploration_id: str. The exploration id.
Returns:
        list(UserExplorationPrefs). Representing whether the users have chosen
        to receive email updates for the particular exploration.
"""
exploration_user_models = (
user_models.ExplorationUserDataModel.get_multi(
user_ids, exploration_id))
result = []
for exploration_user_model in exploration_user_models:
if exploration_user_model is None:
result.append(
user_domain.UserExplorationPrefs.create_default_prefs())
else:
result.append(user_domain.UserExplorationPrefs(
exploration_user_model.mute_feedback_notifications,
exploration_user_model.mute_suggestion_notifications))
return result
class UserContributions(python_utils.OBJECT):
"""Value object representing a user's contributions.
Attributes:
user_id: str. The unique ID of the user.
created_exploration_ids: list(str). IDs of explorations that this
user has created.
edited_exploration_ids: list(str). IDs of explorations that this
user has edited.
"""
def __init__(
self, user_id, created_exploration_ids, edited_exploration_ids):
"""Constructs a UserContributions domain object.
Args:
user_id: str. The unique ID of the user.
created_exploration_ids: list(str). IDs of explorations that this
user has created.
edited_exploration_ids: list(str). IDs of explorations that this
user has edited.
"""
self.user_id = user_id
self.created_exploration_ids = created_exploration_ids
self.edited_exploration_ids = edited_exploration_ids
def validate(self):
"""Checks that user_id, created_exploration_ids and
edited_exploration_ids fields of this UserContributions
domain object are valid.
Raises:
ValidationError: user_id is not str.
ValidationError: created_exploration_ids is not a list.
ValidationError: exploration_id in created_exploration_ids
is not str.
ValidationError: edited_exploration_ids is not a list.
ValidationError: exploration_id in edited_exploration_ids
is not str.
"""
if not isinstance(self.user_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected user_id to be a string, received %s' % self.user_id)
if not self.user_id:
raise utils.ValidationError('No user id specified.')
if not isinstance(self.created_exploration_ids, list):
raise utils.ValidationError(
'Expected created_exploration_ids to be a list, received %s'
% self.created_exploration_ids)
for exploration_id in self.created_exploration_ids:
if not isinstance(exploration_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected exploration_id in created_exploration_ids '
'to be a string, received %s' % (
exploration_id))
if not isinstance(self.edited_exploration_ids, list):
raise utils.ValidationError(
'Expected edited_exploration_ids to be a list, received %s'
% self.edited_exploration_ids)
for exploration_id in self.edited_exploration_ids:
if not isinstance(exploration_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected exploration_id in edited_exploration_ids '
'to be a string, received %s' % (
exploration_id))
def get_user_contributions(user_id, strict=False):
"""Gets domain object representing the contributions for the given user_id.
Args:
user_id: str. The unique ID of the user.
strict: bool. Whether to fail noisily if no user with the given
id exists in the datastore. Defaults to False.
Returns:
UserContributions or None. If the given user_id does not exist, return
None. Otherwise, return the corresponding UserContributions domain
object.
"""
model = user_models.UserContributionsModel.get(user_id, strict=strict)
if model is not None:
result = UserContributions(
model.id, model.created_exploration_ids,
model.edited_exploration_ids)
else:
result = None
return result
def create_user_contributions(
user_id, created_exploration_ids, edited_exploration_ids):
"""Creates a new UserContributionsModel and returns the domain object.
Args:
user_id: str. The unique ID of the user.
created_exploration_ids: list(str). IDs of explorations that this
user has created.
edited_exploration_ids: list(str). IDs of explorations that this
user has edited.
Returns:
UserContributions. The domain object representing the newly-created
UserContributionsModel.
Raises:
Exception: The UserContributionsModel for the given user_id already
exists.
"""
user_contributions = get_user_contributions(user_id, strict=False)
if user_contributions:
raise Exception(
'User contributions model for user %s already exists.' % user_id)
else:
user_contributions = UserContributions(
user_id, created_exploration_ids, edited_exploration_ids)
_save_user_contributions(user_contributions)
return user_contributions
def update_user_contributions(
user_id, created_exploration_ids, edited_exploration_ids):
"""Updates an existing UserContributionsModel with new calculated
contributions.
Args:
user_id: str. The unique ID of the user.
created_exploration_ids: list(str). IDs of explorations that this
user has created.
edited_exploration_ids: list(str). IDs of explorations that this
user has edited.
Raises:
Exception: The UserContributionsModel for the given user_id does not
exist.
"""
user_contributions = get_user_contributions(user_id, strict=False)
if not user_contributions:
raise Exception(
'User contributions model for user %s does not exist.' % user_id)
user_contributions.created_exploration_ids = created_exploration_ids
user_contributions.edited_exploration_ids = edited_exploration_ids
_save_user_contributions(user_contributions)
def add_created_exploration_id(user_id, exploration_id):
"""Adds an exploration_id to a user_id's UserContributionsModel collection
of created explorations.
Args:
user_id: str. The unique ID of the user.
exploration_id: str. The exploration id.
"""
user_contributions = get_user_contributions(user_id, strict=False)
if not user_contributions:
create_user_contributions(user_id, [exploration_id], [])
elif exploration_id not in user_contributions.created_exploration_ids:
user_contributions.created_exploration_ids.append(exploration_id)
user_contributions.created_exploration_ids.sort()
_save_user_contributions(user_contributions)
def add_edited_exploration_id(user_id, exploration_id):
"""Adds an exploration_id to a user_id's UserContributionsModel collection
of edited explorations.
Args:
user_id: str. The unique ID of the user.
exploration_id: str. The exploration id.
"""
user_contributions = get_user_contributions(user_id, strict=False)
if not user_contributions:
create_user_contributions(user_id, [], [exploration_id])
elif exploration_id not in user_contributions.edited_exploration_ids:
user_contributions.edited_exploration_ids.append(exploration_id)
user_contributions.edited_exploration_ids.sort()
_save_user_contributions(user_contributions)
def _save_user_contributions(user_contributions):
"""Commits a user contributions object to the datastore.
Args:
user_contributions: UserContributions. Value object representing
a user's contributions.
"""
user_contributions.validate()
user_models.UserContributionsModel(
id=user_contributions.user_id,
created_exploration_ids=user_contributions.created_exploration_ids,
edited_exploration_ids=user_contributions.edited_exploration_ids,
).put()
def _migrate_dashboard_stats_to_latest_schema(versioned_dashboard_stats):
"""Holds responsibility of updating the structure of dashboard stats.
Args:
versioned_dashboard_stats: UserStatsModel. Value object representing
user-specific statistics.
Raises:
Exception: If schema_version > CURRENT_DASHBOARD_STATS_SCHEMA_VERSION.
"""
stats_schema_version = versioned_dashboard_stats.schema_version
if not (1 <= stats_schema_version
<= feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1-v%d dashboard stats schemas at '
'present.' % feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION)
def get_current_date_as_string():
"""Gets the current date.
Returns:
str. Current date as a string of format 'YYYY-MM-DD'.
"""
return datetime.datetime.utcnow().strftime(
feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
def parse_date_from_string(datetime_str):
"""Parses the given string, and returns the year, month and day of the
date that it represents.
Args:
datetime_str: str. String representing datetime.
Returns:
dict. Representing date with year, month and day as keys.
"""
datetime_obj = datetime.datetime.strptime(
datetime_str, feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
return {
'year': datetime_obj.year,
'month': datetime_obj.month,
'day': datetime_obj.day
}
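# For example, with the 'YYYY-MM-DD' format mentioned above,
# parse_date_from_string('2020-03-01') returns
# {'year': 2020, 'month': 3, 'day': 1}.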
def get_user_impact_score(user_id):
"""Gets the user impact score for the given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
float. The user impact score associated with the given user_id.
Returns 0 if UserStatsModel does not exist for the given user_id.
"""
model = user_models.UserStatsModel.get(user_id, strict=False)
if model:
return model.impact_score
else:
return 0
def get_weekly_dashboard_stats(user_id):
"""Gets weekly dashboard stats for a given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
list(dict): The weekly dashboard stats for the given user. Each dict in
the list denotes the dashboard stats of the user, keyed by a datetime
string. The stats currently being saved are:
- 'average ratings': Average of ratings across all explorations of
a user.
- 'total plays': Total number of plays across all explorations of
a user.
The format of returned value:
[
{
{{datetime_string_1}}: {
'num_ratings': (value),
'average_ratings': (value),
'total_plays': (value)
}
},
{
{{datetime_string_2}}: {
'num_ratings': (value),
'average_ratings': (value),
'total_plays': (value)
}
}
]
If the user doesn't exist, then this function returns None.
"""
model = user_models.UserStatsModel.get(user_id, strict=False)
if model and model.weekly_creator_stats_list:
return model.weekly_creator_stats_list
else:
return None
def get_last_week_dashboard_stats(user_id):
"""Gets last week's dashboard stats for a given user_id.
Args:
user_id: str. The unique ID of the user.
Returns:
dict or None: The dict denotes last week dashboard stats of the user,
and contains a single key-value pair. The key is the datetime string and
the value is the dashboard stats in the format:
{
'num_ratings': (value),
'average_ratings': (value),
'total_plays': (value)
}
If the user doesn't exist, then this function returns None.
"""
weekly_dashboard_stats = get_weekly_dashboard_stats(user_id)
if weekly_dashboard_stats:
return weekly_dashboard_stats[-1]
else:
return None
def update_dashboard_stats_log(user_id):
"""Save statistics for creator dashboard of a user by appending to a list
keyed by a datetime string.
Args:
user_id: str. The unique ID of the user.
"""
model = user_models.UserStatsModel.get_or_create(user_id)
if model.schema_version != feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION:
_migrate_dashboard_stats_to_latest_schema(model)
weekly_dashboard_stats = {
get_current_date_as_string(): {
'num_ratings': model.num_ratings or 0,
'average_ratings': model.average_ratings,
'total_plays': model.total_plays or 0
}
}
model.weekly_creator_stats_list.append(weekly_dashboard_stats)
model.put()
def is_at_least_moderator(user_id):
"""Checks if a user with given user_id is at least a moderator.
Args:
user_id: str. The unique ID of the user.
Returns:
        bool. True if the user is at least a moderator, False otherwise.
"""
user_role = get_user_role_from_id(user_id)
if (user_role == feconf.ROLE_ID_MODERATOR or
user_role == feconf.ROLE_ID_ADMIN):
return True
return False
def is_admin(user_id):
"""Checks if a user with given user_id is an admin.
Args:
user_id: str. The unique ID of the user.
Returns:
bool. True if user is an admin, False otherwise.
"""
user_role = get_user_role_from_id(user_id)
if user_role == feconf.ROLE_ID_ADMIN:
return True
return False
def is_topic_manager(user_id):
"""Checks if a user with given user_id is a topic manager.
Args:
user_id: str. The unique ID of the user.
Returns:
bool. Whether the user is a topic manager.
"""
user_role = get_user_role_from_id(user_id)
if user_role == feconf.ROLE_ID_TOPIC_MANAGER:
return True
return False
def can_review_translation_suggestions(user_id, language_code=None):
"""Returns whether the user can review translation suggestions in any
language or in the given language.
NOTE: If the language_code is provided then this method will check whether
the user can review translations in the given language code. Otherwise, it
will check whether the user can review in any language.
Args:
user_id: str. The unique ID of the user.
language_code: str. The code of the language.
Returns:
bool. Whether the user can review translation suggestions in any
language or in the given language.
"""
user_community_rights = get_user_community_rights(user_id)
reviewable_language_codes = (
user_community_rights.can_review_translation_for_language_codes)
if language_code is not None:
return language_code in reviewable_language_codes
else:
return bool(reviewable_language_codes)
def can_review_voiceover_applications(user_id, language_code=None):
"""Returns whether the user can review voiceover applications in any
language or in the given language.
    NOTE: If the language_code is provided then this method will check whether
    the user can review voiceovers in the given language code. Otherwise, it
    will check whether the user can review in any language.
Args:
user_id: str. The unique ID of the user.
language_code: str. The code of the language.
Returns:
bool. Whether the user can review voiceover applications in any language
or in the given language.
"""
user_community_rights = get_user_community_rights(user_id)
reviewable_language_codes = (
user_community_rights.can_review_voiceover_for_language_codes)
if language_code is not None:
return language_code in reviewable_language_codes
else:
return bool(reviewable_language_codes)
def can_review_question_suggestions(user_id):
"""Checks whether the user can review question suggestions.
Args:
user_id: str. The unique ID of the user.
Returns:
bool. Whether the user can review question suggestions.
"""
user_community_rights = get_user_community_rights(user_id)
return user_community_rights.can_review_questions
def allow_user_to_review_translation_in_language(user_id, language_code):
"""Allows the user with the given user id to review translation in the given
language_code.
Args:
user_id: str. The unique ID of the user.
language_code: str. The code of the language. Callers should ensure that
the user does not have rights to review translations in the given
language code.
"""
user_community_rights = get_user_community_rights(user_id)
allowed_language_codes = set(
user_community_rights.can_review_translation_for_language_codes)
allowed_language_codes.add(language_code)
user_community_rights.can_review_translation_for_language_codes = (
sorted(list(allowed_language_codes)))
_save_user_community_rights(user_community_rights)
def remove_translation_review_rights_in_language(user_id, language_code):
"""Removes the user's review rights to translation suggestions in the given
language_code.
Args:
user_id: str. The unique ID of the user.
language_code: str. The code of the language. Callers should ensure that
the user already has rights to review translations in the given
language code.
"""
user_community_rights = get_user_community_rights(user_id)
user_community_rights.can_review_translation_for_language_codes.remove(
language_code)
_update_user_community_rights(user_community_rights)
def allow_user_to_review_voiceover_in_language(user_id, language_code):
"""Allows the user with the given user id to review voiceover applications
in the given language_code.
Args:
user_id: str. The unique ID of the user.
language_code: str. The code of the language. Callers should ensure that
the user does not have rights to review voiceovers in the given
language code.
"""
user_community_rights = get_user_community_rights(user_id)
allowed_language_codes = set(
user_community_rights.can_review_voiceover_for_language_codes)
allowed_language_codes.add(language_code)
user_community_rights.can_review_voiceover_for_language_codes = (
sorted(list(allowed_language_codes)))
_save_user_community_rights(user_community_rights)
def remove_voiceover_review_rights_in_language(user_id, language_code):
"""Removes the user's review rights to voiceover applications in the given
language_code.
Args:
user_id: str. The unique ID of the user.
language_code: str. The code of the language. Callers should ensure that
the user already has rights to review voiceovers in the given
language code.
"""
user_community_rights = get_user_community_rights(user_id)
user_community_rights.can_review_voiceover_for_language_codes.remove(
language_code)
_update_user_community_rights(user_community_rights)
def allow_user_to_review_question(user_id):
"""Allows the user with the given user id to review question suggestions.
Args:
user_id: str. The unique ID of the user. Callers should ensure that
the given user does not have rights to review questions.
"""
user_community_rights = get_user_community_rights(user_id)
user_community_rights.can_review_questions = True
_save_user_community_rights(user_community_rights)
def remove_question_review_rights(user_id):
"""Removes the user's review rights to question suggestions.
Args:
user_id: str. The unique ID of the user. Callers should ensure that
the given user already has rights to review questions.
"""
user_community_rights = get_user_community_rights(user_id)
user_community_rights.can_review_questions = False
_update_user_community_rights(user_community_rights)
def remove_community_reviewer(user_id):
"""Deletes the UserCommunityRightsModel corresponding to the given user_id.
Args:
user_id: str. The unique ID of the user.
"""
user_community_rights_model = (
user_models.UserCommunityRightsModel.get_by_id(user_id))
if user_community_rights_model is not None:
user_community_rights_model.delete()
def get_community_reviewer_usernames(review_category, language_code=None):
"""Returns a list of usernames of users who has rights to review item of
given review category.
Args:
review_category: str. The review category to find the list of reviewers
for.
language_code: None|str. The language code for translation or voiceover
review category.
Returns:
        list(str). A list of usernames.
"""
reviewer_ids = []
if review_category == constants.REVIEW_CATEGORY_TRANSLATION:
reviewer_ids = (
user_models.UserCommunityRightsModel
.get_translation_reviewer_user_ids(language_code))
elif review_category == constants.REVIEW_CATEGORY_VOICEOVER:
reviewer_ids = (
user_models.UserCommunityRightsModel
.get_voiceover_reviewer_user_ids(language_code))
elif review_category == constants.REVIEW_CATEGORY_QUESTION:
if language_code is not None:
raise Exception('Expected language_code to be None, found: %s' % (
language_code))
reviewer_ids = (
user_models.UserCommunityRightsModel
.get_question_reviewer_user_ids())
else:
raise Exception('Invalid review category: %s' % review_category)
return get_usernames(reviewer_ids)
def log_username_change(committer_id, old_username, new_username):
"""Stores the query to role structure in UsernameChangeAuditModel.
Args:
committer_id: str. The ID of the user that is making the change.
old_username: str. The current username that is being changed.
new_username: str. The new username that the current one is being
changed to.
"""
model_id = '%s.%d' % (committer_id, utils.get_current_time_in_millisecs())
audit_models.UsernameChangeAuditModel(
id=model_id, committer_id=committer_id, old_username=old_username,
new_username=new_username).put()
|
#!/usr/bin/env python
# coding=utf-8
from .traj_gen_base import TrajGen
import numpy as np
import casadi as ca
from scipy.interpolate import interp1d
class CHOMPTrajGen(TrajGen):
def __init__(self, knots_, dim_, pntDensity_):
super().__init__(knots_, dim_)
self.pntDensity = pntDensity_
        assert knots_.shape[0]==2, 'For CHOMPTrajGen, knots = [t0, tf]'
self.num_variables = int(np.floor((knots_[-1]-knots_[0])*pntDensity_))
self.dt = (knots_[-1]-knots_[0])/(self.num_variables-1)
self.ts = np.linspace(knots_[0], knots_[-1], self.num_variables) # different from Ts
self.Xs = np.zeros((self.dim, self.num_variables))
def findStepIndex(self, t):
"""
find the closest index of the segment
"""
time_diff = (self.ts-t)**2
return np.where(time_diff==np.min(time_diff))[0][0]
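    # For example, if self.ts is [0, 0.25, 0.5, 0.75, 1.0] (knots (0, 1) with
    # num_variables 5), then findStepIndex(0.6) returns 2, the index of the
    # closest sample time (0.5).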
def setDerivativeObj(self, weight_mask):
self.weight_mask = weight_mask
def addPin(self, pin_):
        if pin_['d'] >= self.num_variables:
            print("Warning: The degree of the pin exceeds the total number of variables. This pin is ignored.\n")
            return
        super().addPin(pin_)
X_ = pin_['X']
m = 0
if len(X_.shape) == 2: # 2 dimension ==> loose pin
if m in self.loosePinSet.keys():
self.loosePinSet[m].append(pin_)
else:
self.loosePinSet[m] = [pin_]
elif len(X_.shape) == 1: # vector ==> fix pin
if m in self.fixPinSet.keys():
self.fixPinSet[m].append(pin_)
else:
self.fixPinSet[m] = [pin_]
else:
print("Warning: Dim of pin value is invalid\n")
def getDiffMat(self, d_):
if d_ == 0:
mat_ = np.diag(np.ones(self.num_variables))
else:
mat_ = np.diag(np.ones(self.num_variables))
for j in range(1, d_+1):
D_ = np.zeros((self.num_variables-j, self.num_variables-j+1))
for i in range(self.num_variables-j):
D_[i, i:i+2] = np.array([-1, 1])
D_ = D_/self.dt
mat_ = np.dot(D_, mat_)
return mat_
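    # Illustrative example: for num_variables = 4 and d_ = 1 this returns the
    # forward-difference matrix
    #   [[-1,  1,  0,  0],
    #    [ 0, -1,  1,  0],
    #    [ 0,  0, -1,  1]] / dt
    # so that np.dot(getDiffMat(1), x) approximates the first derivative of a
    # sampled trajectory x.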
def loosePin2InequalityMat(self,):
ASet = None
BSet = None
if len(self.loosePinSet.keys()) == 0:
return ASet, BSet
for pin in self.loosePinSet[0]:
a_set_ = []
b_set_ = []
for dd in range(self.dim):
n_ = np.min([self.findStepIndex(pin['t']), self.num_variables-pin['d']-1])
a_ = np.zeros((2, self.num_variables-pin['d']))
a_[:, n_] = np.array([1, -1])
a_ = np.dot(a_, self.getDiffMat(pin['d']))
a_set_.append(a_)
b_ = np.array([pin['X'][dd, 1], -pin['X'][dd, 0]]).reshape(-1, 1)
b_set_.append(b_)
if ASet is None:
ASet = np.array(a_set_)
BSet = np.array(b_set_).reshape(self.dim, -1, 1)
else:
ASet = np.concatenate((ASet, np.array(a_set_)), axis=1)
BSet = np.concatenate((BSet, np.array(b_set_).reshape(self.dim, -1, 1)), axis=1)
print('Bset final in {}'.format(BSet.shape))
return ASet, BSet
def fixPin2EqualityMat(self,):
AeqSet = None
BeqSet = None
if len(self.fixPinSet.keys())==0:
return AeqSet, BeqSet
for pin in self.fixPinSet[0]:
aeq_set_ = []
beq_set_ = []
for dd in range(self.dim):
n_ = np.min([self.findStepIndex(pin['t']), self.num_variables-pin['d']-1])
a_ = np.zeros(self.num_variables-pin['d'])
a_[n_] = 1.0
a_ = np.dot(a_, self.getDiffMat(pin['d']))
aeq_set_.append(a_)
# print(aeq_set_)
b_ = pin['X'][dd]
beq_set_.append(b_)
if AeqSet is None:
AeqSet = np.array(aeq_set_).reshape(self.dim, 1, -1)
BeqSet = np.array(beq_set_).reshape(self.dim, 1, -1)
# print(AeqSet.shape)
# print(BeqSet.shape)
else:
AeqSet = np.concatenate((AeqSet, np.array(aeq_set_).reshape(self.dim, 1, -1)), axis=1)
BeqSet = np.concatenate((BeqSet, np.array(beq_set_).reshape(self.dim, 1, -1)), axis=1)
# print(BeqSet.shape)
return AeqSet, BeqSet
def getQPset(self,):
# 1. objective
QSet = np.zeros((self.dim, self.num_variables, self.num_variables))
for dd in range(self.dim):
Q_ = np.zeros((self.num_variables, self.num_variables))
for d in range(1, self.weight_mask.shape[0]+1):
if self.weight_mask[d-1]>0:
temp_ = self.getDiffMat(d)
Qd_ = np.dot(temp_.T, temp_)
Q_ = Q_ + self.weight_mask[d-1]*Qd_
QSet[dd] = Q_
# 2. constraints
ASet, BSet = self.loosePin2InequalityMat()
AeqSet, BeqSet = self.fixPin2EqualityMat()
return QSet, ASet, BSet, AeqSet, BeqSet
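    # Per dimension dd, these pieces define the QP that solve() passes to
    # IPOPT below:
    #   minimize    x^T QSet[dd] x
    #   subject to  ASet[dd] x <= BSet[dd]      (loose pins)
    #               AeqSet[dd] x == BeqSet[dd]  (fix pins)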
def solve(self,):
self.isSolved = True
# prepare QP
QSet, ASet, BSet, AeqSet, BeqSet = self.getQPset()
        if AeqSet is None:
            print("Please define the beginning and also the end pins")
            return False
for dd in range(self.dim):
print('solving {}-th dimension.. \n'.format(dd))
x_sym = ca.SX.sym('x', QSet[0].shape[0])
opts_setting = {'ipopt.max_iter':100, 'ipopt.print_level':0, 'print_time':0, 'ipopt.acceptable_tol':1e-8, 'ipopt.acceptable_obj_change_tol':1e-6}
obj = ca.mtimes([x_sym.T, QSet[dd], x_sym])
if ASet is None:
a_set = AeqSet[dd].copy()
else:
a_set = np.concatenate((ASet[dd], AeqSet[dd]))
Ax_sym = ca.mtimes([a_set, x_sym])
if BSet is None:
b_set_u = BeqSet[dd]
b_set_l = BeqSet[dd]
else:
b_set_u = np.concatenate((BSet[dd], BeqSet[dd]), axis=0) # Ax <= b_set_u
b_set_l = np.concatenate((-np.inf*np.ones(BSet[dd].shape), BeqSet[dd]), axis=0) # Ax >= b_set_l
nlp_prob = {'f': obj, 'x': x_sym, 'g':Ax_sym}
solver = ca.nlpsol('solver', 'ipopt', nlp_prob, opts_setting)
try:
result = solver(lbg=b_set_l, ubg=b_set_u,)
Phat_ = result['x']
# print(Phat_)
flag_ = True
            except Exception:
Phat_ = None
flag_ = False
if flag_:
self.Xs[dd] = Phat_.full().flatten()
else:
self.isSolved = False
print("Failure ..")
return False
return True
def eval(self, t_, d_):
val_ = np.zeros((self.dim, t_.shape[0]))
for dd in range(self.dim):
for idx in range(t_.shape[0]):
t_i = t_[idx]
if t_i < self.Ts[0] or t_i > self.Ts[-1]:
print("WARNING: Eval of t: out of bound. Extrapolation\n")
Xsd_ = np.dot(self.getDiffMat(d_), self.Xs[dd].T)
if d_ >0:
t_v_ = self.ts[:-d_]
else:
t_v_ = self.ts
# print(t_v_.shape)
# print(Xsd_.shape)
set_interp = interp1d(t_v_, Xsd_, kind='linear')
# print(t_v_[-1])
# print(t_[idx])
if t_[idx] <= t_v_[-1]:
val_[dd, idx] = set_interp(t_[idx])
else:
val_[dd, idx] = set_interp(t_v_[-1])
return val_
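# --- Illustration (added, not part of the original module) --------------------
# A standalone sketch of the finite-difference operator built by getDiffMat():
# chaining first-order differences d times yields an (n-d) x n matrix whose
# product with the n sampled values approximates the d-th derivative at step dt.
import numpy as np  # already imported at the top of this module; repeated so the sketch is self-contained
def _diff_mat_demo(n, d, dt):
    mat = np.eye(n)
    for j in range(1, d + 1):
        D = np.zeros((n - j, n - j + 1))
        for i in range(n - j):
            D[i, i:i + 2] = [-1.0, 1.0]
        mat = np.dot(D / dt, mat)   # chain one more first-order difference
    return mat
if __name__ == '__main__':
    samples = np.array([0.0, 1.0, 4.0, 9.0, 16.0])    # t**2 sampled at dt = 1
    print(_diff_mat_demo(5, 2, 1.0).dot(samples))      # ~[2. 2. 2.]: second derivative
# -------------------------------------------------------------------------------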
|
"""Package setup script."""
from setuptools import setup, find_packages
# Python packaging constants
CLASSIFIERS = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
LICENSES = {
'MIT license':
'License :: OSI Approved :: MIT License',
'BSD license':
'License :: OSI Approved :: BSD License',
'Apache Software License 2.0':
'License :: OSI Approved :: Apache Software License',
'GNU General Public License v3':
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
}
REQUIREMENTS = {
'install': [],
'setup': ['pytest-runner'],
'tests': ['pytest']
}
# Project constants
EMAIL = 'infosmith@prontonmail.com'
FULL_NAME = "David S."
GITHUB_ACCOUNT = 'infosmith'
LICENSE = 'MIT license'
PROJECT_SLUG = 'helpers'
PROJECT_SHORT_DESCRIPTION = 'Improved developer experience, accumulated.'
VERSION = '0.3.0'
# Project conditional configuration
if LICENSE in LICENSES:
    CLASSIFIERS.append(LICENSES[LICENSE])
# Configure project
setup(
author=FULL_NAME,
author_email=EMAIL,
classifiers=CLASSIFIERS,
description=PROJECT_SHORT_DESCRIPTION,
include_package_data=True,
install_requires=REQUIREMENTS['install'],
keywords=PROJECT_SLUG,
license=LICENSE,
name=PROJECT_SLUG,
packages=find_packages(include=[PROJECT_SLUG]),
setup_requires=REQUIREMENTS['setup'],
test_suite='tests',
tests_require=REQUIREMENTS['tests'],
url="https://github.com/{}/{}".format(GITHUB_ACCOUNT, PROJECT_SLUG),
version=VERSION,
zip_safe=False,
)
|
"""
WSGI config for DiscordOauth2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DiscordOauth2.settings')
application = get_wsgi_application()
|
"""
A module to show off a timed animation using coroutines
Making timed animations is messy, because we have to add a lot of
class attributes for all of the loop variables. A cleaner way is
to do this with coroutines. Each animation is its own coroutine.
The advantage of the coroutine is that yield allows you to pause
to let the game class draw. If you do not do that, then your
loop will keep going and you will never get a chance to draw. And
if you do not draw, there is no animation.
Author: Walker M. White (wmw2)
Date: November 20, 2019
"""
import introcs
import random
import math
from game2d import *
import time
############# CONSTANTS #############
# Window Size
WINDOW_WIDTH = 512
WINDOW_HEIGHT = 512
# THE ANIMATION SPEED IN SECONDS
ANIMATION_SPEED = 1
############# CONTROLLER CLASS #############
class Animation(GameApp):
"""
This class is an application to animate an image with the arrow keys
At each step, the update() method checks for key input
and moves the image accordingly.
Attribute view : the view (inherited from GameApp)
Invariant: view is an instance of GView
Attribute image: the image to animate
Invariant: image is a GImage made from a PNG file
"""
# Attribute _animator: A coroutine for performing an animation
# Invariant: _animator is a generator-based coroutine (or None)
# THE THREE MAIN METHODS
def start(self):
"""
Initializes the application, creating new attributes.
"""
self.image = GImage(x=WINDOW_WIDTH/2,y=WINDOW_HEIGHT/2,source='Walker.png')
self.image.angle = 0 # Doing this prevents a slow down due to initialization
self._animator = None
def update(self,dt):
"""
Animates the image.
Parameter dt: The time since the last animation frame.
Precondition: dt is a float.
"""
        if self._animator is not None: # We have something to animate
try:
self._animator.send(dt) # Tell it how far to animate
            except StopIteration:
self._animator = None # Stop animating
elif self.input.is_key_down('left'):
self._animator = self._animate_turn('left')
next(self._animator) # Start up the animator
elif self.input.is_key_down('right'):
self._animator = self._animate_turn('right')
next(self._animator) # Start up the animator
elif self.input.is_key_down('up'):
self._animator = self._animate_slide('up')
next(self._animator) # Start up the animator
elif self.input.is_key_down('down'):
self._animator = self._animate_slide('down')
next(self._animator) # Start up the animator
def draw(self):
"""
Draws the image
"""
self.image.draw(self.view)
def _animate_turn(self,direction):
"""
Animates a rotation of the image over ANIMATION_SPEED seconds
This method is a coroutine that takes a break (so that the game
can redraw the image) every time it moves it. The coroutine takes
the dt as periodic input so it knows how many (parts of) seconds
to animate.
        The frame time dt is not a parameter of this method; it is received
        each frame through the (yield) expression, sent by update().
Parameter direction: The direction to rotate.
Precondition: direction is a string and one of 'left' or 'right'.
"""
sangle = self.image.angle
if direction == 'left':
fangle = sangle+90
else:
fangle = sangle-90
# Degrees per second
steps = (fangle-sangle)/ANIMATION_SPEED
animating = True
while animating:
# Get the current time
dt = (yield)
amount = steps*dt
# Update the angle
self.image.angle = self.image.angle+amount
            # If we go too far, clamp and stop animating
if abs(self.image.angle-sangle) >= 90:
self.image.angle = fangle
animating = False
def _animate_slide(self,direction):
"""
Animates a vertical up or down of the image over ANIMATION_SPEED seconds
This method is a coroutine that takes a break (so that the game
can redraw the image) every time it moves it. The coroutine takes
the dt as periodic input so it knows how many (parts of) seconds
to animate.
        The frame time dt is not a parameter of this method; it is received
        each frame through the (yield) expression, sent by update().
Parameter direction: The direction to slide.
Precondition: direction is a string and one of 'up' or 'down'.
"""
svert = self.image.y
if direction == 'up':
fvert = svert+self.image.height
else:
fvert = svert-self.image.height
        # Pixels per second
steps = (fvert-svert)/ANIMATION_SPEED
animating = True
while animating:
# Get the current time
dt = (yield)
amount = steps*dt
            # Update the vertical position
            self.image.y = self.image.y+amount
            # If we go too far, clamp and stop animating
if abs(self.image.y-svert) >= self.image.height:
self.image.y = fvert
animating = False
# Application Code
if __name__ == '__main__':
Animation(left=150,width=WINDOW_WIDTH,height=WINDOW_HEIGHT,fps=60.0).run()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ApiResourceLocation(Model):
"""ApiResourceLocation.
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'area': {'key': 'area', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
'route_template': {'key': 'routeTemplate', 'type': 'str'},
'resource_version': {'key': 'resourceVersion', 'type': 'int'},
'min_version': {'key': 'minVersion', 'type': 'float'},
'max_version': {'key': 'maxVersion', 'type': 'float'},
'released_version': {'key': 'releasedVersion', 'type': 'str'},
}
def __init__(self, id=None, area=None, resource_name=None,
route_template=None, resource_version=None,
min_version=None, max_version=None,
released_version=None):
super(ApiResourceLocation, self).__init__()
self.id = id
self.area = area
self.resource_name = resource_name
self.route_template = route_template
self.resource_version = resource_version
self.min_version = min_version
self.max_version = max_version
self.released_version = released_version
class ImproperException(Model):
"""ImproperException.
:param message:
:type message: str
"""
_attribute_map = {
'message': {'key': 'Message', 'type': 'str'}
}
def __init__(self, message=None):
super(ImproperException, self).__init__()
self.message = message
class SystemException(Model):
"""SystemException.
:param class_name:
:type class_name: str
:param inner_exception:
:type inner_exception: :class:`SystemException <vsts.models.SystemException>`
:param message:
:type message: str
"""
_attribute_map = {
'class_name': {'key': 'ClassName', 'type': 'str'},
'message': {'key': 'Message', 'type': 'str'},
'inner_exception': {'key': 'InnerException', 'type': 'SystemException'}
}
def __init__(self, class_name=None, message=None, inner_exception=None):
super(SystemException, self).__init__()
self.class_name = class_name
self.message = message
self.inner_exception = inner_exception
class VssJsonCollectionWrapperBase(Model):
"""VssJsonCollectionWrapperBase.
:param count:
:type count: int
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'}
}
def __init__(self, count=None):
super(VssJsonCollectionWrapperBase, self).__init__()
self.count = count
class WrappedException(Model):
"""WrappedException.
:param exception_id:
:type exception_id: str
:param inner_exception:
:type inner_exception: :class:`WrappedException <vsts.models.WrappedException>`
:param message:
:type message: str
:param type_name:
:type type_name: str
:param type_key:
:type type_key: str
:param error_code:
:type error_code: int
:param event_id:
:type event_id: int
:param custom_properties:
:type custom_properties: dict
"""
_attribute_map = {
'exception_id': {'key': '$id', 'type': 'str'},
'inner_exception': {'key': 'innerException', 'type': 'WrappedException'},
'message': {'key': 'message', 'type': 'str'},
'type_name': {'key': 'typeName', 'type': 'str'},
'type_key': {'key': 'typeKey', 'type': 'str'},
'error_code': {'key': 'errorCode', 'type': 'int'},
'event_id': {'key': 'eventId', 'type': 'int'},
'custom_properties': {'key': 'customProperties', 'type': '{object}'}
}
def __init__(self, exception_id=None, inner_exception=None, message=None,
type_name=None, type_key=None, error_code=None, event_id=None, custom_properties=None):
super(WrappedException, self).__init__()
self.exception_id = exception_id
self.inner_exception = inner_exception
self.message = message
self.type_name = type_name
self.type_key = type_key
self.error_code = error_code
self.event_id = event_id
self.custom_properties = custom_properties
class VssJsonCollectionWrapper(VssJsonCollectionWrapperBase):
"""VssJsonCollectionWrapper.
:param count:
:type count: int
:param value:
:type value: object
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'value': {'key': 'value', 'type': 'object'}
}
def __init__(self, count=None, value=None):
super(VssJsonCollectionWrapper, self).__init__(count=count)
self.value = value
|
#from lib2to3.pytree import convert
import socket
import sys
import _thread
import json
import os
import time
import zmq
IP_ADDRESS = '127.0.0.1'
TOPIC = None
fila_msgs = []
conf = []
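# Summary of the message codes handled by enviar() below and the topics they
# are published under (taken from the handling code, added here as a comment):
#   1 -> 'login', 2 -> 'cadastrar', 4 -> 'usuario',
#   9 -> 'pedirListaAnuncios', 10 -> 'anuncio'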
# Sends data to the broker
def enviar():
ctx = zmq.Context()
sock = ctx.socket(zmq.PUB)
sock.connect(f"tcp://{IP_ADDRESS}:5500")
codigo = 5
    # Performs an action according to the received code
while True:
if(len(fila_msgs) == 0):
pass
else:
data = fila_msgs.pop(0)
data_converted = json.loads(data)
codigo = data_converted['codigo']
if(codigo == 1):
msg_json = data
TOPIC = 'login'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if (codigo == 2):
msg_json = data
TOPIC = 'cadastrar'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if codigo == 4 :
msg_json = data
TOPIC = 'usuario'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if codigo == 9:
msg_json = data
TOPIC = 'pedirListaAnuncios'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if codigo == 10:
msg_json = data
TOPIC = 'anuncio'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
# Receives the confirmation from the broker
def receberConfirmacao():
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect(f"tcp://{IP_ADDRESS}:5501")
while True:
TOPIC = 'confirmacao'
sock.subscribe(f"{TOPIC}")
msg_string = sock.recv_string()
msg_json = sock.recv_json()
#print(msg_json)
        # Confirmation data
data = msg_json
data_converted = json.loads(data)
codigo = data_converted['codigo']
codigo2 = data_converted['codigo2']
confirmacao = data_converted['confirmacao']
        # Append the confirmation
conf.append(confirmacao)
# Receives the list of ads
def receberAnuncios():
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect(f"tcp://{IP_ADDRESS}:5501")
while True:
        # Receives the ad list data
TOPIC = 'anuncios'
sock.subscribe(f"{TOPIC}")
msg_string = sock.recv_string()
msg_json = sock.recv_json()
#print(msg_json)
        # Displays the received ads
data = msg_json
anuncios = json.loads(data)
os.system('clear') or None
for anuncio in anuncios:
print("================================")
print('Anuncio ID: ', anuncio['id'])
print('Produto ID: ', anuncio['produto_id'])
print('Descricao: ', anuncio['descricao'])
print('De cliente: ', anuncio['de_cliente'])
print('Data: ', anuncio['data'])
print("================================")
# Receives the user profile
def verPerfil():
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect(f"tcp://{IP_ADDRESS}:5501")
while True:
        # Receives the user data
TOPIC = 'dados_usuario'
sock.subscribe(f"{TOPIC}")
msg_string = sock.recv_string()
msg_json = sock.recv_json()
#print(msg_json)
        # Displays the user data
data = msg_json
converted = json.loads(data)
nome = converted['nome']
dataNasc = converted['nascimento']
cpf = converted['cpf']
email = converted['email']
senha = converted['senha']
os.system('clear') or None
print("================================")
print("Nome : " + nome)
print("Data de Nascimento : " + dataNasc)
print("CPF : " + cpf)
print("Email : " + email)
print("Senha : " + senha)
print("================================")
# Runs the menu
def client():
_thread.start_new_thread(enviar,())
_thread.start_new_thread(receberConfirmacao,())
_thread.start_new_thread(verPerfil,())
ri = 'nao'
ctx = zmq.Context()
sock = ctx.socket(zmq.PUB)
sock.connect(f"tcp://{IP_ADDRESS}:5500")
opc = None
#time.sleep(20)
while opc != "4" :
os.system('clear') or None
print("================================")
print(" 1 - Logar")
print(" 2 - Criar Conta")
print(" 4 - Sair")
print("================================")
opc = input('Digite uma Opcao: ')
if opc == '1' :
os.system('clear') or None
email = input("Digite o email: ")
senha = input("Digite a senha: ")
msg= {}
msg ['codigo'] = 1
msg ['codigo2'] = 1
msg ['email'] = email
msg ['senha'] = senha
msg_json = json.dumps(msg)
fila_msgs.append(msg_json)
if opc == '2':
os.system('clear') or None
nome = input("Digite o seu nome: ")
nascimento = input("Digite sua data Nascimento: ")
endereco = input("Digite seu endereço: ")
cpf = input("Digite seu cpf: ")
email = input("Digite seu Email: ")
senha = input("Digite sua senha: ")
msg= {}
msg ['codigo'] = 2
msg ['codigo2'] = 2
msg ['nome'] = nome
msg ['nascimento'] = nascimento
msg ['endereco'] = endereco
msg ['cpf'] = cpf
msg ['email'] = email
msg ['senha'] = senha
msg_json = json.dumps(msg)
fila_msgs.append(msg_json)
if __name__ == "__main__":
client()
|
# Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""cryptomath module
This module has basic math/crypto code."""
from __future__ import print_function
import os
import math
import base64
import binascii
from .compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
# **************************************************************************
# Simple hash functions
# **************************************************************************
import hmac
import hashlib
def MD5(b):
return bytearray(hashlib.md5(compat26Str(b)).digest())
def SHA1(b):
return bytearray(hashlib.sha1(compat26Str(b)).digest())
def SHA256(b):
return bytearray(hashlib.sha256(compat26Str(b)).digest())
def HMAC_MD5(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.md5).digest())
def HMAC_SHA1(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha1).digest())
def HMAC_SHA256(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha256).digest())
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
"""Convert an integer into a bytearray, zero-pad to howManyBytes.
The returned bytearray may be smaller than howManyBytes, but will
not be larger. The returned bytearray will contain a big-endian
encoding of the input integer (n).
"""
    if howManyBytes is None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
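# Worked example (added comment): numberToByteArray(0x0102) -> bytearray(b'\x01\x02'),
# and numberToByteArray(1, 4) -> bytearray(b'\x00\x00\x00\x01') (big-endian, zero-padded).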
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
b = bytearray(mpi[4:])
return bytesToNumber(b)
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
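# Worked example (added comment): numberToMPI(0x80) == b'\x00\x00\x00\x02\x00\x80'
# (4-byte big-endian length of 2, then a zero pad byte because the high bit of
# 0x80 is set, then the magnitude itself).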
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
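# Worked examples (added comment): numBits(255) == 8 and numBytes(255) == 1,
# while numBits(256) == 9 and numBytes(256) == 2.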
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
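# Worked example (added comment): invMod(3, 7) == 5 because 3*5 % 7 == 1, and
# invMod(2, 4) == 0 because 2 has no inverse modulo 4.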
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
#Pre-calculate a sieve of the 168 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
|
"""
Files for testing.
"""
import base64
import tempfile
from PIL import Image
from six import BytesIO
__all__ = (
'BASE64_PREFIX',
'TEMPORARY_FILE_LIST',
'TEMPORARY_FILE_LIST_FILE_CONTENT',
'TEMPORARY_FILE_LIST_FILE_BASE64',
'TEMPORARY_FILE_VIEW',
'TEMPORARY_FILE_VIEW_FILE_CONTENT',
'TEMPORARY_FILE_VIEW_FILE_BASE64',
'TEMPORARY_FILE_ADD',
'TEMPORARY_FILE_ADD_FILE_CONTENT',
'TEMPORARY_FILE_ADD_FILE_BASE64',
'TEMPORARY_FILE_CHANGE',
'TEMPORARY_FILE_CHANGE_FILE_CONTENT',
'TEMPORARY_FILE_CHANGE_FILE_BASE64',
'TEMPORARY_FILE_CHANGE_CHANGED',
'TEMPORARY_FILE_CHANGE_CHANGED_FILE_CONTENT',
'TEMPORARY_FILE_CHANGE_CHANGED_FILE_BASE64',
'TEMPORARY_FILE_DELETE',
'TEMPORARY_FILE_DELETE_FILE_CONTENT',
'TEMPORARY_FILE_DELETE_FILE_BASE64',
)
def get_temporary_file(prefix):
"""Get a temporary file.
:return:
"""
image = Image.new('RGBA', size=(100, 100), color=(256, 0, 0))
tmp_file = BytesIO()
_tmp_file = tempfile.NamedTemporaryFile(prefix=prefix, suffix='.png')
image.save(tmp_file, "PNG")
tmp_file.seek(0)
tmp_file.name = _tmp_file.name
return tmp_file
BASE64_PREFIX = 'data:image/png;base64,'
TEMPORARY_FILE_LIST = get_temporary_file(prefix='LIST')
TEMPORARY_FILE_LIST_FILE_CONTENT = TEMPORARY_FILE_LIST.read()
TEMPORARY_FILE_LIST_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_LIST_FILE_CONTENT
).decode()
TEMPORARY_FILE_LIST.seek(0)
TEMPORARY_FILE_VIEW = get_temporary_file(prefix='VIEW')
TEMPORARY_FILE_VIEW_FILE_CONTENT = TEMPORARY_FILE_VIEW.read()
TEMPORARY_FILE_VIEW_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_VIEW_FILE_CONTENT
).decode()
TEMPORARY_FILE_VIEW.seek(0)
TEMPORARY_FILE_ADD = get_temporary_file(prefix='ADD')
TEMPORARY_FILE_ADD_FILE_CONTENT = TEMPORARY_FILE_ADD.read()
TEMPORARY_FILE_ADD_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_ADD_FILE_CONTENT
).decode()
TEMPORARY_FILE_ADD.seek(0)
TEMPORARY_FILE_CHANGE = get_temporary_file(prefix='CHANGE')
TEMPORARY_FILE_CHANGE_FILE_CONTENT = TEMPORARY_FILE_CHANGE.read()
TEMPORARY_FILE_CHANGE_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_CHANGE_FILE_CONTENT
).decode()
TEMPORARY_FILE_CHANGE.seek(0)
TEMPORARY_FILE_CHANGE_CHANGED = get_temporary_file(prefix='CHANGE_CHANGED')
TEMPORARY_FILE_CHANGE_CHANGED_FILE_CONTENT = \
TEMPORARY_FILE_CHANGE_CHANGED.read()
TEMPORARY_FILE_CHANGE_CHANGED_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_CHANGE_CHANGED_FILE_CONTENT
).decode()
TEMPORARY_FILE_CHANGE_CHANGED.seek(0)
TEMPORARY_FILE_DELETE = get_temporary_file(prefix='DELETE')
TEMPORARY_FILE_DELETE_FILE_CONTENT = TEMPORARY_FILE_DELETE.read()
TEMPORARY_FILE_DELETE_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_DELETE_FILE_CONTENT
).decode()
TEMPORARY_FILE_DELETE.seek(0)
|